aboutsummaryrefslogtreecommitdiffstats
path: root/arch/mips/mm/c-octeon.c
blob: 729e7702b1de86212653cf56b49662aa0637c1d9 (plain) (blame)
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2005-2007 Cavium Networks
 */
#include <linux/export.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/smp.h>
#include <linux/mm.h>
#include <linux/bitops.h>
#include <linux/cpu.h>
#include <linux/io.h>

#include <asm/bcache.h>
#include <asm/bootinfo.h>
#include <asm/cacheops.h>
#include <asm/cpu-features.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/r4kcache.h>
#include <asm/traps.h>
#include <asm/mmu_context.h>
#include <asm/war.h>

#include <asm/octeon/octeon.h>

/*
 * Per-core saved Dcache error word, indexed by core number.  Read and
 * cleared by co_cache_error_call_notifiers(); presumably written by the
 * low-level cache error handler (except_vec2_octeon) before the C code
 * runs — confirm against the assembly handler.
 */
unsigned long long cache_err_dcache[NR_CPUS];
EXPORT_SYMBOL_GPL(cache_err_dcache);

/*
 * On Octeon the hardware keeps the dcache consistent across TLB
 * updates, so from Linux's point of view it behaves like a physically
 * tagged cache and no per-page dcache flush is ever required.
 */
static void octeon_flush_data_cache_page(unsigned long addr)
{
	/* Intentionally empty */
}

/*
 * Invalidate this core's icache.  A single "synci" is issued; the
 * range-flush helper below ignores its start/end arguments and relies
 * on this one instruction covering the whole icache, so presumably
 * synci flushes the entire icache on Octeon — confirm vs the HW manual.
 */
static inline void octeon_local_flush_icache(void)
{
	asm volatile ("synci 0($0)");
}

/*
 * Flush local I-cache for the specified range.
 *
 * The @start/@end bounds are ignored: octeon_local_flush_icache()
 * invalidates the entire local icache, which covers any range.
 */
static void local_octeon_flush_icache_range(unsigned long start,
					    unsigned long end)
{
	octeon_local_flush_icache();
}

/*
 * Flush caches as necessary for all cores affected by a
 * vma. If no vma is supplied, all cores are flushed.
 *
 * @vma:    VMA to flush or NULL to flush all icaches.
 */
static void octeon_flush_icache_all_cores(struct vm_area_struct *vma)
{
	/* Declared locally to avoid a header dependency on the SMP code */
	extern void octeon_send_ipi_single(int cpu, unsigned int action);
#ifdef CONFIG_SMP
	int cpu;
	cpumask_t mask;
#endif

	/* Order prior stores before the local flush and any IPIs */
	mb();
	octeon_local_flush_icache();
#ifdef CONFIG_SMP
	preempt_disable();
	cpu = smp_processor_id();

	/*
	 * If we have a vma structure, we only need to worry about
	 * cores it has been used on
	 */
	if (vma)
		mask = *mm_cpumask(vma->vm_mm);
	else
		mask = *cpu_online_mask;
	/* The local core was already flushed above; don't IPI ourselves */
	cpumask_clear_cpu(cpu, &mask);
	for_each_cpu(cpu, &mask)
		octeon_send_ipi_single(cpu, SMP_ICACHE_FLUSH);

	preempt_enable();
#endif
}


/*
 * Called to flush the icache on all cores.  Installed as both
 * flush_cache_all and flush_icache_all in octeon_cache_init().
 */
static void octeon_flush_icache_all(void)
{
	octeon_flush_icache_all_cores(NULL);
}


/*
 * Called to flush all memory associated with a memory
 * context.
 *
 * @mm:	    Memory context to flush (unused)
 */
static void octeon_flush_cache_mm(struct mm_struct *mm)
{
	/*
	 * According to the R4K version of this file, CPUs without
	 * dcache aliases don't need to do anything here
	 */
}


/*
 * Flush a range of kernel addresses out of the icache.
 *
 * The range is ignored: the whole icache is flushed on every core,
 * since no vma is available to narrow the affected core set.
 */
static void octeon_flush_icache_range(unsigned long start, unsigned long end)
{
	octeon_flush_icache_all_cores(NULL);
}


/*
 * Flush the icache for a trampoline. These are used for interrupt
 * and exception hooking.
 *
 * @addr:   Address to flush
 */
static void octeon_flush_cache_sigtramp(unsigned long addr)
{
	struct vm_area_struct *vma;

	/*
	 * find_vma() must only be called with mmap_sem held: without it
	 * the VMA tree can be modified concurrently and the returned
	 * vma may be freed underneath us.
	 */
	down_read(&current->mm->mmap_sem);
	vma = find_vma(current->mm, addr);
	octeon_flush_icache_all_cores(vma);
	up_read(&current->mm->mmap_sem);
}


/*
 * Flush a range of addresses belonging to a vma.  Only executable
 * mappings need any work (the icache must be invalidated on all cores
 * the mm has run on); data mappings need nothing on Octeon.
 *
 * @vma:    VMA to flush
 * @start:  start address (unused — the whole icache is flushed)
 * @end:    end address (unused)
 */
static void octeon_flush_cache_range(struct vm_area_struct *vma,
				     unsigned long start, unsigned long end)
{
	if (!(vma->vm_flags & VM_EXEC))
		return;

	octeon_flush_icache_all_cores(vma);
}


/*
 * Flush a specific page of a vma.  As with octeon_flush_cache_range(),
 * only executable mappings require an icache flush; everything else is
 * a no-op.
 *
 * @vma:    VMA to flush page for
 * @page:   Page to flush (unused — the whole icache is flushed)
 * @pfn:    Page frame number (unused)
 */
static void octeon_flush_cache_page(struct vm_area_struct *vma,
				    unsigned long page, unsigned long pfn)
{
	if (!(vma->vm_flags & VM_EXEC))
		return;

	octeon_flush_icache_all_cores(vma);
}

/*
 * Flush a kernel vmap range.  Not expected to ever be needed on
 * Octeon; deliberately trips BUG() if it is reached.
 */
static void octeon_flush_kernel_vmap_range(unsigned long vaddr, int size)
{
	BUG();
}

/*
 * Probe Octeon's caches and fill in the icache/dcache geometry
 * (line size, number of sets, associativity) in current_cpu_data.
 */
static void probe_octeon(void)
{
	unsigned long icache_size;
	unsigned long dcache_size;
	unsigned int config1;
	struct cpuinfo_mips *c = &current_cpu_data;

	config1 = read_c0_config1();
	switch (c->cputype) {
	case CPU_CAVIUM_OCTEON:
	case CPU_CAVIUM_OCTEON_PLUS:
		/*
		 * Decode the icache from CP0 Config1:
		 * IL (bits 21:19) = line size, IS (24:22) = sets per way,
		 * IA (18:16) = associativity - 1.
		 */
		c->icache.linesz = 2 << ((config1 >> 19) & 7);
		c->icache.sets = 64 << ((config1 >> 22) & 7);
		c->icache.ways = 1 + ((config1 >> 16) & 7);
		c->icache.flags |= MIPS_CACHE_VTAG;
		icache_size =
			c->icache.sets * c->icache.ways * c->icache.linesz;
		c->icache.waybit = ffs(icache_size / c->icache.ways) - 1;
		/* Dcache geometry is fixed per model, not probed */
		c->dcache.linesz = 128;
		if (c->cputype == CPU_CAVIUM_OCTEON_PLUS)
			c->dcache.sets = 2; /* CN5XXX has two Dcache sets */
		else
			c->dcache.sets = 1; /* CN3XXX has one Dcache set */
		c->dcache.ways = 64;
		dcache_size =
			c->dcache.sets * c->dcache.ways * c->dcache.linesz;
		c->dcache.waybit = ffs(dcache_size / c->dcache.ways) - 1;
		c->options |= MIPS_CPU_PREFETCH;
		break;

	case CPU_CAVIUM_OCTEON2:
		/* Line size from Config1 IL; sets/ways are model constants */
		c->icache.linesz = 2 << ((config1 >> 19) & 7);
		c->icache.sets = 8;
		c->icache.ways = 37;
		c->icache.flags |= MIPS_CACHE_VTAG;
		icache_size = c->icache.sets * c->icache.ways * c->icache.linesz;

		c->dcache.linesz = 128;
		c->dcache.ways = 32;
		c->dcache.sets = 8;
		dcache_size = c->dcache.sets * c->dcache.ways * c->dcache.linesz;
		c->options |= MIPS_CPU_PREFETCH;
		break;

	case CPU_CAVIUM_OCTEON3:
		/* All geometry is a model constant on OCTEON3 */
		c->icache.linesz = 128;
		c->icache.sets = 16;
		c->icache.ways = 39;
		c->icache.flags |= MIPS_CACHE_VTAG;
		icache_size = c->icache.sets * c->icache.ways * c->icache.linesz;

		c->dcache.linesz = 128;
		c->dcache.ways = 32;
		c->dcache.sets = 8;
		dcache_size = c->dcache.sets * c->dcache.ways * c->dcache.linesz;
		c->options |= MIPS_CPU_PREFETCH;
		break;

	default:
		panic("Unsupported Cavium Networks CPU type");
		break;
	}

	/* compute a couple of other cache variables */
	c->icache.waysize = icache_size / c->icache.ways;
	c->dcache.waysize = dcache_size / c->dcache.ways;

	/* Recompute sets from the totals so all fields stay consistent */
	c->icache.sets = icache_size / (c->icache.linesz * c->icache.ways);
	c->dcache.sets = dcache_size / (c->dcache.linesz * c->dcache.ways);

	/* Only CPU 0 reports the geometry */
	if (smp_processor_id() == 0) {
		pr_notice("Primary instruction cache %ldkB, %s, %d way, "
			  "%d sets, linesize %d bytes.\n",
			  icache_size >> 10,
			  cpu_has_vtag_icache ?
				"virtually tagged" : "physically tagged",
			  c->icache.ways, c->icache.sets, c->icache.linesz);

		pr_notice("Primary data cache %ldkB, %d-way, %d sets, "
			  "linesize %d bytes.\n",
			  dcache_size >> 10, c->dcache.ways,
			  c->dcache.sets, c->dcache.linesz);
	}
}

/*
 * Install the Octeon-specific cache error exception handler
 * (except_vec2_octeon, assembly) at vector offset 0x100, 0x80 bytes.
 */
static void  octeon_cache_error_setup(void)
{
	extern char except_vec2_octeon;
	set_handler(0x100, &except_vec2_octeon, 0x80);
}

/*
 * Setup the Octeon cache flush routines: probe the cache geometry,
 * then install the Octeon implementations of the generic MIPS
 * cache-maintenance hooks.
 */
void octeon_cache_init(void)
{
	probe_octeon();

	/* No dcache aliases, so shm only needs page alignment */
	shm_align_mask = PAGE_SIZE - 1;

	flush_cache_all			= octeon_flush_icache_all;
	__flush_cache_all		= octeon_flush_icache_all;
	flush_cache_mm			= octeon_flush_cache_mm;
	flush_cache_page		= octeon_flush_cache_page;
	flush_cache_range		= octeon_flush_cache_range;
	flush_cache_sigtramp		= octeon_flush_cache_sigtramp;
	flush_icache_all		= octeon_flush_icache_all;
	flush_data_cache_page		= octeon_flush_data_cache_page;
	flush_icache_range		= octeon_flush_icache_range;
	local_flush_icache_range	= local_octeon_flush_icache_range;

	__flush_kernel_vmap_range	= octeon_flush_kernel_vmap_range;

	build_clear_page();
	build_copy_page();

	board_cache_error_setup = octeon_cache_error_setup;
}

/*
 * Cache error exception handling: a notifier chain which external code
 * can hook, via the register/unregister helpers below, to be informed
 * of cache error exceptions.
 */
static RAW_NOTIFIER_HEAD(co_cache_error_chain);

/*
 * Register @nb on the cache error notifier chain.  Returns the
 * raw_notifier_chain_register() result (0 on success).
 */
int register_co_cache_error_notifier(struct notifier_block *nb)
{
	return raw_notifier_chain_register(&co_cache_error_chain, nb);
}
EXPORT_SYMBOL_GPL(register_co_cache_error_notifier);

/*
 * Remove @nb from the cache error notifier chain.  Returns the
 * raw_notifier_chain_unregister() result (0 on success).
 */
int unregister_co_cache_error_notifier(struct notifier_block *nb)
{
	return raw_notifier_chain_unregister(&co_cache_error_chain, nb);
}
EXPORT_SYMBOL_GPL(unregister_co_cache_error_notifier);

/*
 * Run the cache error notifier chain and, if no notifier reported the
 * event handled (returned NOTIFY_OK), log the error details directly.
 *
 * @val: non-zero for a non-recoverable error, where the Dcache error
 *       word was saved in cache_err_dcache[] (presumably by the
 *       low-level handler — see cache_err_dcache above); zero for a
 *       recoverable error, where CP0 CacheErr (Dcache) is still
 *       readable directly.
 */
static void co_cache_error_call_notifiers(unsigned long val)
{
	int rv = raw_notifier_call_chain(&co_cache_error_chain, val, NULL);
	if ((rv & ~NOTIFY_STOP_MASK) != NOTIFY_OK) {
		u64 dcache_err;
		unsigned long coreid = cvmx_get_core_num();
		u64 icache_err = read_octeon_c0_icacheerr();

		if (val) {
			/* Non-recoverable: consume (and clear) the saved value */
			dcache_err = cache_err_dcache[coreid];
			cache_err_dcache[coreid] = 0;
		} else {
			dcache_err = read_octeon_c0_dcacheerr();
		}

		pr_err("Core%lu: Cache error exception:\n", coreid);
		pr_err("cp0_errorepc == %lx\n", read_c0_errorepc());
		/* Bit 0 appears to be the error-valid flag — confirm vs HW manual */
		if (icache_err & 1) {
			pr_err("CacheErr (Icache) == %llx\n",
			       (unsigned long long)icache_err);
			write_octeon_c0_icacheerr(0);
		}
		if (dcache_err & 1) {
			pr_err("CacheErr (Dcache) == %llx\n",
			       (unsigned long long)dcache_err);
		}
	}
}

/*
 * Called when the exception is recoverable
 */

asmlinkage void cache_parity_error_octeon_recoverable(void)
{
	co_cache_error_call_notifiers(0);
}

/*
 * Called when the exception is not recoverable
 */

asmlinkage void cache_parity_error_octeon_non_recoverable(void)
{
	co_cache_error_call_notifiers(1);
	panic("Can't handle cache error: nested exception");
}
s='rem' style='width: 0.0%;'/> -rw-r--r--arch/arm/mach-msm/platsmp.c6
-rw-r--r--arch/arm/mach-msm/timer.c4
-rw-r--r--arch/arm/mach-mvebu/coherency.c2
-rw-r--r--arch/arm/mach-mvebu/headsmp.S2
-rw-r--r--arch/arm/mach-mvebu/platsmp.c5
-rw-r--r--arch/arm/mach-omap2/Kconfig2
-rw-r--r--arch/arm/mach-omap2/board-generic.c23
-rw-r--r--arch/arm/mach-omap2/dss-common.c2
-rw-r--r--arch/arm/mach-omap2/omap-headsmp.S2
-rw-r--r--arch/arm/mach-omap2/omap-mpuss-lowpower.c2
-rw-r--r--arch/arm/mach-omap2/omap-smp.c4
-rw-r--r--arch/arm/mach-omap2/omap-wakeupgen.c4
-rw-r--r--arch/arm/mach-omap2/omap_device.c18
-rw-r--r--arch/arm/mach-omap2/omap_hwmod.c2
-rw-r--r--arch/arm/mach-omap2/omap_hwmod.h50
-rw-r--r--arch/arm/mach-omap2/omap_hwmod_2xxx_ipblock_data.c6
-rw-r--r--arch/arm/mach-omap2/omap_hwmod_33xx_data.c3
-rw-r--r--arch/arm/mach-omap2/omap_hwmod_3xxx_data.c9
-rw-r--r--arch/arm/mach-omap2/omap_hwmod_44xx_data.c5
-rw-r--r--arch/arm/mach-omap2/omap_hwmod_54xx_data.c3
-rw-r--r--arch/arm/mach-omap2/serial.c11
-rw-r--r--arch/arm/mach-prima2/headsmp.S2
-rw-r--r--arch/arm/mach-prima2/platsmp.c4
-rw-r--r--arch/arm/mach-pxa/em-x270.c17
-rw-r--r--arch/arm/mach-pxa/mainstone.c3
-rw-r--r--arch/arm/mach-pxa/pcm990-baseboard.c3
-rw-r--r--arch/arm/mach-pxa/poodle.c4
-rw-r--r--arch/arm/mach-pxa/spitz.c4
-rw-r--r--arch/arm/mach-pxa/stargate2.c3
-rw-r--r--arch/arm/mach-s3c24xx/Kconfig2
-rw-r--r--arch/arm/mach-s3c24xx/clock-s3c2410.c161
-rw-r--r--arch/arm/mach-s3c24xx/clock-s3c2440.c3
-rw-r--r--arch/arm/mach-shmobile/board-armadillo800eva.c3
-rw-r--r--arch/arm/mach-shmobile/board-bockw.c8
-rw-r--r--arch/arm/mach-shmobile/board-lager.c2
-rw-r--r--arch/arm/mach-shmobile/headsmp-scu.S1
-rw-r--r--arch/arm/mach-shmobile/headsmp.S2
-rw-r--r--arch/arm/mach-shmobile/smp-emev2.c2
-rw-r--r--arch/arm/mach-shmobile/smp-r8a7779.c2
-rw-r--r--arch/arm/mach-shmobile/smp-sh73a0.c2
-rw-r--r--arch/arm/mach-socfpga/headsmp.S1
-rw-r--r--arch/arm/mach-socfpga/platsmp.c2
-rw-r--r--arch/arm/mach-spear/generic.h2
-rw-r--r--arch/arm/mach-spear/platsmp.c4
-rw-r--r--arch/arm/mach-sti/Kconfig3
-rw-r--r--arch/arm/mach-sti/headsmp.S2
-rw-r--r--arch/arm/mach-sti/platsmp.c6
-rw-r--r--arch/arm/mach-tegra/platsmp.c4
-rw-r--r--arch/arm/mach-tegra/pm.c2
-rw-r--r--arch/arm/mach-ux500/platsmp.c4
-rw-r--r--arch/arm/mach-zynq/common.c2
-rw-r--r--arch/arm/mach-zynq/common.h2
-rw-r--r--arch/arm/mach-zynq/headsmp.S2
-rw-r--r--arch/arm/mach-zynq/platsmp.c6
-rw-r--r--arch/arm/mm/Kconfig34
-rw-r--r--arch/arm/mm/context.c3
-rw-r--r--arch/arm/mm/mmu.c57
-rw-r--r--arch/arm/mm/proc-arm1020.S2
-rw-r--r--arch/arm/mm/proc-arm1020e.S2
-rw-r--r--arch/arm/mm/proc-arm1022.S2
-rw-r--r--arch/arm/mm/proc-arm1026.S3
-rw-r--r--arch/arm/mm/proc-arm720.S2
-rw-r--r--arch/arm/mm/proc-arm740.S2
-rw-r--r--arch/arm/mm/proc-arm7tdmi.S2
-rw-r--r--arch/arm/mm/proc-arm920.S2
-rw-r--r--arch/arm/mm/proc-arm922.S2
-rw-r--r--arch/arm/mm/proc-arm925.S2
-rw-r--r--arch/arm/mm/proc-arm926.S2
-rw-r--r--arch/arm/mm/proc-arm940.S2
-rw-r--r--arch/arm/mm/proc-arm946.S2
-rw-r--r--arch/arm/mm/proc-arm9tdmi.S2
-rw-r--r--arch/arm/mm/proc-fa526.S2
-rw-r--r--arch/arm/mm/proc-feroceon.S2
-rw-r--r--arch/arm/mm/proc-mohawk.S2
-rw-r--r--arch/arm/mm/proc-sa110.S2
-rw-r--r--arch/arm/mm/proc-sa1100.S2
-rw-r--r--arch/arm/mm/proc-v6.S2
-rw-r--r--arch/arm/mm/proc-v7-2level.S6
-rw-r--r--arch/arm/mm/proc-v7-3level.S6
-rw-r--r--arch/arm/mm/proc-v7.S13
-rw-r--r--arch/arm/mm/proc-xsc3.S2
-rw-r--r--arch/arm/mm/proc-xscale.S2
-rw-r--r--arch/arm/plat-samsung/Kconfig7
-rw-r--r--arch/arm/plat-samsung/Makefile2
-rw-r--r--arch/arm/plat-samsung/include/plat/clock.h5
-rw-r--r--arch/arm/plat-samsung/include/plat/pm.h8
-rw-r--r--arch/arm/plat-samsung/pm.c14
-rw-r--r--arch/arm/plat-versatile/platsmp.c6
-rw-r--r--arch/arm/xen/enlighten.c2
-rw-r--r--arch/arm64/include/asm/arch_timer.h2
-rw-r--r--arch/arm64/include/asm/debug-monitors.h7
-rw-r--r--arch/arm64/include/asm/system_misc.h3
-rw-r--r--arch/arm64/include/asm/thread_info.h4
-rw-r--r--arch/arm64/include/asm/virt.h13
-rw-r--r--arch/arm64/kernel/debug-monitors.c6
-rw-r--r--arch/arm64/kernel/entry.S2
-rw-r--r--arch/arm64/kernel/hw_breakpoint.c4
-rw-r--r--arch/arm64/kernel/process.c4
-rw-r--r--arch/arm64/kernel/smp.c23
-rw-r--r--arch/arm64/mm/fault.c46
-rw-r--r--arch/avr32/boards/atngw100/mrmt.c1
-rw-r--r--arch/blackfin/kernel/perf_event.c2
-rw-r--r--arch/blackfin/kernel/setup.c4
-rw-r--r--arch/blackfin/mach-bf561/smp.c6
-rw-r--r--arch/blackfin/mach-common/cache-c.c4
-rw-r--r--arch/blackfin/mach-common/ints-priority.c2
-rw-r--r--arch/blackfin/mach-common/smp.c18
-rw-r--r--arch/cris/arch-v32/kernel/smp.c2
-rw-r--r--arch/frv/kernel/setup.c2
-rw-r--r--arch/hexagon/kernel/setup.c2
-rw-r--r--arch/hexagon/kernel/smp.c4
-rw-r--r--arch/ia64/configs/generic_defconfig2
-rw-r--r--arch/ia64/configs/gensparse_defconfig2
-rw-r--r--arch/ia64/configs/tiger_defconfig2
-rw-r--r--arch/ia64/configs/xen_domu_defconfig2
-rw-r--r--arch/m32r/kernel/smpboot.c2
-rw-r--r--arch/metag/kernel/perf/perf_event.c6
-rw-r--r--arch/metag/kernel/smp.c22
-rw-r--r--arch/metag/kernel/traps.c2
-rw-r--r--arch/mips/Kconfig2
-rw-r--r--arch/mips/ath79/setup.c2
-rw-r--r--arch/mips/bcm47xx/Kconfig1
-rw-r--r--arch/mips/cavium-octeon/octeon-irq.c12
-rw-r--r--arch/mips/cavium-octeon/octeon-platform.c5
-rw-r--r--arch/mips/cavium-octeon/smp.c6
-rw-r--r--arch/mips/include/asm/cpu-features.h2
-rw-r--r--arch/mips/include/asm/mach-generic/spaces.h4
-rw-r--r--arch/mips/include/asm/uasm.h37
-rw-r--r--arch/mips/include/uapi/asm/siginfo.h7
-rw-r--r--arch/mips/kernel/bmips_vec.S10
-rw-r--r--arch/mips/kernel/cevt-bcm1480.c2
-rw-r--r--arch/mips/kernel/cevt-gic.c2
-rw-r--r--arch/mips/kernel/cevt-r4k.c2
-rw-r--r--arch/mips/kernel/cevt-sb1250.c2
-rw-r--r--arch/mips/kernel/cevt-smtc.c2
-rw-r--r--arch/mips/kernel/cpu-bugs64.c2
-rw-r--r--arch/mips/kernel/cpu-probe.c14
-rw-r--r--arch/mips/kernel/head.S4
-rw-r--r--arch/mips/kernel/smp-bmips.c30
-rw-r--r--arch/mips/kernel/smp-mt.c6
-rw-r--r--arch/mips/kernel/smp-up.c6
-rw-r--r--arch/mips/kernel/smp.c6
-rw-r--r--arch/mips/kernel/smtc.c2
-rw-r--r--arch/mips/kernel/spram.c14
-rw-r--r--arch/mips/kernel/sync-r4k.c12
-rw-r--r--arch/mips/kernel/traps.c13
-rw-r--r--arch/mips/kernel/watch.c2
-rw-r--r--arch/mips/kvm/Kconfig1
-rw-r--r--arch/mips/lantiq/irq.c2
-rw-r--r--arch/mips/lib/uncached.c2
-rw-r--r--arch/mips/mm/c-octeon.c6
-rw-r--r--arch/mips/mm/c-r3k.c8
-rw-r--r--arch/mips/mm/c-r4k.c34
-rw-r--r--arch/mips/mm/c-tx39.c2
-rw-r--r--arch/mips/mm/cache.c2
-rw-r--r--arch/mips/mm/cex-sb1.S4
-rw-r--r--arch/mips/mm/page.c40
-rw-r--r--arch/mips/mm/sc-ip22.c2
-rw-r--r--arch/mips/mm/sc-mips.c2
-rw-r--r--arch/mips/mm/sc-r5k.c2
-rw-r--r--arch/mips/mm/sc-rm7k.c12
-rw-r--r--arch/mips/mm/tlb-r3k.c2
-rw-r--r--arch/mips/mm/tlb-r4k.c4
-rw-r--r--arch/mips/mm/tlb-r8k.c4
-rw-r--r--arch/mips/mm/tlbex.c148
-rw-r--r--arch/mips/mm/uasm-micromips.c10
-rw-r--r--arch/mips/mm/uasm-mips.c10
-rw-r--r--arch/mips/mm/uasm.c106
-rw-r--r--arch/mips/mti-malta/malta-smtc.c6
-rw-r--r--arch/mips/mti-malta/malta-time.c2
-rw-r--r--arch/mips/mti-sead3/sead3-time.c2
-rw-r--r--arch/mips/netlogic/common/irq.c68
-rw-r--r--arch/mips/netlogic/common/smp.c4
-rw-r--r--arch/mips/netlogic/common/smpboot.S4
-rw-r--r--arch/mips/netlogic/common/time.c2
-rw-r--r--arch/mips/netlogic/dts/xlp_evp.dts3
-rw-r--r--arch/mips/netlogic/dts/xlp_svp.dts3
-rw-r--r--arch/mips/netlogic/xlp/usb-init.c2
-rw-r--r--arch/mips/netlogic/xlr/wakeup.c2
-rw-r--r--arch/mips/oprofile/op_model_mipsxx.c2
-rw-r--r--arch/mips/pci/pci-ip27.c2
-rw-r--r--arch/mips/pmcs-msp71xx/msp_smtc.c7
-rw-r--r--arch/mips/pmcs-msp71xx/msp_time.c2
-rw-r--r--arch/mips/pnx833x/common/interrupts.c2
-rw-r--r--arch/mips/pnx833x/common/platform.c2
-rw-r--r--arch/mips/powertv/asic/asic_devices.c3
-rw-r--r--arch/mips/powertv/time.c2
-rw-r--r--arch/mips/ralink/irq.c2
-rw-r--r--arch/mips/sgi-ip27/ip27-init.c4
-rw-r--r--arch/mips/sgi-ip27/ip27-smp.c6
-rw-r--r--arch/mips/sgi-ip27/ip27-timer.c6
-rw-r--r--arch/mips/sgi-ip27/ip27-xtalk.c6
-rw-r--r--arch/mips/sibyte/bcm1480/smp.c8
-rw-r--r--arch/mips/sibyte/sb1250/smp.c8
-rw-r--r--arch/openrisc/kernel/setup.c2
-rw-r--r--arch/parisc/configs/c8000_defconfig279
-rw-r--r--arch/parisc/include/asm/parisc-device.h3
-rw-r--r--arch/parisc/kernel/cache.c135
-rw-r--r--arch/parisc/kernel/firmware.c14
-rw-r--r--arch/parisc/kernel/hardware.c2
-rw-r--r--arch/parisc/kernel/inventory.c1
-rw-r--r--arch/parisc/kernel/processor.c6
-rw-r--r--arch/parisc/kernel/signal.c7
-rw-r--r--arch/parisc/kernel/signal32.c1
-rw-r--r--arch/parisc/kernel/smp.c8
-rw-r--r--arch/parisc/kernel/sys32.h36
-rw-r--r--arch/parisc/kernel/sys_parisc32.c2
-rw-r--r--arch/powerpc/Kconfig2
-rw-r--r--arch/powerpc/configs/ppc64_defconfig2
-rw-r--r--arch/powerpc/configs/ppc64e_defconfig2
-rw-r--r--arch/powerpc/configs/pseries_defconfig2
-rw-r--r--arch/powerpc/include/asm/eeh.h30
-rw-r--r--arch/powerpc/include/asm/hw_irq.h7
-rw-r--r--arch/powerpc/include/asm/module.h5
-rw-r--r--arch/powerpc/include/asm/pci-bridge.h1
-rw-r--r--arch/powerpc/include/asm/perf_event_server.h6
-rw-r--r--arch/powerpc/include/asm/processor.h4
-rw-r--r--arch/powerpc/include/asm/reg.h34
-rw-r--r--arch/powerpc/include/asm/smp.h4
-rw-r--r--arch/powerpc/include/asm/switch_to.h9
-rw-r--r--arch/powerpc/include/uapi/asm/Kbuild1
-rw-r--r--arch/powerpc/include/uapi/asm/perf_event.h18
-rw-r--r--arch/powerpc/kernel/asm-offsets.c3
-rw-r--r--arch/powerpc/kernel/cputable.c20
-rw-r--r--arch/powerpc/kernel/eeh.c72
-rw-r--r--arch/powerpc/kernel/eeh_cache.c18
-rw-r--r--arch/powerpc/kernel/eeh_driver.c77
-rw-r--r--arch/powerpc/kernel/eeh_pe.c58
-rw-r--r--arch/powerpc/kernel/eeh_sysfs.c21
-rw-r--r--arch/powerpc/kernel/entry_64.S36
-rw-r--r--arch/powerpc/kernel/exceptions-64s.S5
-rw-r--r--arch/powerpc/kernel/irq.c2
-rw-r--r--arch/powerpc/kernel/pci-common.c2
-rw-r--r--arch/powerpc/kernel/pci-hotplug.c49
-rw-r--r--arch/powerpc/kernel/pci_of_scan.c56
-rw-r--r--arch/powerpc/kernel/process.c10
-rw-r--r--arch/powerpc/kernel/prom_init.c5
-rw-r--r--arch/powerpc/kernel/tm.S20
-rw-r--r--arch/powerpc/kernel/traps.c58
-rw-r--r--arch/powerpc/kernel/vmlinux.lds.S3
-rw-r--r--arch/powerpc/kvm/book3s_hv.c4
-rw-r--r--arch/powerpc/kvm/book3s_pr.c5
-rw-r--r--arch/powerpc/mm/hash_native_64.c12
-rw-r--r--arch/powerpc/mm/numa.c59
-rw-r--r--arch/powerpc/perf/core-book3s.c7
-rw-r--r--arch/powerpc/perf/power8-pmu.c30
-rw-r--r--arch/powerpc/platforms/powernv/eeh-powernv.c17
-rw-r--r--arch/powerpc/platforms/powernv/pci-ioda.c2
-rw-r--r--arch/powerpc/platforms/pseries/Kconfig1
-rw-r--r--arch/powerpc/platforms/pseries/eeh_pseries.c67
-rw-r--r--arch/powerpc/platforms/pseries/lpar.c2
-rw-r--r--arch/powerpc/platforms/pseries/nvram.c80
-rw-r--r--arch/powerpc/platforms/pseries/ras.c3
-rw-r--r--arch/s390/Kconfig9
-rw-r--r--arch/s390/boot/compressed/Makefile9
-rw-r--r--arch/s390/boot/compressed/misc.c4
-rw-r--r--arch/s390/include/asm/bitops.h2
-rw-r--r--arch/s390/include/asm/processor.h10
-rw-r--r--arch/s390/include/asm/switch_to.h4
-rw-r--r--arch/s390/include/uapi/asm/ptrace.h1
-rw-r--r--arch/s390/kernel/cache.c15
-rw-r--r--arch/s390/kernel/crash_dump.c51
-rw-r--r--arch/s390/kernel/perf_cpum_cf.c4
-rw-r--r--arch/s390/kernel/perf_event.c9
-rw-r--r--arch/s390/kernel/processor.c2
-rw-r--r--arch/s390/kernel/ptrace.c50
-rw-r--r--arch/s390/kernel/setup.c1
-rw-r--r--arch/s390/kernel/smp.c17
-rw-r--r--arch/s390/kernel/sysinfo.c2
-rw-r--r--arch/s390/kernel/vtime.c6
-rw-r--r--arch/s390/kvm/kvm-s390.c21
-rw-r--r--arch/s390/kvm/priv.c4
-rw-r--r--arch/s390/mm/fault.c4
-rw-r--r--arch/s390/mm/init.c1
-rw-r--r--arch/s390/net/bpf_jit_comp.c113
-rw-r--r--arch/s390/oprofile/init.c2
-rw-r--r--arch/score/mm/tlb-score.c2
-rw-r--r--arch/sh/configs/sh03_defconfig2
-rw-r--r--arch/sh/kernel/cpu/init.c18
-rw-r--r--arch/sh/kernel/cpu/sh2/probe.c2
-rw-r--r--arch/sh/kernel/cpu/sh2a/probe.c2
-rw-r--r--arch/sh/kernel/cpu/sh3/probe.c2
-rw-r--r--arch/sh/kernel/cpu/sh4/probe.c2
-rw-r--r--arch/sh/kernel/cpu/sh4a/smp-shx3.c6
-rw-r--r--arch/sh/kernel/cpu/sh5/probe.c2
-rw-r--r--arch/sh/kernel/perf_event.c4
-rw-r--r--arch/sh/kernel/process.c2
-rw-r--r--arch/sh/kernel/setup.c2
-rw-r--r--arch/sh/kernel/smp.c8
-rw-r--r--arch/sh/kernel/traps_32.c2
-rw-r--r--arch/sh/kernel/traps_64.c2
-rw-r--r--arch/sh/mm/tlb-sh5.c2
-rw-r--r--arch/sparc/kernel/ds.c11
-rw-r--r--arch/sparc/kernel/entry.h2
-rw-r--r--arch/sparc/kernel/hvtramp.S1
-rw-r--r--arch/sparc/kernel/irq_64.c5
-rw-r--r--arch/sparc/kernel/leon_smp.c10
-rw-r--r--arch/sparc/kernel/mdesc.c34
-rw-r--r--arch/sparc/kernel/smp_32.c20
-rw-r--r--arch/sparc/kernel/smp_64.c9
-rw-r--r--arch/sparc/kernel/sun4d_smp.c6
-rw-r--r--arch/sparc/kernel/sun4m_smp.c6
-rw-r--r--arch/sparc/kernel/sysfs.c4
-rw-r--r--arch/sparc/kernel/trampoline_32.S3
-rw-r--r--arch/sparc/kernel/trampoline_64.S2
-rw-r--r--arch/sparc/mm/init_64.c2
-rw-r--r--arch/sparc/mm/srmmu.c12
-rw-r--r--arch/tile/kernel/irq.c2
-rw-r--r--arch/tile/kernel/messaging.c2
-rw-r--r--arch/tile/kernel/setup.c12
-rw-r--r--arch/tile/kernel/smpboot.c8
-rw-r--r--arch/tile/kernel/time.c2
-rw-r--r--arch/um/include/shared/frame_kern.h8
-rw-r--r--arch/um/kernel/signal.c4
-rw-r--r--arch/um/kernel/skas/mmu.c2
-rw-r--r--arch/um/kernel/skas/uaccess.c2
-rw-r--r--arch/um/os-Linux/mem.c230
-rw-r--r--arch/um/os-Linux/signal.c8
-rw-r--r--arch/um/os-Linux/skas/process.c19
-rw-r--r--arch/x86/boot/compressed/eboot.c2
-rw-r--r--arch/x86/crypto/Makefile2
-rw-r--r--arch/x86/crypto/crct10dif-pcl-asm_64.S643
-rw-r--r--arch/x86/crypto/crct10dif-pclmul_glue.c151
-rw-r--r--arch/x86/include/asm/cpu.h2
-rw-r--r--arch/x86/include/asm/microcode.h4
-rw-r--r--arch/x86/include/asm/microcode_amd.h4
-rw-r--r--arch/x86/include/asm/microcode_intel.h4
-rw-r--r--arch/x86/include/asm/mmconfig.h4
-rw-r--r--arch/x86/include/asm/mpspec.h2
-rw-r--r--arch/x86/include/asm/numa.h6
-rw-r--r--arch/x86/include/asm/processor.h2
-rw-r--r--arch/x86/include/asm/prom.h2
-rw-r--r--arch/x86/include/asm/smp.h2
-rw-r--r--arch/x86/kernel/acpi/boot.c6
-rw-r--r--arch/x86/kernel/acpi/sleep.c18
-rw-r--r--arch/x86/kernel/apic/apic.c30
-rw-r--r--arch/x86/kernel/apic/apic_numachip.c2
-rw-r--r--arch/x86/kernel/apic/es7000_32.c2
-rw-r--r--arch/x86/kernel/apic/numaq_32.c2
-rw-r--r--arch/x86/kernel/apic/x2apic_cluster.c2
-rw-r--r--arch/x86/kernel/apic/x2apic_uv_x.c14
-rw-r--r--arch/x86/kernel/cpu/amd.c33
-rw-r--r--arch/x86/kernel/cpu/centaur.c26
-rw-r--r--arch/x86/kernel/cpu/common.c64
-rw-r--r--arch/x86/kernel/cpu/cyrix.c40
-rw-r--r--arch/x86/kernel/cpu/hypervisor.c2
-rw-r--r--arch/x86/kernel/cpu/intel.c30
-rw-r--r--arch/x86/kernel/cpu/intel_cacheinfo.c55
-rw-r--r--arch/x86/kernel/cpu/mcheck/mce-severity.c4
-rw-r--r--arch/x86/kernel/cpu/mcheck/mce.c23
-rw-r--r--arch/x86/kernel/cpu/mcheck/mce_amd.c14
-rw-r--r--arch/x86/kernel/cpu/mcheck/therm_throt.c9
-rw-r--r--arch/x86/kernel/cpu/perf_event.c2
-rw-r--r--arch/x86/kernel/cpu/perf_event_amd_ibs.c2
-rw-r--r--arch/x86/kernel/cpu/perf_event_amd_uncore.c31
-rw-r--r--arch/x86/kernel/cpu/perf_event_intel_uncore.c20
-rw-r--r--arch/x86/kernel/cpu/rdrand.c2
-rw-r--r--arch/x86/kernel/cpu/scattered.c4
-rw-r--r--arch/x86/kernel/cpu/topology.c2
-rw-r--r--arch/x86/kernel/cpu/transmeta.c6
-rw-r--r--arch/x86/kernel/cpu/umc.c2
-rw-r--r--arch/x86/kernel/cpu/vmware.c2
-rw-r--r--arch/x86/kernel/cpuid.c7
-rw-r--r--arch/x86/kernel/devicetree.c2
-rw-r--r--arch/x86/kernel/early-quirks.c14
-rw-r--r--arch/x86/kernel/head_32.S1
-rw-r--r--arch/x86/kernel/head_64.S15
-rw-r--r--arch/x86/kernel/i387.c12
-rw-r--r--arch/x86/kernel/irq_32.c2
-rw-r--r--arch/x86/kernel/kvm.c10
-rw-r--r--arch/x86/kernel/kvmclock.c2
-rw-r--r--arch/x86/kernel/microcode_amd.c9
-rw-r--r--arch/x86/kernel/microcode_amd_early.c8
-rw-r--r--arch/x86/kernel/microcode_core.c2
-rw-r--r--arch/x86/kernel/microcode_core_early.c6
-rw-r--r--arch/x86/kernel/microcode_intel_early.c26
-rw-r--r--arch/x86/kernel/mmconf-fam10h_64.c12
-rw-r--r--arch/x86/kernel/msr.c6
-rw-r--r--arch/x86/kernel/process.c2
-rw-r--r--arch/x86/kernel/setup.c2
-rw-r--r--arch/x86/kernel/smpboot.c28
-rw-r--r--arch/x86/kernel/tboot.c6
-rw-r--r--arch/x86/kernel/tracepoint.c6
-rw-r--r--arch/x86/kernel/traps.c12
-rw-r--r--arch/x86/kernel/tsc.c4
-rw-r--r--arch/x86/kernel/tsc_sync.c18
-rw-r--r--arch/x86/kernel/vsyscall_64.c6
-rw-r--r--arch/x86/kernel/x86_init.c4
-rw-r--r--arch/x86/kernel/xsave.c4
-rw-r--r--arch/x86/kvm/mmu.c7
-rw-r--r--arch/x86/mm/mmio-mod.c4
-rw-r--r--arch/x86/mm/numa.c12
-rw-r--r--arch/x86/mm/numa_emulation.c12
-rw-r--r--arch/x86/mm/setup_nx.c4
-rw-r--r--arch/x86/pci/amd_bus.c8
-rw-r--r--arch/x86/platform/ce4100/ce4100.c4
-rw-r--r--arch/x86/platform/efi/efi.c7
-rw-r--r--arch/x86/platform/mrst/mrst.c4
-rw-r--r--arch/x86/um/signal.c1
-rw-r--r--arch/x86/xen/enlighten.c6
-rw-r--r--arch/x86/xen/setup.c6
-rw-r--r--arch/x86/xen/smp.c12
-rw-r--r--arch/x86/xen/spinlock.c2
-rw-r--r--arch/x86/xen/xen-ops.h2
-rw-r--r--arch/xtensa/kernel/time.c2
-rw-r--r--block/blk-iopoll.c6
-rw-r--r--block/blk-softirq.c6
-rw-r--r--crypto/Kconfig19
-rw-r--r--crypto/Makefile1
-rw-r--r--crypto/crct10dif.c178
-rw-r--r--crypto/tcrypt.c8
-rw-r--r--crypto/testmgr.c10
-rw-r--r--crypto/testmgr.h33
-rw-r--r--drivers/accessibility/braille/braille_console.c9
-rw-r--r--drivers/acpi/acpi_memhotplug.c1
-rw-r--r--drivers/acpi/acpi_processor.c5
-rw-r--r--drivers/acpi/acpica/aclocal.h13
-rw-r--r--drivers/acpi/battery.c2
-rw-r--r--drivers/acpi/glue.c133
-rw-r--r--drivers/acpi/internal.h9
-rw-r--r--drivers/acpi/proc.c8
-rw-r--r--drivers/acpi/processor_core.c8
-rw-r--r--drivers/acpi/processor_driver.c8
-rw-r--r--drivers/acpi/processor_idle.c6
-rw-r--r--drivers/acpi/scan.c13
-rw-r--r--drivers/acpi/video.c33
-rw-r--r--drivers/acpi/video_detect.c8
-rw-r--r--drivers/ata/Kconfig11
-rw-r--r--drivers/ata/Makefile1
-rw-r--r--drivers/ata/ahci.c9
-rw-r--r--drivers/ata/ahci_imx.c236
-rw-r--r--drivers/ata/ata_piix.c2
-rw-r--r--drivers/ata/libata-scsi.c6
-rw-r--r--drivers/ata/pata_imx.c1
-rw-r--r--drivers/ata/sata_inic162x.c14
-rw-r--r--drivers/base/core.c120
-rw-r--r--drivers/base/cpu.c2
-rw-r--r--drivers/base/platform.c1
-rw-r--r--drivers/base/regmap/regcache.c3
-rw-r--r--drivers/base/regmap/regmap.c2
-rw-r--r--drivers/base/topology.c10
-rw-r--r--drivers/block/Kconfig4
-rw-r--r--drivers/block/drbd/drbd_actlog.c21
-rw-r--r--drivers/block/drbd/drbd_int.h15
-rw-r--r--drivers/block/drbd/drbd_main.c61
-rw-r--r--drivers/block/drbd/drbd_nl.c185
-rw-r--r--drivers/block/drbd/drbd_receiver.c12
-rw-r--r--drivers/block/drbd/drbd_state.c4
-rw-r--r--drivers/block/rsxx/core.c359
-rw-r--r--drivers/block/rsxx/cregs.c14
-rw-r--r--drivers/block/rsxx/dev.c33
-rw-r--r--drivers/block/rsxx/dma.c185
-rw-r--r--drivers/block/rsxx/rsxx_priv.h10
-rw-r--r--drivers/block/xen-blkback/blkback.c872
-rw-r--r--drivers/block/xen-blkback/common.h147
-rw-r--r--drivers/block/xen-blkback/xenbus.c85
-rw-r--r--drivers/block/xen-blkfront.c532
-rw-r--r--drivers/bluetooth/ath3k.c46
-rw-r--r--drivers/bluetooth/btusb.c18
-rw-r--r--drivers/char/agp/parisc-agp.c6
-rw-r--r--drivers/char/virtio_console.c70
-rw-r--r--drivers/clocksource/arm_arch_timer.c8
-rw-r--r--drivers/clocksource/arm_global_timer.c8
-rw-r--r--drivers/clocksource/dummy_timer.c6
-rw-r--r--drivers/clocksource/exynos_mct.c4
-rw-r--r--drivers/clocksource/metag_generic.c6
-rw-r--r--drivers/clocksource/time-armada-370-xp.c4
-rw-r--r--drivers/clocksource/timer-marco.c4
-rw-r--r--drivers/cpufreq/cpufreq.c25
-rw-r--r--drivers/cpufreq/cpufreq_conservative.c20
-rw-r--r--drivers/cpufreq/cpufreq_governor.c11
-rw-r--r--drivers/cpufreq/cpufreq_governor.h4
-rw-r--r--drivers/cpufreq/cpufreq_ondemand.c20
-rw-r--r--drivers/cpufreq/cpufreq_stats.c8
-rw-r--r--drivers/cpufreq/dbx500-cpufreq.c2
-rw-r--r--drivers/cpufreq/intel_pstate.c16
-rw-r--r--drivers/cpufreq/longhaul.c6
-rw-r--r--drivers/cpufreq/longhaul.h26
-rw-r--r--drivers/cpufreq/longrun.c6
-rw-r--r--drivers/cpufreq/loongson2_cpufreq.c11
-rw-r--r--drivers/cpufreq/omap-cpufreq.c2
-rw-r--r--drivers/cpufreq/powernow-k7.c8
-rw-r--r--drivers/cpufreq/powernow-k8.c6
-rw-r--r--drivers/cpufreq/s3c24xx-cpufreq.c4
-rw-r--r--drivers/cpuidle/governors/menu.c106
-rw-r--r--drivers/crypto/caam/caamhash.c2
-rw-r--r--drivers/dma/pch_dma.c1
-rw-r--r--drivers/dma/pl330.c93
-rw-r--r--drivers/dma/sh/shdma.c4
-rw-r--r--drivers/edac/edac_mc.c9
-rw-r--r--drivers/edac/edac_mc_sysfs.c28
-rw-r--r--drivers/edac/i5100_edac.c2
-rw-r--r--drivers/firewire/core-cdev.c3
-rw-r--r--drivers/firewire/ohci.c10
-rw-r--r--drivers/firmware/dmi_scan.c14
-rw-r--r--drivers/firmware/efi/efivars.c3
-rw-r--r--drivers/gpio/gpio-msm-v1.c1
-rw-r--r--drivers/gpio/gpio-msm-v2.c2
-rw-r--r--drivers/gpu/drm/ast/ast_ttm.c1
-rw-r--r--drivers/gpu/drm/cirrus/cirrus_ttm.c1
-rw-r--r--drivers/gpu/drm/drm_crtc_helper.c27
-rw-r--r--drivers/gpu/drm/drm_irq.c5
-rw-r--r--drivers/gpu/drm/exynos/exynos_ddc.c1
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_fimc.c1
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_fimd.c3
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_g2d.c19
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_gsc.c1
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_hdmi.c1
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_ipp.c13
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_rotator.c1
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_vidi.c1
-rw-r--r--drivers/gpu/drm/exynos/exynos_hdmi.c1
-rw-r--r--drivers/gpu/drm/exynos/exynos_hdmiphy.c1
-rw-r--r--drivers/gpu/drm/exynos/exynos_mixer.c1
-rw-r--r--drivers/gpu/drm/i915/i915_dma.c20
-rw-r--r--drivers/gpu/drm/i915/i915_drv.c17
-rw-r--r--drivers/gpu/drm/i915/i915_drv.h4
-rw-r--r--drivers/gpu/drm/i915/i915_gem.c101
-rw-r--r--drivers/gpu/drm/i915/i915_reg.h12
-rw-r--r--drivers/gpu/drm/i915/intel_ddi.c10
-rw-r--r--drivers/gpu/drm/i915/intel_display.c73
-rw-r--r--drivers/gpu/drm/i915/intel_dp.c5
-rw-r--r--drivers/gpu/drm/i915/intel_drv.h2
-rw-r--r--drivers/gpu/drm/i915/intel_hdmi.c19
-rw-r--r--drivers/gpu/drm/i915/intel_lvds.c12
-rw-r--r--drivers/gpu/drm/i915/intel_panel.c24
-rw-r--r--drivers/gpu/drm/i915/intel_pm.c65
-rw-r--r--drivers/gpu/drm/i915/intel_ringbuffer.c38
-rw-r--r--drivers/gpu/drm/mgag200/mgag200_mode.c46
-rw-r--r--drivers/gpu/drm/mgag200/mgag200_ttm.c2
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/bsp/nvc0.c1
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/bsp/nve0.c1
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/disp/hdanva3.c2
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/disp/hdanvd0.c2
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/disp/sornv50.c8
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/falcon.c19
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/mpeg/nv31.c9
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/mpeg/nv40.c1
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/ppp/nvc0.c1
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/vp/nvc0.c1
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/vp/nve0.c1
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/xtensa.c8
-rw-r--r--drivers/gpu/drm/nouveau/core/include/engine/falcon.h2
-rw-r--r--drivers/gpu/drm/nouveau/core/include/subdev/vm.h2
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/fb/priv.h2
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/fb/ramnv50.c22
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/fb/ramnvc0.c14
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/gpio/nv50.c10
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/mc/nv50.c2
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/vm/base.c27
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_bo.c26
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_display.c69
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_drm.c15
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_fbcon.c4
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_fence.c2
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_gem.c6
-rw-r--r--drivers/gpu/drm/nouveau/nv17_fence.c2
-rw-r--r--drivers/gpu/drm/nouveau/nv50_display.c46
-rw-r--r--drivers/gpu/drm/nouveau/nv50_fence.c14
-rw-r--r--drivers/gpu/drm/qxl/qxl_cmd.c42
-rw-r--r--drivers/gpu/drm/qxl/qxl_display.c70
-rw-r--r--drivers/gpu/drm/qxl/qxl_draw.c263
-rw-r--r--drivers/gpu/drm/qxl/qxl_drv.h76
-rw-r--r--drivers/gpu/drm/qxl/qxl_fb.c184
-rw-r--r--drivers/gpu/drm/qxl/qxl_fence.c10
-rw-r--r--drivers/gpu/drm/qxl/qxl_gem.c2
-rw-r--r--drivers/gpu/drm/qxl/qxl_image.c111
-rw-r--r--drivers/gpu/drm/qxl/qxl_ioctl.c319
-rw-r--r--drivers/gpu/drm/qxl/qxl_object.c70
-rw-r--r--drivers/gpu/drm/qxl/qxl_object.h6
-rw-r--r--drivers/gpu/drm/qxl/qxl_release.c212
-rw-r--r--drivers/gpu/drm/qxl/qxl_ttm.c2
-rw-r--r--drivers/gpu/drm/radeon/atom.c5
-rw-r--r--drivers/gpu/drm/radeon/atombios_dp.c43
-rw-r--r--drivers/gpu/drm/radeon/btc_dpm.c17
-rw-r--r--drivers/gpu/drm/radeon/cik.c73
-rw-r--r--drivers/gpu/drm/radeon/cikd.h16
-rw-r--r--drivers/gpu/drm/radeon/cypress_dpm.c17
-rw-r--r--drivers/gpu/drm/radeon/evergreen.c19
-rw-r--r--drivers/gpu/drm/radeon/evergreen_hdmi.c32
-rw-r--r--drivers/gpu/drm/radeon/evergreend.h3
-rw-r--r--drivers/gpu/drm/radeon/ni.c198
-rw-r--r--drivers/gpu/drm/radeon/ni_dpm.c24
-rw-r--r--drivers/gpu/drm/radeon/nid.h16
-rw-r--r--drivers/gpu/drm/radeon/r100.c11
-rw-r--r--drivers/gpu/drm/radeon/r600.c146
-rw-r--r--drivers/gpu/drm/radeon/r600_dpm.c4
-rw-r--r--drivers/gpu/drm/radeon/r600_hdmi.c43
-rw-r--r--drivers/gpu/drm/radeon/r600d.h4
-rw-r--r--drivers/gpu/drm/radeon/radeon.h12
-rw-r--r--drivers/gpu/drm/radeon/radeon_asic.c14
-rw-r--r--drivers/gpu/drm/radeon/radeon_asic.h9
-rw-r--r--drivers/gpu/drm/radeon/radeon_atombios.c42
-rw-r--r--drivers/gpu/drm/radeon/radeon_combios.c159
-rw-r--r--drivers/gpu/drm/radeon/radeon_device.c3
-rw-r--r--drivers/gpu/drm/radeon/radeon_drv.c4
-rw-r--r--drivers/gpu/drm/radeon/radeon_gart.c6
-rw-r--r--drivers/gpu/drm/radeon/radeon_irq_kms.c11
-rw-r--r--drivers/gpu/drm/radeon/radeon_object.c1
-rw-r--r--drivers/gpu/drm/radeon/radeon_object.h2
-rw-r--r--drivers/gpu/drm/radeon/radeon_pm.c9
-rw-r--r--drivers/gpu/drm/radeon/radeon_ring.c1
-rw-r--r--drivers/gpu/drm/radeon/radeon_sa.c7
-rw-r--r--drivers/gpu/drm/radeon/radeon_uvd.c174
-rw-r--r--drivers/gpu/drm/radeon/rs780_dpm.c25
-rw-r--r--drivers/gpu/drm/radeon/rs780d.h3
-rw-r--r--drivers/gpu/drm/radeon/rv6xx_dpm.c77
-rw-r--r--drivers/gpu/drm/radeon/rv770.c5
-rw-r--r--drivers/gpu/drm/radeon/rv770_dpm.c47
-rw-r--r--drivers/gpu/drm/radeon/rv770_dpm.h1
-rw-r--r--drivers/gpu/drm/radeon/si.c331
-rw-r--r--drivers/gpu/drm/radeon/si_dpm.c100
-rw-r--r--drivers/gpu/drm/radeon/sid.h14
-rw-r--r--drivers/gpu/drm/radeon/sumo_dpm.c14
-rw-r--r--drivers/gpu/drm/rcar-du/rcar_du_drv.c9
-rw-r--r--drivers/gpu/drm/shmobile/shmob_drm_drv.c9
-rw-r--r--drivers/hid/hid-logitech-dj.c45
-rw-r--r--drivers/hid/hid-logitech-dj.h1
-rw-r--r--drivers/hid/hid-sony.c3
-rw-r--r--drivers/hid/hidraw.c2
-rw-r--r--drivers/hv/hv_balloon.c21
-rw-r--r--drivers/hv/vmbus_drv.c8
-rw-r--r--drivers/hwmon/abx500.c2
-rw-r--r--drivers/hwmon/adt7470.c2
-rw-r--r--drivers/hwmon/coretemp.c39
-rw-r--r--drivers/hwmon/max6697.c4
-rw-r--r--drivers/hwmon/via-cputemp.c8
-rw-r--r--drivers/i2c/busses/i2c-kempld.c4
-rw-r--r--drivers/i2c/busses/i2c-mxs.c2
-rw-r--r--drivers/iio/adc/ti_am335x_adc.c31
-rw-r--r--drivers/iio/dac/ad7303.c4
-rw-r--r--drivers/iio/industrialio-trigger.c36
-rw-r--r--drivers/iio/inkern.c2
-rw-r--r--drivers/iio/pressure/st_pressure_core.c6
-rw-r--r--drivers/infiniband/core/cma.c29
-rw-r--r--drivers/infiniband/core/mad.c8
-rw-r--r--drivers/infiniband/hw/cxgb3/iwch_provider.c1
-rw-r--r--drivers/infiniband/hw/cxgb4/qp.c2
-rw-r--r--drivers/infiniband/hw/mlx4/mad.c10
-rw-r--r--drivers/infiniband/hw/mlx5/main.c11
-rw-r--r--drivers/infiniband/hw/mlx5/qp.c2
-rw-r--r--drivers/infiniband/hw/nes/nes_hw.c4
-rw-r--r--drivers/infiniband/hw/nes/nes_verbs.c3
-rw-r--r--drivers/infiniband/hw/ocrdma/ocrdma_ah.c1
-rw-r--r--drivers/infiniband/hw/ocrdma/ocrdma_verbs.c5
-rw-r--r--drivers/infiniband/hw/qib/qib_iba7322.c2
-rw-r--r--drivers/infiniband/hw/qib/qib_sdma.c2
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib_ib.c76
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib_main.c2
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib_netlink.c9
-rw-r--r--drivers/irqchip/irq-gic.c8
-rw-r--r--drivers/macintosh/windfarm_rm31.c18
-rw-r--r--drivers/md/bcache/alloc.c46
-rw-r--r--drivers/md/bcache/bcache.h61
-rw-r--r--drivers/md/bcache/bset.c56
-rw-r--r--drivers/md/bcache/bset.h4
-rw-r--r--drivers/md/bcache/btree.c451
-rw-r--r--drivers/md/bcache/btree.h35
-rw-r--r--drivers/md/bcache/closure.c6
-rw-r--r--drivers/md/bcache/debug.c178
-rw-r--r--drivers/md/bcache/debug.h11
-rw-r--r--drivers/md/bcache/io.c68
-rw-r--r--drivers/md/bcache/journal.c25
-rw-r--r--drivers/md/bcache/movinggc.c24
-rw-r--r--drivers/md/bcache/request.c197
-rw-r--r--drivers/md/bcache/request.h2
-rw-r--r--drivers/md/bcache/super.c171
-rw-r--r--drivers/md/bcache/sysfs.c68
-rw-r--r--drivers/md/bcache/trace.c47
-rw-r--r--drivers/md/bcache/util.c17
-rw-r--r--drivers/md/bcache/util.h6
-rw-r--r--drivers/md/bcache/writeback.c133
-rw-r--r--drivers/md/bcache/writeback.h64
-rw-r--r--drivers/md/md.c14
-rw-r--r--drivers/md/raid1.c53
-rw-r--r--drivers/md/raid10.c19
-rw-r--r--drivers/md/raid5.c15
-rw-r--r--drivers/md/raid5.h1
-rw-r--r--drivers/media/i2c/ml86v7667.c4
-rw-r--r--drivers/media/pci/saa7134/saa7134-alsa.c2
-rw-r--r--drivers/media/platform/coda.c2
-rw-r--r--drivers/media/platform/s5p-g2d/g2d.c1
-rw-r--r--drivers/media/platform/s5p-mfc/s5p_mfc_dec.c79
-rw-r--r--drivers/media/platform/s5p-mfc/s5p_mfc_enc.c46
-rw-r--r--drivers/media/usb/em28xx/em28xx-i2c.c2
-rw-r--r--drivers/media/usb/hdpvr/hdpvr-core.c11
-rw-r--r--drivers/media/usb/usbtv/Kconfig2
-rw-r--r--drivers/media/usb/usbtv/usbtv.c51
-rw-r--r--drivers/misc/atmel-ssc.c11
-rw-r--r--drivers/misc/mei/hbm.c2
-rw-r--r--drivers/misc/mei/hw-me.c14
-rw-r--r--drivers/misc/mei/init.c3
-rw-r--r--drivers/mmc/host/pxamci.c2
-rw-r--r--drivers/net/arcnet/arcnet.c2
-rw-r--r--drivers/net/can/usb/esd_usb2.c10
-rw-r--r--drivers/net/can/usb/usb_8dev.c1
-rw-r--r--drivers/net/ethernet/allwinner/Kconfig26
-rw-r--r--drivers/net/ethernet/atheros/atl1c/atl1c.h3
-rw-r--r--drivers/net/ethernet/atheros/atl1c/atl1c_main.c40
-rw-r--r--drivers/net/ethernet/atheros/atl1e/atl1e_main.c24
-rw-r--r--drivers/net/ethernet/broadcom/Kconfig1
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x.h8
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c2
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c2
-rw-r--r--drivers/net/ethernet/broadcom/tg3.c6
-rw-r--r--drivers/net/ethernet/emulex/benet/be_main.c14
-rw-r--r--drivers/net/ethernet/freescale/fec.h1
-rw-r--r--drivers/net/ethernet/freescale/fec_main.c38
-rw-r--r--drivers/net/ethernet/intel/igb/igb_main.c3
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe.h12
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82598.c3
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_main.c6
-rw-r--r--drivers/net/ethernet/marvell/mvneta.c31
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_ethtool.c6
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_netdev.c6
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/fw.c11
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/main.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/mlx4_en.h10
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/cmd.c19
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/debugfs.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/main.c69
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c20
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/uar.c1
-rw-r--r--drivers/net/ethernet/oki-semi/pch_gbe/Kconfig2
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic.h15
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c130
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.h2
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c95
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c16
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c85
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c62
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.h2
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_init.c27
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c101
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c23
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_minidump.c2
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c4
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c46
-rw-r--r--drivers/net/ethernet/realtek/8139cp.c48
-rw-r--r--drivers/net/ethernet/realtek/r8169.c6
-rw-r--r--drivers/net/ethernet/sfc/filter.c4
-rw-r--r--drivers/net/ethernet/sis/sis900.c12
-rw-r--r--drivers/net/ethernet/ti/cpsw.c2
-rw-r--r--drivers/net/ethernet/ti/davinci_emac.c3
-rw-r--r--drivers/net/hyperv/netvsc_drv.c4
-rw-r--r--drivers/net/macvlan.c23
-rw-r--r--drivers/net/macvtap.c65
-rw-r--r--drivers/net/phy/mdio-sun4i.c14
-rw-r--r--drivers/net/tun.c62
-rw-r--r--drivers/net/usb/ax88179_178a.c9
-rw-r--r--drivers/net/usb/r8152.c126
-rw-r--r--drivers/net/usb/r815x.c62
-rw-r--r--drivers/net/usb/smsc75xx.c12
-rw-r--r--drivers/net/veth.c1
-rw-r--r--drivers/net/vxlan.c63
-rw-r--r--drivers/net/wireless/ath/ath10k/Kconfig2
-rw-r--r--drivers/net/wireless/ath/ath5k/mac80211-ops.c2
-rw-r--r--drivers/net/wireless/ath/ath9k/ar5008_phy.c10
-rw-r--r--drivers/net/wireless/ath/ath9k/hif_usb.c13
-rw-r--r--drivers/net/wireless/ath/ath9k/htc_drv_init.c1
-rw-r--r--drivers/net/wireless/ath/ath9k/xmit.c44
-rw-r--r--drivers/net/wireless/ath/wil6210/debugfs.c4
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/dhd_linux.c2
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/fwsignal.c8
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.c5
-rw-r--r--drivers/net/wireless/cw1200/txrx.c2
-rw-r--r--drivers/net/wireless/iwlwifi/dvm/main.c2
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-prph.h2
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/d3.c15
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/debugfs.c6
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/fw-api-scan.h1
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/mac80211.c65
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/mvm.h1
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/scan.c19
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/sta.c34
-rw-r--r--drivers/net/wireless/iwlwifi/pcie/drv.c1
-rw-r--r--drivers/net/wireless/iwlwifi/pcie/rx.c8
-rw-r--r--drivers/net/wireless/iwlwifi/pcie/trans.c5
-rw-r--r--drivers/net/wireless/mwifiex/cfg80211.c4
-rw-r--r--drivers/net/wireless/mwifiex/cfp.c3
-rw-r--r--drivers/net/wireless/mwifiex/init.c10
-rw-r--r--drivers/net/wireless/mwifiex/join.c6
-rw-r--r--drivers/net/wireless/mwifiex/main.c13
-rw-r--r--drivers/net/wireless/mwifiex/main.h1
-rw-r--r--drivers/net/wireless/mwifiex/sdio.c95
-rw-r--r--drivers/net/wireless/mwifiex/sdio.h3
-rw-r--r--drivers/net/wireless/mwifiex/sta_ioctl.c4
-rw-r--r--drivers/net/wireless/rt2x00/Kconfig2
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00queue.c18
-rw-r--r--drivers/net/wireless/rtlwifi/Kconfig72
-rw-r--r--drivers/net/wireless/rtlwifi/Makefile10
-rw-r--r--drivers/net/wireless/rtlwifi/base.c19
-rw-r--r--drivers/net/wireless/rtlwifi/base.h2
-rw-r--r--drivers/net/wireless/rtlwifi/core.c1
-rw-r--r--drivers/net/wireless/rtlwifi/debug.c1
-rw-r--r--drivers/net/wireless/rtlwifi/efuse.c1
-rw-r--r--drivers/net/wireless/rtlwifi/pci.c22
-rw-r--r--drivers/net/wireless/rtlwifi/ps.c16
-rw-r--r--drivers/net/wireless/rtlwifi/ps.h1
-rw-r--r--drivers/net/wireless/rtlwifi/usb.c9
-rw-r--r--drivers/net/xen-netfront.c31
-rw-r--r--drivers/of/irq.c6
-rw-r--r--drivers/oprofile/timer_int.c4
-rw-r--r--drivers/parisc/iosapic.c38
-rw-r--r--drivers/pci/host/pci-mvebu.c27
-rw-r--r--drivers/pci/hotplug/Kconfig5
-rw-r--r--drivers/pci/hotplug/pciehp_pci.c9
-rw-r--r--drivers/pci/hotplug/rpadlpar_core.c1
-rw-r--r--drivers/pci/pci-acpi.c15
-rw-r--r--drivers/pci/pcie/Kconfig5
-rw-r--r--drivers/pci/setup-bus.c69
-rw-r--r--drivers/pinctrl/core.c1
-rw-r--r--drivers/pinctrl/pinctrl-single.c2
-rw-r--r--drivers/pinctrl/sh-pfc/pfc-sh73a0.c1
-rw-r--r--drivers/pinctrl/sirf/pinctrl-atlas6.c24
-rw-r--r--drivers/pnp/pnpacpi/rsparser.c2
-rw-r--r--drivers/pnp/resource.c1
-rw-r--r--drivers/rapidio/rio.c4
-rw-r--r--drivers/rtc/rtc-twl.c3
-rw-r--r--drivers/s390/block/dasd.c6
-rw-r--r--drivers/s390/cio/qdio_main.c4
-rw-r--r--drivers/s390/crypto/ap_bus.c1
-rw-r--r--drivers/scsi/fnic/fnic.h2
-rw-r--r--drivers/scsi/fnic/fnic_main.c22
-rw-r--r--drivers/scsi/isci/request.c2
-rw-r--r--drivers/scsi/isci/task.c9
-rw-r--r--drivers/scsi/megaraid/megaraid_sas_base.c20
-rw-r--r--drivers/scsi/mvsas/mv_sas.c11
-rw-r--r--drivers/scsi/mvsas/mv_sas.h1
-rw-r--r--drivers/scsi/qla2xxx/qla_iocb.c11
-rw-r--r--drivers/scsi/scsi.c3
-rw-r--r--drivers/scsi/sd.c22
-rw-r--r--drivers/scsi/virtio_scsi.c2
-rw-r--r--drivers/spi/spi-altera.c12
-rw-r--r--drivers/spi/spi-davinci.c2
-rw-r--r--drivers/spi/spi-nuc900.c13
-rw-r--r--drivers/spi/spi-s3c64xx.c3
-rw-r--r--drivers/spi/spi-xilinx.c16
-rw-r--r--drivers/staging/Kconfig2
-rw-r--r--drivers/staging/Makefile1
-rw-r--r--drivers/staging/android/logger.c4
-rw-r--r--drivers/staging/comedi/TODO2
-rw-r--r--drivers/staging/comedi/comedi_fops.c32
-rw-r--r--drivers/staging/csr/Kconfig9
-rw-r--r--drivers/staging/csr/LICENSE.txt39
-rw-r--r--drivers/staging/csr/Makefile73
-rw-r--r--drivers/staging/csr/bh.c404
-rw-r--r--drivers/staging/csr/csr_framework_ext.c40
-rw-r--r--drivers/staging/csr/csr_framework_ext.h35
-rw-r--r--drivers/staging/csr/csr_framework_ext_types.h30
-rw-r--r--drivers/staging/csr/csr_log.h223
-rw-r--r--drivers/staging/csr/csr_log_configure.h39
-rw-r--r--drivers/staging/csr/csr_log_text.h124
-rw-r--r--drivers/staging/csr/csr_macro.h39
-rw-r--r--drivers/staging/csr/csr_msg_transport.h17
-rw-r--r--drivers/staging/csr/csr_msgconv.c291
-rw-r--r--drivers/staging/csr/csr_msgconv.h78
-rw-r--r--drivers/staging/csr/csr_prim_defs.h55
-rw-r--r--drivers/staging/csr/csr_result.h17
-rw-r--r--drivers/staging/csr/csr_sched.h85
-rw-r--r--drivers/staging/csr/csr_sdio.h723
-rw-r--r--drivers/staging/csr/csr_serialize_primitive_types.c100
-rw-r--r--drivers/staging/csr/csr_time.c33
-rw-r--r--drivers/staging/csr/csr_time.h76
-rw-r--r--drivers/staging/csr/csr_util.c15
-rw-r--r--drivers/staging/csr/csr_wifi_common.h101
-rw-r--r--drivers/staging/csr/csr_wifi_fsm.h240
-rw-r--r--drivers/staging/csr/csr_wifi_fsm_event.h42
-rw-r--r--drivers/staging/csr/csr_wifi_fsm_types.h430
-rw-r--r--drivers/staging/csr/csr_wifi_hip_card.h114
-rw-r--r--drivers/staging/csr/csr_wifi_hip_card_sdio.c4001
-rw-r--r--drivers/staging/csr/csr_wifi_hip_card_sdio.h694
-rw-r--r--drivers/staging/csr/csr_wifi_hip_card_sdio_intr.c2595
-rw-r--r--drivers/staging/csr/csr_wifi_hip_card_sdio_mem.c1713
-rw-r--r--drivers/staging/csr/csr_wifi_hip_chiphelper.c793
-rw-r--r--drivers/staging/csr/csr_wifi_hip_chiphelper.h407
-rw-r--r--drivers/staging/csr/csr_wifi_hip_chiphelper_private.h200
-rw-r--r--drivers/staging/csr/csr_wifi_hip_conversions.h73
-rw-r--r--drivers/staging/csr/csr_wifi_hip_download.c819
-rw-r--r--drivers/staging/csr/csr_wifi_hip_dump.c837
-rw-r--r--drivers/staging/csr/csr_wifi_hip_packing.c4804
-rw-r--r--drivers/staging/csr/csr_wifi_hip_send.c415
-rw-r--r--drivers/staging/csr/csr_wifi_hip_signals.c1313
-rw-r--r--drivers/staging/csr/csr_wifi_hip_signals.h128
-rw-r--r--drivers/staging/csr/csr_wifi_hip_sigs.h1417
-rw-r--r--drivers/staging/csr/csr_wifi_hip_ta_sampling.c541
-rw-r--r--drivers/staging/csr/csr_wifi_hip_ta_sampling.h66
-rw-r--r--drivers/staging/csr/csr_wifi_hip_udi.c173
-rw-r--r--drivers/staging/csr/csr_wifi_hip_unifi.h871
-rw-r--r--drivers/staging/csr/csr_wifi_hip_unifi_signal_names.c41
-rw-r--r--drivers/staging/csr/csr_wifi_hip_unifi_udi.h52
-rw-r--r--drivers/staging/csr/csr_wifi_hip_unifihw.h59
-rw-r--r--drivers/staging/csr/csr_wifi_hip_unifiversion.h30
-rw-r--r--drivers/staging/csr/csr_wifi_hip_xbv.c1076
-rw-r--r--drivers/staging/csr/csr_wifi_hip_xbv.h119
-rw-r--r--drivers/staging/csr/csr_wifi_hostio_prim.h18
-rw-r--r--drivers/staging/csr/csr_wifi_lib.h103
-rw-r--r--drivers/staging/csr/csr_wifi_msgconv.h49
-rw-r--r--drivers/staging/csr/csr_wifi_nme_ap_converter_init.c90
-rw-r--r--drivers/staging/csr/csr_wifi_nme_ap_converter_init.h41
-rw-r--r--drivers/staging/csr/csr_wifi_nme_ap_free_downstream_contents.c84
-rw-r--r--drivers/staging/csr/csr_wifi_nme_ap_free_upstream_contents.c39
-rw-r--r--drivers/staging/csr/csr_wifi_nme_ap_lib.h495
-rw-r--r--drivers/staging/csr/csr_wifi_nme_ap_prim.h494
-rw-r--r--drivers/staging/csr/csr_wifi_nme_ap_sef.c30
-rw-r--r--drivers/staging/csr/csr_wifi_nme_ap_sef.h21
-rw-r--r--drivers/staging/csr/csr_wifi_nme_ap_serialize.c909
-rw-r--r--drivers/staging/csr/csr_wifi_nme_ap_serialize.h94
-rw-r--r--drivers/staging/csr/csr_wifi_nme_converter_init.h38
-rw-r--r--drivers/staging/csr/csr_wifi_nme_lib.h991
-rw-r--r--drivers/staging/csr/csr_wifi_nme_prim.h1657
-rw-r--r--drivers/staging/csr/csr_wifi_nme_serialize.h166
-rw-r--r--drivers/staging/csr/csr_wifi_nme_task.h27
-rw-r--r--drivers/staging/csr/csr_wifi_private_common.h81
-rw-r--r--drivers/staging/csr/csr_wifi_result.h27
-rw-r--r--drivers/staging/csr/csr_wifi_router_converter_init.c82
-rw-r--r--drivers/staging/csr/csr_wifi_router_converter_init.h34
-rw-r--r--drivers/staging/csr/csr_wifi_router_ctrl_converter_init.c134
-rw-r--r--drivers/staging/csr/csr_wifi_router_ctrl_converter_init.h34
-rw-r--r--drivers/staging/csr/csr_wifi_router_ctrl_free_downstream_contents.c108
-rw-r--r--drivers/staging/csr/csr_wifi_router_ctrl_free_upstream_contents.c87
-rw-r--r--drivers/staging/csr/csr_wifi_router_ctrl_lib.h2082
-rw-r--r--drivers/staging/csr/csr_wifi_router_ctrl_prim.h2113
-rw-r--r--drivers/staging/csr/csr_wifi_router_ctrl_sef.c46
-rw-r--r--drivers/staging/csr/csr_wifi_router_ctrl_sef.h51
-rw-r--r--drivers/staging/csr/csr_wifi_router_ctrl_serialize.c2591
-rw-r--r--drivers/staging/csr/csr_wifi_router_ctrl_serialize.h333
-rw-r--r--drivers/staging/csr/csr_wifi_router_free_downstream_contents.c53
-rw-r--r--drivers/staging/csr/csr_wifi_router_free_upstream_contents.c47
-rw-r--r--drivers/staging/csr/csr_wifi_router_lib.h417
-rw-r--r--drivers/staging/csr/csr_wifi_router_prim.h421
-rw-r--r--drivers/staging/csr/csr_wifi_router_sef.c19
-rw-r--r--drivers/staging/csr/csr_wifi_router_sef.h25
-rw-r--r--drivers/staging/csr/csr_wifi_router_serialize.c418
-rw-r--r--drivers/staging/csr/csr_wifi_router_serialize.h67
-rw-r--r--drivers/staging/csr/csr_wifi_router_task.h25
-rw-r--r--drivers/staging/csr/csr_wifi_router_transport.c199
-rw-r--r--drivers/staging/csr/csr_wifi_serialize_primitive_types.c256
-rw-r--r--drivers/staging/csr/csr_wifi_sme_ap_lib.h774
-rw-r--r--drivers/staging/csr/csr_wifi_sme_ap_prim.h1030
-rw-r--r--drivers/staging/csr/csr_wifi_sme_converter_init.c201
-rw-r--r--drivers/staging/csr/csr_wifi_sme_converter_init.h34
-rw-r--r--drivers/staging/csr/csr_wifi_sme_free_downstream_contents.c187
-rw-r--r--drivers/staging/csr/csr_wifi_sme_free_upstream_contents.c275
-rw-r--r--drivers/staging/csr/csr_wifi_sme_lib.h4303
-rw-r--r--drivers/staging/csr/csr_wifi_sme_prim.h6510
-rw-r--r--drivers/staging/csr/csr_wifi_sme_sef.c85
-rw-r--r--drivers/staging/csr/csr_wifi_sme_sef.h142
-rw-r--r--drivers/staging/csr/csr_wifi_sme_serialize.c5809
-rw-r--r--drivers/staging/csr/csr_wifi_sme_serialize.h666
-rw-r--r--drivers/staging/csr/csr_wifi_sme_task.h25
-rw-r--r--drivers/staging/csr/csr_wifi_vif_utils.h27
-rw-r--r--drivers/staging/csr/data_tx.c54
-rw-r--r--drivers/staging/csr/drv.c2193
-rw-r--r--drivers/staging/csr/firmware.c396
-rw-r--r--drivers/staging/csr/inet.c104
-rw-r--r--drivers/staging/csr/init_hw.c108
-rw-r--r--drivers/staging/csr/io.c1098
-rw-r--r--drivers/staging/csr/mlme.c433
-rw-r--r--drivers/staging/csr/monitor.c384
-rw-r--r--drivers/staging/csr/netdev.c3307
-rw-r--r--drivers/staging/csr/os.c477
-rw-r--r--drivers/staging/csr/putest.c685
-rw-r--r--drivers/staging/csr/sdio_events.c134
-rw-r--r--drivers/staging/csr/sdio_mmc.c1288
-rw-r--r--drivers/staging/csr/sdio_stubs.c82
-rw-r--r--drivers/staging/csr/sme_blocking.c1466
-rw-r--r--drivers/staging/csr/sme_mgt.c1012
-rw-r--r--drivers/staging/csr/sme_native.c566
-rw-r--r--drivers/staging/csr/sme_sys.c3260
-rw-r--r--drivers/staging/csr/sme_userspace.c315
-rw-r--r--drivers/staging/csr/sme_userspace.h38
-rw-r--r--drivers/staging/csr/sme_wext.c3327
-rw-r--r--drivers/staging/csr/ul_int.c528
-rw-r--r--drivers/staging/csr/unifi_clients.h129
-rw-r--r--drivers/staging/csr/unifi_config.h34
-rw-r--r--drivers/staging/csr/unifi_dbg.c110
-rw-r--r--drivers/staging/csr/unifi_event.c692
-rw-r--r--drivers/staging/csr/unifi_native.h257
-rw-r--r--drivers/staging/csr/unifi_os.h122
-rw-r--r--drivers/staging/csr/unifi_pdu_processing.c3729
-rw-r--r--drivers/staging/csr/unifi_priv.h1136
-rw-r--r--drivers/staging/csr/unifi_sme.c1225
-rw-r--r--drivers/staging/csr/unifi_sme.h245
-rw-r--r--drivers/staging/csr/unifi_wext.h108
-rw-r--r--drivers/staging/csr/unifiio.h398
-rw-r--r--drivers/staging/csr/wext_events.c283
-rw-r--r--drivers/staging/frontier/alphatrack.c2
-rw-r--r--drivers/staging/gdm72xx/gdm_qos.c2
-rw-r--r--drivers/staging/iio/accel/lis3l02dq_core.c2
-rw-r--r--drivers/staging/iio/adc/ad7291.c1
-rw-r--r--drivers/staging/iio/adc/mxs-lradc.c18
-rw-r--r--drivers/staging/imx-drm/Kconfig1
-rw-r--r--drivers/staging/line6/pcm.c5
-rw-r--r--drivers/staging/tidspbridge/pmgr/dbll.c7
-rw-r--r--drivers/staging/zcache/zcache-main.c6
-rw-r--r--drivers/staging/zram/zram_drv.c6
-rw-r--r--drivers/thermal/x86_pkg_temp_thermal.c18
-rw-r--r--drivers/tty/serial/8250/8250_early.c3
-rw-r--r--drivers/tty/serial/8250/8250_gsc.c3
-rw-r--r--drivers/tty/serial/Kconfig2
-rw-r--r--drivers/tty/serial/arc_uart.c2
-rw-r--r--drivers/tty/serial/mxs-auart.c38
-rw-r--r--drivers/tty/synclinkmp.c2
-rw-r--r--drivers/tty/tty_port.c5
-rw-r--r--drivers/usb/chipidea/Kconfig4
-rw-r--r--drivers/usb/chipidea/bits.h4
-rw-r--r--drivers/usb/core/hub.c53
-rw-r--r--drivers/usb/core/hub.h3
-rw-r--r--drivers/usb/dwc3/Kconfig2
-rw-r--r--drivers/usb/dwc3/core.c2
-rw-r--r--drivers/usb/dwc3/core.h4
-rw-r--r--drivers/usb/dwc3/gadget.c1
-rw-r--r--drivers/usb/gadget/Kconfig5
-rw-r--r--drivers/usb/gadget/at91_udc.c16
-rw-r--r--drivers/usb/gadget/ether.c14
-rw-r--r--drivers/usb/gadget/f_ecm.c7
-rw-r--r--drivers/usb/gadget/f_eem.c7
-rw-r--r--drivers/usb/gadget/f_ncm.c7
-rw-r--r--drivers/usb/gadget/f_phonet.c9
-rw-r--r--drivers/usb/gadget/f_rndis.c7
-rw-r--r--drivers/usb/gadget/f_subset.c7
-rw-r--r--drivers/usb/gadget/fotg210-udc.c4
-rw-r--r--drivers/usb/gadget/multi.c10
-rw-r--r--drivers/usb/gadget/mv_u3d_core.c4
-rw-r--r--drivers/usb/gadget/udc-core.c8
-rw-r--r--drivers/usb/host/ehci-hub.c1
-rw-r--r--drivers/usb/host/pci-quirks.h1
-rw-r--r--drivers/usb/host/xhci-mem.c1
-rw-r--r--drivers/usb/host/xhci-pci.c1
-rw-r--r--drivers/usb/host/xhci-ring.c2
-rw-r--r--drivers/usb/host/xhci.c18
-rw-r--r--drivers/usb/misc/sisusbvga/sisusb.c1
-rw-r--r--drivers/usb/musb/omap2430.c7
-rw-r--r--drivers/usb/musb/tusb6010.c7
-rw-r--r--drivers/usb/phy/phy-omap-usb3.c2
-rw-r--r--drivers/usb/phy/phy-samsung-usb2.c2
-rw-r--r--drivers/usb/renesas_usbhs/mod_gadget.c4
-rw-r--r--drivers/usb/serial/Kconfig7
-rw-r--r--drivers/usb/serial/Makefile1
-rw-r--r--drivers/usb/serial/cp210x.c4
-rw-r--r--drivers/usb/serial/ftdi_sio.c31
-rw-r--r--drivers/usb/serial/ftdi_sio_ids.h34
-rw-r--r--drivers/usb/serial/mos7840.c175
-rw-r--r--drivers/usb/serial/option.c23
-rw-r--r--drivers/usb/serial/suunto.c41
-rw-r--r--drivers/usb/serial/ti_usb_3410_5052.c2
-rw-r--r--drivers/usb/storage/unusual_devs.h7
-rw-r--r--drivers/vfio/pci/vfio_pci.c23
-rw-r--r--drivers/vfio/vfio.c37
-rw-r--r--drivers/vhost/net.c37
-rw-r--r--drivers/vhost/scsi.c17
-rw-r--r--drivers/vhost/test.c6
-rw-r--r--drivers/vhost/vhost.h10
-rw-r--r--drivers/video/aty/atyfb_base.c4
-rw-r--r--drivers/video/backlight/max8925_bl.c41
-rw-r--r--drivers/video/mxsfb.c26
-rw-r--r--drivers/video/nuc900fb.c3
-rw-r--r--drivers/video/omap2/displays-new/connector-analog-tv.c18
-rw-r--r--drivers/video/sgivwfb.c2
-rw-r--r--drivers/video/sh7760fb.c2
-rw-r--r--drivers/video/uvesafb.c2
-rw-r--r--drivers/video/vga16fb.c1
-rw-r--r--drivers/video/xilinxfb.c4
-rw-r--r--drivers/xen/Kconfig2
-rw-r--r--drivers/xen/Makefile5
-rw-r--r--drivers/xen/evtchn.c21
-rw-r--r--drivers/xen/xen-acpi-cpuhotplug.c2
-rw-r--r--drivers/xen/xenbus/xenbus_probe_frontend.c19
-rw-r--r--fs/btrfs/backref.c48
-rw-r--r--fs/btrfs/ctree.c1
-rw-r--r--fs/btrfs/extent-tree.c27
-rw-r--r--fs/btrfs/extent_io.c9
-rw-r--r--fs/btrfs/file.c62
-rw-r--r--fs/btrfs/inode.c52
-rw-r--r--fs/btrfs/scrub.c2
-rw-r--r--fs/btrfs/transaction.c8
-rw-r--r--fs/btrfs/transaction.h2
-rw-r--r--fs/btrfs/tree-log.c5
-rw-r--r--fs/debugfs/inode.c69
-rw-r--r--fs/dlm/user.c1
-rw-r--r--fs/ext3/namei.c2
-rw-r--r--fs/ext4/balloc.c4
-rw-r--r--fs/ext4/extents.c23
-rw-r--r--fs/ext4/extents_status.c73
-rw-r--r--fs/ext4/ialloc.c10
-rw-r--r--fs/ext4/inode.c52
-rw-r--r--fs/ext4/mballoc.c11
-rw-r--r--fs/ext4/namei.c2
-rw-r--r--fs/ext4/page-io.c35
-rw-r--r--fs/ext4/super.c15
-rw-r--r--fs/fcntl.c4
-rw-r--r--fs/fuse/dir.c51
-rw-r--r--fs/lockd/clntlock.c13
-rw-r--r--fs/lockd/clntproc.c5
-rw-r--r--fs/lockd/svclock.c4
-rw-r--r--fs/namei.c10
-rw-r--r--fs/nfs/inode.c11
-rw-r--r--fs/nfs/nfs4proc.c8
-rw-r--r--fs/nfs/nfs4xdr.c21
-rw-r--r--fs/nfs/super.c4
-rw-r--r--fs/nfsd/nfs4proc.c4
-rw-r--r--fs/nfsd/nfs4state.c2
-rw-r--r--fs/nfsd/nfs4xdr.c5
-rw-r--r--fs/nfsd/nfsd.h1
-rw-r--r--fs/nfsd/nfssvc.c13
-rw-r--r--fs/nfsd/vfs.c5
-rw-r--r--fs/ocfs2/refcounttree.c5
-rw-r--r--fs/open.c4
-rw-r--r--fs/proc/vmcore.c2
-rw-r--r--fs/reiserfs/procfs.c99
-rw-r--r--fs/reiserfs/super.c3
-rw-r--r--fs/super.c25
-rw-r--r--fs/sysfs/group.c70
-rw-r--r--fs/xfs/xfs_dinode.h3
-rw-r--r--fs/xfs/xfs_inode.c31
-rw-r--r--fs/xfs/xfs_log_recover.c13
-rw-r--r--include/acpi/acpi_bus.h14
-rw-r--r--include/acpi/acpixf.h1
-rw-r--r--include/acpi/actypes.h15
-rw-r--r--include/drm/drm_fixed.h14
-rw-r--r--include/dt-bindings/clock/vf610-clock.h4
-rw-r--r--include/dt-bindings/pinctrl/am33xx.h2
-rw-r--r--include/linux/cgroup.h3
-rw-r--r--include/linux/cgroup_subsys.h45
-rw-r--r--include/linux/cpu.h2
-rw-r--r--include/linux/crc-t10dif.h4
-rw-r--r--include/linux/dcache.h2
-rw-r--r--include/linux/device.h37
-rw-r--r--include/linux/drbd.h6
-rw-r--r--include/linux/drbd_genl.h2
-rw-r--r--include/linux/drbd_limits.h9
-rw-r--r--include/linux/edac.h7
-rw-r--r--include/linux/firewire.h1
-rw-r--r--include/linux/ftrace_event.h12
-rw-r--r--include/linux/if_vlan.h3
-rw-r--r--include/linux/iio/iio.h4
-rw-r--r--include/linux/iio/trigger.h3
-rw-r--r--include/linux/kernel.h2
-rw-r--r--include/linux/list.h11
-rw-r--r--include/linux/mfd/syscon/imx6q-iomuxc-gpr.h137
-rw-r--r--include/linux/mfd/ti_am335x_tscadc.h16
-rw-r--r--include/linux/mlx5/device.h20
-rw-r--r--include/linux/mlx5/driver.h4
-rw-r--r--include/linux/mod_devicetable.h5
-rw-r--r--include/linux/netdevice.h2
-rw-r--r--include/linux/perf_event.h2
-rw-r--r--include/linux/platform_data/mmc-pxamci.h2
-rw-r--r--include/linux/pm_wakeup.h4
-rw-r--r--include/linux/regmap.h1
-rw-r--r--include/linux/sched.h1
-rw-r--r--include/linux/shdma-base.h4
-rw-r--r--include/linux/skbuff.h2
-rw-r--r--include/linux/sunrpc/sched.h1
-rw-r--r--include/linux/sysfs.h64
-rw-r--r--include/linux/tick.h6
-rw-r--r--include/linux/usb.h11
-rw-r--r--include/linux/user_namespace.h1
-rw-r--r--include/linux/vmpressure.h3
-rw-r--r--include/media/v4l2-ctrls.h1
-rw-r--r--include/net/busy_poll.h11
-rw-r--r--include/net/ip6_fib.h2
-rw-r--r--include/net/ndisc.h2
-rw-r--r--include/net/nfc/hci.h2
-rw-r--r--include/net/nfc/nfc.h4
-rw-r--r--include/net/sock.h2
-rw-r--r--include/trace/events/bcache.h381
-rw-r--r--include/trace/ftrace.h4
-rw-r--r--include/uapi/asm-generic/fcntl.h4
-rw-r--r--include/uapi/linux/firewire-cdev.h4
-rw-r--r--include/uapi/linux/nfc.h6
-rw-r--r--include/uapi/linux/usb/ch11.h11
-rw-r--r--include/xen/interface/io/blkif.h53
-rw-r--r--include/xen/interface/io/ring.h5
-rw-r--r--init/calibrate.c13
-rw-r--r--kernel/Makefile3
-rw-r--r--kernel/cgroup.c35
-rw-r--r--kernel/cpu.c6
-rw-r--r--kernel/events/core.c6
-rw-r--r--kernel/fork.c2
-rw-r--r--kernel/freezer.c2
-rw-r--r--kernel/hrtimer.c6
-rw-r--r--kernel/power/autosleep.c3
-rw-r--r--kernel/power/process.c11
-rw-r--r--kernel/printk/Makefile2
-rw-r--r--kernel/printk/braille.c49
-rw-r--r--kernel/printk/braille.h48
-rw-r--r--kernel/printk/console_cmdline.h14
-rw-r--r--kernel/printk/printk.c (renamed from kernel/printk.c)185
-rw-r--r--kernel/profile.c2
-rw-r--r--kernel/ptrace.c1
-rw-r--r--kernel/rcutorture.c6
-rw-r--r--kernel/rcutree.c6
-rw-r--r--kernel/rcutree.h4
-rw-r--r--kernel/rcutree_plugin.h6
-rw-r--r--kernel/relay.c2
-rw-r--r--kernel/sched/core.c12
-rw-r--r--kernel/sched/fair.c6
-rw-r--r--kernel/smp.c2
-rw-r--r--kernel/smpboot.c2
-rw-r--r--kernel/softirq.c8
-rw-r--r--kernel/sysctl.c6
-rw-r--r--kernel/time/tick-sched.c11
-rw-r--r--kernel/timer.c10
-rw-r--r--kernel/trace/ftrace.c105
-rw-r--r--kernel/trace/ring_buffer.c26
-rw-r--r--kernel/trace/trace.c254
-rw-r--r--kernel/trace/trace.h18
-rw-r--r--kernel/trace/trace_event_perf.c10
-rw-r--r--kernel/trace/trace_events.c292
-rw-r--r--kernel/trace/trace_events_filter.c21
-rw-r--r--kernel/trace/trace_functions.c2
-rw-r--r--kernel/trace/trace_functions_graph.c54
-rw-r--r--kernel/trace/trace_kprobe.c50
-rw-r--r--kernel/trace/trace_mmiotrace.c8
-rw-r--r--kernel/trace/trace_output.c14
-rw-r--r--kernel/trace/trace_syscalls.c26
-rw-r--r--kernel/trace/trace_uprobe.c53
-rw-r--r--kernel/user_namespace.c17
-rw-r--r--kernel/wait.c3
-rw-r--r--kernel/workqueue.c48
-rw-r--r--lib/Kconfig2
-rw-r--r--lib/Kconfig.debug2
-rw-r--r--lib/crc-t10dif.c73
-rw-r--r--lib/earlycpio.c2
-rw-r--r--lib/mpi/longlong.h17
-rw-r--r--lib/percpu_counter.c2
-rw-r--r--mm/backing-dev.c2
-rw-r--r--mm/huge_memory.c4
-rw-r--r--mm/memcontrol.c3
-rw-r--r--mm/mempolicy.c6
-rw-r--r--mm/mmap.c2
-rw-r--r--mm/page-writeback.c4
-rw-r--r--mm/shmem.c3
-rw-r--r--mm/slab.c10
-rw-r--r--mm/slub.c7
-rw-r--r--mm/swap.c29
-rw-r--r--mm/vmpressure.c28
-rw-r--r--mm/vmstat.c6
-rw-r--r--mm/zbud.c2
-rw-r--r--net/8021q/vlan_core.c2
-rw-r--r--net/8021q/vlan_dev.c7
-rw-r--r--net/Kconfig2
-rw-r--r--net/bluetooth/hci_core.c26
-rw-r--r--net/bridge/br_device.c3
-rw-r--r--net/bridge/br_input.c3
-rw-r--r--net/bridge/br_multicast.c44
-rw-r--r--net/bridge/br_private.h12
-rw-r--r--net/core/dev.c11
-rw-r--r--net/core/ethtool.c30
-rw-r--r--net/core/flow.c4
-rw-r--r--net/core/neighbour.c29
-rw-r--r--net/core/skbuff.c5
-rw-r--r--net/core/sock.c6
-rw-r--r--net/core/sysctl_net_core.c8
-rw-r--r--net/ethernet/eth.c21
-rw-r--r--net/ipv4/devinet.c4
-rw-r--r--net/ipv4/fib_trie.c2
-rw-r--r--net/ipv4/ip_input.c7
-rw-r--r--net/ipv4/sysctl_net_ipv4.c6
-rw-r--r--net/ipv6/addrconf.c43
-rw-r--r--net/ipv6/ip6_fib.c25
-rw-r--r--net/ipv6/ip6mr.c5
-rw-r--r--net/ipv6/ndisc.c10
-rw-r--r--net/ipv6/route.c8
-rw-r--r--net/irda/irlan/irlan_eth.c31
-rw-r--r--net/iucv/iucv.c2
-rw-r--r--net/key/af_key.c4
-rw-r--r--net/mac80211/cfg.c2
-rw-r--r--net/mac80211/mesh_ps.c4
-rw-r--r--net/mac80211/pm.c7
-rw-r--r--net/mac80211/rc80211_minstrel.c3
-rw-r--r--net/mac80211/rc80211_minstrel_ht.c10
-rw-r--r--net/mac80211/rx.c10
-rw-r--r--net/netfilter/nf_conntrack_expect.c5
-rw-r--r--net/netfilter/xt_socket.c10
-rw-r--r--net/netlabel/netlabel_cipso_v4.c4
-rw-r--r--net/netlabel/netlabel_domainhash.c104
-rw-r--r--net/netlabel/netlabel_domainhash.h46
-rw-r--r--net/netlabel/netlabel_kapi.c88
-rw-r--r--net/netlabel/netlabel_mgmt.c44
-rw-r--r--net/netlabel/netlabel_unlabeled.c2
-rw-r--r--net/netlink/genetlink.c4
-rw-r--r--net/nfc/core.c20
-rw-r--r--net/nfc/hci/core.c8
-rw-r--r--net/nfc/nci/Kconfig1
-rw-r--r--net/nfc/netlink.c12
-rw-r--r--net/nfc/nfc.h6
-rw-r--r--net/sched/sch_atm.c1
-rw-r--r--net/sched/sch_cbq.c1
-rw-r--r--net/sched/sch_htb.c2
-rw-r--r--net/sched/sch_qfq.c85
-rw-r--r--net/socket.c2
-rw-r--r--net/sunrpc/auth_gss/gss_rpc_upcall.c3
-rw-r--r--net/sunrpc/auth_gss/gss_rpc_xdr.c9
-rw-r--r--net/sunrpc/auth_gss/svcauth_gss.c2
-rw-r--r--net/sunrpc/clnt.c5
-rw-r--r--net/sunrpc/netns.h1
-rw-r--r--net/sunrpc/rpcb_clnt.c48
-rw-r--r--net/sunrpc/svcsock.c4
-rw-r--r--net/sunrpc/xprtrdma/svc_rdma_marshal.c20
-rw-r--r--net/tipc/server.c15
-rw-r--r--net/wireless/nl80211.c11
-rw-r--r--net/wireless/reg.c7
-rw-r--r--net/wireless/sme.c29
-rw-r--r--security/smack/smack_lsm.c24
-rw-r--r--sound/arm/pxa2xx-pcm-lib.c2
-rw-r--r--sound/core/Kconfig3
-rw-r--r--sound/core/Makefile3
-rw-r--r--sound/core/compress_offload.c2
-rw-r--r--sound/core/pcm_dmaengine.c (renamed from sound/soc/soc-dmaengine-pcm.c)0
-rw-r--r--sound/core/seq/oss/seq_oss_init.c16
-rw-r--r--sound/core/seq/oss/seq_oss_midi.c2
-rw-r--r--sound/oss/vwsnd.c4
-rw-r--r--sound/pci/asihpi/asihpi.c3
-rw-r--r--sound/pci/atiixp.c2
-rw-r--r--sound/pci/atiixp_modem.c2
-rw-r--r--sound/pci/hda/hda_auto_parser.c2
-rw-r--r--sound/pci/hda/patch_hdmi.c2
-rw-r--r--sound/pci/hda/patch_sigmatel.c14
-rw-r--r--sound/soc/Kconfig5
-rw-r--r--sound/soc/Makefile4
-rw-r--r--sound/soc/atmel/atmel-pcm-dma.c2
-rw-r--r--sound/soc/au1x/ac97c.c2
-rw-r--r--sound/soc/blackfin/bf5xx-ac97.c5
-rw-r--r--sound/soc/blackfin/bf5xx-ac97.h1
-rw-r--r--sound/soc/cirrus/ep93xx-ac97.c4
-rw-r--r--sound/soc/codecs/max98088.c2
-rw-r--r--sound/soc/codecs/sgtl5000.c5
-rw-r--r--sound/soc/codecs/sgtl5000.h2
-rw-r--r--sound/soc/codecs/wm0010.c24
-rw-r--r--sound/soc/codecs/wm8978.c1
-rw-r--r--sound/soc/codecs/wm8994.c4
-rw-r--r--sound/soc/omap/Kconfig2
-rw-r--r--sound/soc/omap/mcbsp.c39
-rw-r--r--sound/soc/omap/omap-dmic.c11
-rw-r--r--sound/soc/omap/omap-mcpdm.c16
-rw-r--r--sound/soc/omap/omap-pcm.c17
-rw-r--r--sound/soc/pxa/Kconfig2
-rw-r--r--sound/soc/s6000/s6000-pcm.c2
-rw-r--r--sound/soc/samsung/i2s.c8
-rw-r--r--sound/soc/soc-core.c4
-rw-r--r--sound/soc/soc-dapm.c5
-rw-r--r--sound/soc/spear/Kconfig2
-rw-r--r--sound/soc/tegra/tegra20_ac97.c6
-rw-r--r--sound/soc/tegra/tegra20_spdif.c4
-rw-r--r--sound/usb/6fire/comm.c38
-rw-r--r--sound/usb/6fire/comm.h2
-rw-r--r--sound/usb/6fire/pcm.c14
-rw-r--r--sound/usb/endpoint.c13
-rw-r--r--sound/usb/hiface/pcm.c2
-rw-r--r--sound/usb/misc/ua101.c14
-rw-r--r--sound/usb/usx2y/usbusx2yaudio.c4
-rw-r--r--tools/hv/hv_kvp_daemon.c5
1469 files changed, 15940 insertions, 101705 deletions
diff --git a/.gitignore b/.gitignore
index 3b8b9b33be38..7e9932e55475 100644
--- a/.gitignore
+++ b/.gitignore
@@ -29,6 +29,7 @@ modules.builtin
29*.bz2 29*.bz2
30*.lzma 30*.lzma
31*.xz 31*.xz
32*.lz4
32*.lzo 33*.lzo
33*.patch 34*.patch
34*.gcno 35*.gcno
diff --git a/Documentation/ABI/testing/sysfs-driver-xen-blkback b/Documentation/ABI/testing/sysfs-driver-xen-blkback
new file mode 100644
index 000000000000..8bb43b66eb55
--- /dev/null
+++ b/Documentation/ABI/testing/sysfs-driver-xen-blkback
@@ -0,0 +1,17 @@
1What: /sys/module/xen_blkback/parameters/max_buffer_pages
2Date: March 2013
3KernelVersion: 3.11
4Contact: Roger Pau Monné <roger.pau@citrix.com>
5Description:
6 Maximum number of free pages to keep in each block
7 backend buffer.
8
9What: /sys/module/xen_blkback/parameters/max_persistent_grants
10Date: March 2013
11KernelVersion: 3.11
12Contact: Roger Pau Monné <roger.pau@citrix.com>
13Description:
14 Maximum number of grants to map persistently in
15 blkback. If the frontend tries to use more than
16 max_persistent_grants, the LRU kicks in and starts
17 removing 5% of max_persistent_grants every 100ms.
diff --git a/Documentation/ABI/testing/sysfs-driver-xen-blkfront b/Documentation/ABI/testing/sysfs-driver-xen-blkfront
new file mode 100644
index 000000000000..c0a6cb7eb314
--- /dev/null
+++ b/Documentation/ABI/testing/sysfs-driver-xen-blkfront
@@ -0,0 +1,10 @@
1What: /sys/module/xen_blkfront/parameters/max
2Date: June 2013
3KernelVersion: 3.11
4Contact: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
5Description:
6 Maximum number of segments that the frontend will negotiate
7 with the backend for indirect descriptors. The default value
8 is 32 - higher value means more potential throughput but more
9 memory usage. The backend picks the minimum of the frontend
10 and its default backend value.
diff --git a/Documentation/DocBook/device-drivers.tmpl b/Documentation/DocBook/device-drivers.tmpl
index cbfdf5486639..fe397f90a34f 100644
--- a/Documentation/DocBook/device-drivers.tmpl
+++ b/Documentation/DocBook/device-drivers.tmpl
@@ -84,7 +84,7 @@ X!Iinclude/linux/kobject.h
84 84
85 <sect1><title>Kernel utility functions</title> 85 <sect1><title>Kernel utility functions</title>
86!Iinclude/linux/kernel.h 86!Iinclude/linux/kernel.h
87!Ekernel/printk.c 87!Ekernel/printk/printk.c
88!Ekernel/panic.c 88!Ekernel/panic.c
89!Ekernel/sys.c 89!Ekernel/sys.c
90!Ekernel/rcupdate.c 90!Ekernel/rcupdate.c
diff --git a/Documentation/DocBook/media_api.tmpl b/Documentation/DocBook/media_api.tmpl
index 6a8b7158697f..9c92bb879b6d 100644
--- a/Documentation/DocBook/media_api.tmpl
+++ b/Documentation/DocBook/media_api.tmpl
@@ -1,6 +1,6 @@
1<?xml version="1.0"?> 1<?xml version="1.0"?>
2<!DOCTYPE book PUBLIC "-//OASIS//DTD DocBook XML V4.1.2//EN" 2<!DOCTYPE book PUBLIC "-//OASIS//DTD DocBook XML V4.2//EN"
3 "http://www.oasis-open.org/docbook/xml/4.1.2/docbookx.dtd" [ 3 "http://www.oasis-open.org/docbook/xml/4.2/docbookx.dtd" [
4<!ENTITY % media-entities SYSTEM "./media-entities.tmpl"> %media-entities; 4<!ENTITY % media-entities SYSTEM "./media-entities.tmpl"> %media-entities;
5<!ENTITY media-indices SYSTEM "./media-indices.tmpl"> 5<!ENTITY media-indices SYSTEM "./media-indices.tmpl">
6 6
diff --git a/Documentation/bcache.txt b/Documentation/bcache.txt
index c3365f26b2d9..32b6c3189d98 100644
--- a/Documentation/bcache.txt
+++ b/Documentation/bcache.txt
@@ -46,29 +46,33 @@ you format your backing devices and cache device at the same time, you won't
46have to manually attach: 46have to manually attach:
47 make-bcache -B /dev/sda /dev/sdb -C /dev/sdc 47 make-bcache -B /dev/sda /dev/sdb -C /dev/sdc
48 48
49To make bcache devices known to the kernel, echo them to /sys/fs/bcache/register: 49bcache-tools now ships udev rules, and bcache devices are known to the kernel
50immediately. Without udev, you can manually register devices like this:
50 51
51 echo /dev/sdb > /sys/fs/bcache/register 52 echo /dev/sdb > /sys/fs/bcache/register
52 echo /dev/sdc > /sys/fs/bcache/register 53 echo /dev/sdc > /sys/fs/bcache/register
53 54
54To register your bcache devices automatically, you could add something like 55Registering the backing device makes the bcache device show up in /dev; you can
55this to an init script: 56now format it and use it as normal. But the first time using a new bcache
57device, it'll be running in passthrough mode until you attach it to a cache.
58See the section on attaching.
56 59
57 echo /dev/sd* > /sys/fs/bcache/register_quiet 60The devices show up as:
58 61
59It'll look for bcache superblocks and ignore everything that doesn't have one. 62 /dev/bcache<N>
60 63
61Registering the backing device makes the bcache show up in /dev; you can now 64As well as (with udev):
62format it and use it as normal. But the first time using a new bcache device,
63it'll be running in passthrough mode until you attach it to a cache. See the
64section on attaching.
65 65
66The devices show up at /dev/bcacheN, and can be controlled via sysfs from 66 /dev/bcache/by-uuid/<uuid>
67/sys/block/bcacheN/bcache: 67 /dev/bcache/by-label/<label>
68
69To get started:
68 70
69 mkfs.ext4 /dev/bcache0 71 mkfs.ext4 /dev/bcache0
70 mount /dev/bcache0 /mnt 72 mount /dev/bcache0 /mnt
71 73
74You can control bcache devices through sysfs at /sys/block/bcache<N>/bcache .
75
72Cache devices are managed as sets; multiple caches per set isn't supported yet 76Cache devices are managed as sets; multiple caches per set isn't supported yet
73but will allow for mirroring of metadata and dirty data in the future. Your new 77but will allow for mirroring of metadata and dirty data in the future. Your new
74cache set shows up as /sys/fs/bcache/<UUID> 78cache set shows up as /sys/fs/bcache/<UUID>
@@ -80,11 +84,11 @@ must be attached to your cache set to enable caching. Attaching a backing
80device to a cache set is done thusly, with the UUID of the cache set in 84device to a cache set is done thusly, with the UUID of the cache set in
81/sys/fs/bcache: 85/sys/fs/bcache:
82 86
83 echo <UUID> > /sys/block/bcache0/bcache/attach 87 echo <CSET-UUID> > /sys/block/bcache0/bcache/attach
84 88
85This only has to be done once. The next time you reboot, just reregister all 89This only has to be done once. The next time you reboot, just reregister all
86your bcache devices. If a backing device has data in a cache somewhere, the 90your bcache devices. If a backing device has data in a cache somewhere, the
87/dev/bcache# device won't be created until the cache shows up - particularly 91/dev/bcache<N> device won't be created until the cache shows up - particularly
88important if you have writeback caching turned on. 92important if you have writeback caching turned on.
89 93
90If you're booting up and your cache device is gone and never coming back, you 94If you're booting up and your cache device is gone and never coming back, you
@@ -191,6 +195,9 @@ want for getting the best possible numbers when benchmarking.
191 195
192SYSFS - BACKING DEVICE: 196SYSFS - BACKING DEVICE:
193 197
198Available at /sys/block/<bdev>/bcache, /sys/block/bcache*/bcache and
199(if attached) /sys/fs/bcache/<cset-uuid>/bdev*
200
194attach 201attach
195 Echo the UUID of a cache set to this file to enable caching. 202 Echo the UUID of a cache set to this file to enable caching.
196 203
@@ -300,6 +307,8 @@ cache_readaheads
300 307
301SYSFS - CACHE SET: 308SYSFS - CACHE SET:
302 309
310Available at /sys/fs/bcache/<cset-uuid>
311
303average_key_size 312average_key_size
304 Average data per key in the btree. 313 Average data per key in the btree.
305 314
@@ -390,6 +399,8 @@ trigger_gc
390 399
391SYSFS - CACHE DEVICE: 400SYSFS - CACHE DEVICE:
392 401
402Available at /sys/block/<cdev>/bcache
403
393block_size 404block_size
394 Minimum granularity of writes - should match hardware sector size. 405 Minimum granularity of writes - should match hardware sector size.
395 406
diff --git a/Documentation/cpu-hotplug.txt b/Documentation/cpu-hotplug.txt
index edd4b4df3932..786dc82f98ce 100644
--- a/Documentation/cpu-hotplug.txt
+++ b/Documentation/cpu-hotplug.txt
@@ -267,8 +267,8 @@ Q: If i have some kernel code that needs to be aware of CPU arrival and
267A: This is what you would need in your kernel code to receive notifications. 267A: This is what you would need in your kernel code to receive notifications.
268 268
269 #include <linux/cpu.h> 269 #include <linux/cpu.h>
270 static int __cpuinit foobar_cpu_callback(struct notifier_block *nfb, 270 static int foobar_cpu_callback(struct notifier_block *nfb,
271 unsigned long action, void *hcpu) 271 unsigned long action, void *hcpu)
272 { 272 {
273 unsigned int cpu = (unsigned long)hcpu; 273 unsigned int cpu = (unsigned long)hcpu;
274 274
@@ -285,7 +285,7 @@ A: This is what you would need in your kernel code to receive notifications.
285 return NOTIFY_OK; 285 return NOTIFY_OK;
286 } 286 }
287 287
288 static struct notifier_block __cpuinitdata foobar_cpu_notifer = 288 static struct notifier_block foobar_cpu_notifer =
289 { 289 {
290 .notifier_call = foobar_cpu_callback, 290 .notifier_call = foobar_cpu_callback,
291 }; 291 };
diff --git a/Documentation/devicetree/bindings/clock/imx27-clock.txt b/Documentation/devicetree/bindings/clock/imx27-clock.txt
index ab1a56e9de9d..7a2070393732 100644
--- a/Documentation/devicetree/bindings/clock/imx27-clock.txt
+++ b/Documentation/devicetree/bindings/clock/imx27-clock.txt
@@ -98,6 +98,7 @@ clocks and IDs.
98 fpm 83 98 fpm 83
99 mpll_osc_sel 84 99 mpll_osc_sel 84
100 mpll_sel 85 100 mpll_sel 85
101 spll_gate 86
101 102
102Examples: 103Examples:
103 104
diff --git a/Documentation/devicetree/bindings/i2c/i2c-mv64xxx.txt b/Documentation/devicetree/bindings/i2c/i2c-mv64xxx.txt
index a1ee681942cc..6113f9275f42 100644
--- a/Documentation/devicetree/bindings/i2c/i2c-mv64xxx.txt
+++ b/Documentation/devicetree/bindings/i2c/i2c-mv64xxx.txt
@@ -4,7 +4,7 @@
4Required properties : 4Required properties :
5 5
6 - reg : Offset and length of the register set for the device 6 - reg : Offset and length of the register set for the device
7 - compatible : Should be "marvell,mv64xxx-i2c" 7 - compatible : Should be "marvell,mv64xxx-i2c" or "allwinner,sun4i-i2c"
8 - interrupts : The interrupt number 8 - interrupts : The interrupt number
9 9
10Optional properties : 10Optional properties :
diff --git a/Documentation/devicetree/bindings/regulator/palmas-pmic.txt b/Documentation/devicetree/bindings/regulator/palmas-pmic.txt
index d5a308629c57..30b0581bb1ce 100644
--- a/Documentation/devicetree/bindings/regulator/palmas-pmic.txt
+++ b/Documentation/devicetree/bindings/regulator/palmas-pmic.txt
@@ -31,9 +31,8 @@ Optional nodes:
31 Optional sub-node properties: 31 Optional sub-node properties:
32 ti,warm-reset - maintain voltage during warm reset(boolean) 32 ti,warm-reset - maintain voltage during warm reset(boolean)
33 ti,roof-floor - control voltage selection by pin(boolean) 33 ti,roof-floor - control voltage selection by pin(boolean)
34 ti,sleep-mode - mode to adopt in pmic sleep 0 - off, 1 - auto, 34 ti,mode-sleep - mode to adopt in pmic sleep 0 - off, 1 - auto,
35 2 - eco, 3 - forced pwm 35 2 - eco, 3 - forced pwm
36 ti,tstep - slope control 0 - Jump, 1 10mV/us, 2 5mV/us, 3 2.5mV/us
37 ti,smps-range - OTP has the wrong range set for the hardware so override 36 ti,smps-range - OTP has the wrong range set for the hardware so override
38 0 - low range, 1 - high range. 37 0 - low range, 1 - high range.
39 38
@@ -59,7 +58,6 @@ pmic {
59 ti,warm-reset; 58 ti,warm-reset;
60 ti,roof-floor; 59 ti,roof-floor;
61 ti,mode-sleep = <0>; 60 ti,mode-sleep = <0>;
62 ti,tstep = <0>;
63 ti,smps-range = <1>; 61 ti,smps-range = <1>;
64 }; 62 };
65 63
diff --git a/Documentation/devicetree/bindings/vendor-prefixes.txt b/Documentation/devicetree/bindings/vendor-prefixes.txt
index d5a79caec147..366ce9b87240 100644
--- a/Documentation/devicetree/bindings/vendor-prefixes.txt
+++ b/Documentation/devicetree/bindings/vendor-prefixes.txt
@@ -26,6 +26,7 @@ est ESTeem Wireless Modems
26fsl Freescale Semiconductor 26fsl Freescale Semiconductor
27GEFanuc GE Fanuc Intelligent Platforms Embedded Systems, Inc. 27GEFanuc GE Fanuc Intelligent Platforms Embedded Systems, Inc.
28gef GE Fanuc Intelligent Platforms Embedded Systems, Inc. 28gef GE Fanuc Intelligent Platforms Embedded Systems, Inc.
29hisilicon Hisilicon Limited.
29hp Hewlett Packard 30hp Hewlett Packard
30ibm International Business Machines (IBM) 31ibm International Business Machines (IBM)
31idt Integrated Device Technologies, Inc. 32idt Integrated Device Technologies, Inc.
@@ -43,6 +44,7 @@ nxp NXP Semiconductors
43onnn ON Semiconductor Corp. 44onnn ON Semiconductor Corp.
44picochip Picochip Ltd 45picochip Picochip Ltd
45powervr PowerVR (deprecated, use img) 46powervr PowerVR (deprecated, use img)
47qca Qualcomm Atheros, Inc.
46qcom Qualcomm, Inc. 48qcom Qualcomm, Inc.
47ralink Mediatek/Ralink Technology Corp. 49ralink Mediatek/Ralink Technology Corp.
48ramtron Ramtron International 50ramtron Ramtron International
diff --git a/Documentation/ja_JP/HOWTO b/Documentation/ja_JP/HOWTO
index 050d37fe6d40..8148a47fc70e 100644
--- a/Documentation/ja_JP/HOWTO
+++ b/Documentation/ja_JP/HOWTO
@@ -11,14 +11,14 @@ for non English (read: Japanese) speakers and is not intended as a
11fork. So if you have any comments or updates for this file, please try 11fork. So if you have any comments or updates for this file, please try
12to update the original English file first. 12to update the original English file first.
13 13
14Last Updated: 2011/03/31 14Last Updated: 2013/07/19
15================================== 15==================================
16これは、 16これは、
17linux-2.6.38/Documentation/HOWTO 17linux-3.10/Documentation/HOWTO
18の和訳です。 18の和訳です。
19 19
20翻訳団体: JF プロジェクト < http://www.linux.or.jp/JF/ > 20翻訳団体: JF プロジェクト < http://linuxjf.sourceforge.jp/ >
21翻訳日: 2011/3/28 21翻訳日: 2013/7/19
22翻訳者: Tsugikazu Shibata <tshibata at ab dot jp dot nec dot com> 22翻訳者: Tsugikazu Shibata <tshibata at ab dot jp dot nec dot com>
23校正者: 松倉さん <nbh--mats at nifty dot com> 23校正者: 松倉さん <nbh--mats at nifty dot com>
24 小林 雅典さん (Masanori Kobayasi) <zap03216 at nifty dot ne dot jp> 24 小林 雅典さん (Masanori Kobayasi) <zap03216 at nifty dot ne dot jp>
@@ -245,7 +245,7 @@ Linux カーネルソースツリーの中に含まれる、きれいにし、
245自己参照方式で、索引がついた web 形式で、ソースコードを参照することが 245自己参照方式で、索引がついた web 形式で、ソースコードを参照することが
246できます。この最新の素晴しいカーネルコードのリポジトリは以下で見つかり 246できます。この最新の素晴しいカーネルコードのリポジトリは以下で見つかり
247ます- 247ます-
248 http://sosdg.org/~qiyong/lxr/ 248 http://lxr.linux.no/+trees
249 249
250開発プロセス 250開発プロセス
251----------------------- 251-----------------------
@@ -253,24 +253,24 @@ Linux カーネルソースツリーの中に含まれる、きれいにし、
253Linux カーネルの開発プロセスは現在幾つかの異なるメインカーネル「ブラン 253Linux カーネルの開発プロセスは現在幾つかの異なるメインカーネル「ブラン
254チ」と多数のサブシステム毎のカーネルブランチから構成されます。 254チ」と多数のサブシステム毎のカーネルブランチから構成されます。
255これらのブランチとは- 255これらのブランチとは-
256 - メインの 2.6.x カーネルツリー 256 - メインの 3.x カーネルツリー
257 - 2.6.x.y -stable カーネルツリー 257 - 3.x.y -stable カーネルツリー
258 - 2.6.x -git カーネルパッチ 258 - 3.x -git カーネルパッチ
259 - サブシステム毎のカーネルツリーとパッチ 259 - サブシステム毎のカーネルツリーとパッチ
260 - 統合テストのための 2.6.x -next カーネルツリー 260 - 統合テストのための 3.x -next カーネルツリー
261 261
2622.6.x カーネルツリー 2623.x カーネルツリー
263----------------- 263-----------------
264 264
2652.6.x カーネルは Linus Torvalds によってメンテナンスされ、kernel.org 2653.x カーネルは Linus Torvalds によってメンテナンスされ、kernel.org
266の pub/linux/kernel/v2.6/ ディレクトリに存在します。この開発プロセスは 266の pub/linux/kernel/v3.x/ ディレクトリに存在します。この開発プロセスは
267以下のとおり- 267以下のとおり-
268 268
269 - 新しいカーネルがリリースされた直後に、2週間の特別期間が設けられ、 269 - 新しいカーネルがリリースされた直後に、2週間の特別期間が設けられ、
270 この期間中に、メンテナ達は Linus に大きな差分を送ることができます。 270 この期間中に、メンテナ達は Linus に大きな差分を送ることができます。
271 このような差分は通常 -next カーネルに数週間含まれてきたパッチです。 271 このような差分は通常 -next カーネルに数週間含まれてきたパッチです。
272 大きな変更は git(カーネルのソース管理ツール、詳細は 272 大きな変更は git(カーネルのソース管理ツール、詳細は
273 http://git-scm.com/ 参照) を使って送るのが好ましいやり方ですが、パッ 273 http://git-scm.com/ 参照) を使って送るのが好ましいやり方ですが、パッ
274 チファイルの形式のまま送るのでも十分です。 274 チファイルの形式のまま送るのでも十分です。
275 275
276 - 2週間後、-rc1 カーネルがリリースされ、この後にはカーネル全体の安定 276 - 2週間後、-rc1 カーネルがリリースされ、この後にはカーネル全体の安定
@@ -302,20 +302,20 @@ Andrew Morton が Linux-kernel メーリングリストにカーネルリリー
302 実に認識されたバグの状況によりリリースされるのであり、前もって決めら 302 実に認識されたバグの状況によりリリースされるのであり、前もって決めら
303 れた計画によってリリースされるものではないからです。」 303 れた計画によってリリースされるものではないからです。」
304 304
3052.6.x.y -stable カーネルツリー 3053.x.y -stable カーネルツリー
306--------------------------- 306---------------------------
307 307
308バージョン番号が4つの数字に分かれているカーネルは -stable カーネルです。 308バージョン番号が3つの数字に分かれているカーネルは -stable カーネルです。
309これには、2.6.x カーネルで見つかったセキュリティ問題や重大な後戻りに対 309これには、3.x カーネルで見つかったセキュリティ問題や重大な後戻りに対
310する比較的小さい重要な修正が含まれます。 310する比較的小さい重要な修正が含まれます。
311 311
312これは、開発/実験的バージョンのテストに協力することに興味が無く、 312これは、開発/実験的バージョンのテストに協力することに興味が無く、
313最新の安定したカーネルを使いたいユーザに推奨するブランチです。 313最新の安定したカーネルを使いたいユーザに推奨するブランチです。
314 314
315もし、2.6.x.y カーネルが存在しない場合には、番号が一番大きい 2.6.x が 315もし、3.x.y カーネルが存在しない場合には、番号が一番大きい 3.x が
316最新の安定版カーネルです。 316最新の安定版カーネルです。
317 317
3182.6.x.y は "stable" チーム <stable@kernel.org> でメンテされており、必 3183.x.y は "stable" チーム <stable@kernel.org> でメンテされており、必
319要に応じてリリースされます。通常のリリース期間は 2週間毎ですが、差し迫っ 319要に応じてリリースされます。通常のリリース期間は 2週間毎ですが、差し迫っ
320た問題がなければもう少し長くなることもあります。セキュリティ関連の問題 320た問題がなければもう少し長くなることもあります。セキュリティ関連の問題
321の場合はこれに対してだいたいの場合、すぐにリリースがされます。 321の場合はこれに対してだいたいの場合、すぐにリリースがされます。
@@ -324,7 +324,7 @@ Andrew Morton が Linux-kernel メーリングリストにカーネルリリー
324イルにはどのような種類の変更が -stable ツリーに受け入れ可能か、またリ 324イルにはどのような種類の変更が -stable ツリーに受け入れ可能か、またリ
325リースプロセスがどう動くかが記述されています。 325リースプロセスがどう動くかが記述されています。
326 326
3272.6.x -git パッチ 3273.x -git パッチ
328------------------ 328------------------
329 329
330git リポジトリで管理されているLinus のカーネルツリーの毎日のスナップ 330git リポジトリで管理されているLinus のカーネルツリーの毎日のスナップ
@@ -358,14 +358,14 @@ quilt シリーズとして公開されているパッチキューも使われ
358をつけることができます。大部分のこれらの patchwork のサイトは 358をつけることができます。大部分のこれらの patchwork のサイトは
359http://patchwork.kernel.org/ でリストされています。 359http://patchwork.kernel.org/ でリストされています。
360 360
361統合テストのための 2.6.x -next カーネルツリー 361統合テストのための 3.x -next カーネルツリー
362--------------------------------------------- 362---------------------------------------------
363 363
364サブシステムツリーの更新内容がメインラインの 2.6.x ツリーにマージされ 364サブシステムツリーの更新内容がメインラインの 3.x ツリーにマージされ
365る前に、それらは統合テストされる必要があります。この目的のため、実質的 365る前に、それらは統合テストされる必要があります。この目的のため、実質的
366に全サブシステムツリーからほぼ毎日プルされてできる特別なテスト用のリ 366に全サブシステムツリーからほぼ毎日プルされてできる特別なテスト用のリ
367ポジトリが存在します- 367ポジトリが存在します-
368 http://git.kernel.org/?p=linux/kernel/git/sfr/linux-next.git 368 http://git.kernel.org/?p=linux/kernel/git/next/linux-next.git
369 http://linux.f-seidel.de/linux-next/pmwiki/ 369 http://linux.f-seidel.de/linux-next/pmwiki/
370 370
371このやり方によって、-next カーネルは次のマージ機会でどんなものがメイン 371このやり方によって、-next カーネルは次のマージ機会でどんなものがメイン
diff --git a/Documentation/sysctl/net.txt b/Documentation/sysctl/net.txt
index 1c15043aaee4..d569f2a424d5 100644
--- a/Documentation/sysctl/net.txt
+++ b/Documentation/sysctl/net.txt
@@ -52,7 +52,7 @@ Default: 64
52 52
53busy_read 53busy_read
54---------------- 54----------------
55Low latency busy poll timeout for socket reads. (needs CONFIG_NET_LL_RX_POLL) 55Low latency busy poll timeout for socket reads. (needs CONFIG_NET_RX_BUSY_POLL)
56Approximate time in us to busy loop waiting for packets on the device queue. 56Approximate time in us to busy loop waiting for packets on the device queue.
57This sets the default value of the SO_BUSY_POLL socket option. 57This sets the default value of the SO_BUSY_POLL socket option.
58Can be set or overridden per socket by setting socket option SO_BUSY_POLL, 58Can be set or overridden per socket by setting socket option SO_BUSY_POLL,
@@ -63,7 +63,7 @@ Default: 0 (off)
63 63
64busy_poll 64busy_poll
65---------------- 65----------------
66Low latency busy poll timeout for poll and select. (needs CONFIG_NET_LL_RX_POLL) 66Low latency busy poll timeout for poll and select. (needs CONFIG_NET_RX_BUSY_POLL)
67Approximate time in us to busy loop waiting for events. 67Approximate time in us to busy loop waiting for events.
68Recommended value depends on the number of sockets you poll on. 68Recommended value depends on the number of sockets you poll on.
69For several sockets 50, for several hundreds 100. 69For several sockets 50, for several hundreds 100.
diff --git a/MAINTAINERS b/MAINTAINERS
index bf61e04291ab..7cacc88dc79c 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -965,6 +965,12 @@ M: Lennert Buytenhek <kernel@wantstofly.org>
965L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) 965L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
966S: Maintained 966S: Maintained
967 967
968ARM/TEXAS INSTRUMENT KEYSTONE ARCHITECTURE
969M: Santosh Shilimkar <santosh.shilimkar@ti.com>
970L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
971S: Maintained
972F: arch/arm/mach-keystone/
973
968ARM/LOGICPD PXA270 MACHINE SUPPORT 974ARM/LOGICPD PXA270 MACHINE SUPPORT
969M: Lennert Buytenhek <kernel@wantstofly.org> 975M: Lennert Buytenhek <kernel@wantstofly.org>
970L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) 976L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
@@ -1259,7 +1265,6 @@ F: drivers/rtc/rtc-coh901331.c
1259T: git git://git.kernel.org/pub/scm/linux/kernel/git/linusw/linux-stericsson.git 1265T: git git://git.kernel.org/pub/scm/linux/kernel/git/linusw/linux-stericsson.git
1260 1266
1261ARM/Ux500 ARM ARCHITECTURE 1267ARM/Ux500 ARM ARCHITECTURE
1262M: Srinidhi Kasagar <srinidhi.kasagar@stericsson.com>
1263M: Linus Walleij <linus.walleij@linaro.org> 1268M: Linus Walleij <linus.walleij@linaro.org>
1264L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) 1269L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
1265S: Maintained 1270S: Maintained
@@ -1406,7 +1411,7 @@ ATHEROS ATH6KL WIRELESS DRIVER
1406M: Kalle Valo <kvalo@qca.qualcomm.com> 1411M: Kalle Valo <kvalo@qca.qualcomm.com>
1407L: linux-wireless@vger.kernel.org 1412L: linux-wireless@vger.kernel.org
1408W: http://wireless.kernel.org/en/users/Drivers/ath6kl 1413W: http://wireless.kernel.org/en/users/Drivers/ath6kl
1409T: git git://git.kernel.org/pub/scm/linux/kernel/git/kvalo/ath6kl.git 1414T: git git://github.com/kvalo/ath.git
1410S: Supported 1415S: Supported
1411F: drivers/net/wireless/ath/ath6kl/ 1416F: drivers/net/wireless/ath/ath6kl/
1412 1417
@@ -1642,7 +1647,7 @@ S: Maintained
1642F: drivers/net/hamradio/baycom* 1647F: drivers/net/hamradio/baycom*
1643 1648
1644BCACHE (BLOCK LAYER CACHE) 1649BCACHE (BLOCK LAYER CACHE)
1645M: Kent Overstreet <koverstreet@google.com> 1650M: Kent Overstreet <kmo@daterainc.com>
1646L: linux-bcache@vger.kernel.org 1651L: linux-bcache@vger.kernel.org
1647W: http://bcache.evilpiepirate.org 1652W: http://bcache.evilpiepirate.org
1648S: Maintained: 1653S: Maintained:
@@ -2871,7 +2876,7 @@ F: drivers/media/usb/dvb-usb-v2/dvb_usb*
2871F: drivers/media/usb/dvb-usb-v2/usb_urb.c 2876F: drivers/media/usb/dvb-usb-v2/usb_urb.c
2872 2877
2873DYNAMIC DEBUG 2878DYNAMIC DEBUG
2874M: Jason Baron <jbaron@redhat.com> 2879M: Jason Baron <jbaron@akamai.com>
2875S: Maintained 2880S: Maintained
2876F: lib/dynamic_debug.c 2881F: lib/dynamic_debug.c
2877F: include/linux/dynamic_debug.h 2882F: include/linux/dynamic_debug.h
@@ -3346,7 +3351,7 @@ F: Documentation/firmware_class/
3346F: drivers/base/firmware*.c 3351F: drivers/base/firmware*.c
3347F: include/linux/firmware.h 3352F: include/linux/firmware.h
3348 3353
3349FLASHSYSTEM DRIVER (IBM FlashSystem 70/80 PCI SSD Flash Card) 3354FLASH ADAPTER DRIVER (IBM Flash Adapter 900GB Full Height PCI Flash Card)
3350M: Joshua Morris <josh.h.morris@us.ibm.com> 3355M: Joshua Morris <josh.h.morris@us.ibm.com>
3351M: Philip Kelleher <pjk1939@linux.vnet.ibm.com> 3356M: Philip Kelleher <pjk1939@linux.vnet.ibm.com>
3352S: Maintained 3357S: Maintained
@@ -3622,11 +3627,9 @@ F: drivers/isdn/gigaset/
3622F: include/uapi/linux/gigaset_dev.h 3627F: include/uapi/linux/gigaset_dev.h
3623 3628
3624GPIO SUBSYSTEM 3629GPIO SUBSYSTEM
3625M: Grant Likely <grant.likely@linaro.org>
3626M: Linus Walleij <linus.walleij@linaro.org> 3630M: Linus Walleij <linus.walleij@linaro.org>
3627S: Maintained 3631S: Maintained
3628L: linux-gpio@vger.kernel.org 3632L: linux-gpio@vger.kernel.org
3629T: git git://git.secretlab.ca/git/linux-2.6.git
3630F: Documentation/gpio.txt 3633F: Documentation/gpio.txt
3631F: drivers/gpio/ 3634F: drivers/gpio/
3632F: include/linux/gpio* 3635F: include/linux/gpio*
@@ -4472,8 +4475,6 @@ F: drivers/irqchip/
4472 4475
4473IRQ DOMAINS (IRQ NUMBER MAPPING LIBRARY) 4476IRQ DOMAINS (IRQ NUMBER MAPPING LIBRARY)
4474M: Benjamin Herrenschmidt <benh@kernel.crashing.org> 4477M: Benjamin Herrenschmidt <benh@kernel.crashing.org>
4475M: Grant Likely <grant.likely@linaro.org>
4476T: git git://git.secretlab.ca/git/linux-2.6.git irqdomain/next
4477S: Maintained 4478S: Maintained
4478F: Documentation/IRQ-domain.txt 4479F: Documentation/IRQ-domain.txt
4479F: include/linux/irqdomain.h 4480F: include/linux/irqdomain.h
@@ -4990,7 +4991,7 @@ F: arch/powerpc/platforms/44x/
4990 4991
4991LINUX FOR POWERPC EMBEDDED XILINX VIRTEX 4992LINUX FOR POWERPC EMBEDDED XILINX VIRTEX
4992L: linuxppc-dev@lists.ozlabs.org 4993L: linuxppc-dev@lists.ozlabs.org
4993S: Unmaintained 4994S: Orphan
4994F: arch/powerpc/*/*virtex* 4995F: arch/powerpc/*/*virtex*
4995F: arch/powerpc/*/*/*virtex* 4996F: arch/powerpc/*/*/*virtex*
4996 4997
@@ -5886,7 +5887,7 @@ OMAP DEVICE TREE SUPPORT
5886M: Benoît Cousson <b-cousson@ti.com> 5887M: Benoît Cousson <b-cousson@ti.com>
5887M: Tony Lindgren <tony@atomide.com> 5888M: Tony Lindgren <tony@atomide.com>
5888L: linux-omap@vger.kernel.org 5889L: linux-omap@vger.kernel.org
5889L: devicetree-discuss@lists.ozlabs.org (moderated for non-subscribers) 5890L: devicetree@vger.kernel.org
5890S: Maintained 5891S: Maintained
5891F: arch/arm/boot/dts/*omap* 5892F: arch/arm/boot/dts/*omap*
5892F: arch/arm/boot/dts/*am3* 5893F: arch/arm/boot/dts/*am3*
@@ -6050,17 +6051,28 @@ F: drivers/i2c/busses/i2c-ocores.c
6050OPEN FIRMWARE AND FLATTENED DEVICE TREE 6051OPEN FIRMWARE AND FLATTENED DEVICE TREE
6051M: Grant Likely <grant.likely@linaro.org> 6052M: Grant Likely <grant.likely@linaro.org>
6052M: Rob Herring <rob.herring@calxeda.com> 6053M: Rob Herring <rob.herring@calxeda.com>
6053L: devicetree-discuss@lists.ozlabs.org (moderated for non-subscribers) 6054L: devicetree@vger.kernel.org
6054W: http://fdt.secretlab.ca 6055W: http://fdt.secretlab.ca
6055T: git git://git.secretlab.ca/git/linux-2.6.git 6056T: git git://git.secretlab.ca/git/linux-2.6.git
6056S: Maintained 6057S: Maintained
6057F: Documentation/devicetree 6058F: drivers/of/
6058F: drivers/of
6059F: include/linux/of*.h 6059F: include/linux/of*.h
6060F: scripts/dtc 6060F: scripts/dtc/
6061K: of_get_property 6061K: of_get_property
6062K: of_match_table 6062K: of_match_table
6063 6063
6064OPEN FIRMWARE AND FLATTENED DEVICE TREE BINDINGS
6065M: Rob Herring <rob.herring@calxeda.com>
6066M: Pawel Moll <pawel.moll@arm.com>
6067M: Mark Rutland <mark.rutland@arm.com>
6068M: Stephen Warren <swarren@wwwdotorg.org>
6069M: Ian Campbell <ian.campbell@citrix.com>
6070L: devicetree@vger.kernel.org
6071S: Maintained
6072F: Documentation/devicetree/
6073F: arch/*/boot/dts/
6074F: include/dt-bindings/
6075
6064OPENRISC ARCHITECTURE 6076OPENRISC ARCHITECTURE
6065M: Jonas Bonn <jonas@southpole.se> 6077M: Jonas Bonn <jonas@southpole.se>
6066W: http://openrisc.net 6078W: http://openrisc.net
@@ -6719,6 +6731,14 @@ T: git git://linuxtv.org/anttip/media_tree.git
6719S: Maintained 6731S: Maintained
6720F: drivers/media/tuners/qt1010* 6732F: drivers/media/tuners/qt1010*
6721 6733
6734QUALCOMM ATHEROS ATH10K WIRELESS DRIVER
6735M: Kalle Valo <kvalo@qca.qualcomm.com>
6736L: ath10k@lists.infradead.org
6737W: http://wireless.kernel.org/en/users/Drivers/ath10k
6738T: git git://github.com/kvalo/ath.git
6739S: Supported
6740F: drivers/net/wireless/ath/ath10k/
6741
6722QUALCOMM HEXAGON ARCHITECTURE 6742QUALCOMM HEXAGON ARCHITECTURE
6723M: Richard Kuo <rkuo@codeaurora.org> 6743M: Richard Kuo <rkuo@codeaurora.org>
6724L: linux-hexagon@vger.kernel.org 6744L: linux-hexagon@vger.kernel.org
@@ -7746,7 +7766,6 @@ F: drivers/clk/spear/
7746 7766
7747SPI SUBSYSTEM 7767SPI SUBSYSTEM
7748M: Mark Brown <broonie@kernel.org> 7768M: Mark Brown <broonie@kernel.org>
7749M: Grant Likely <grant.likely@linaro.org>
7750L: linux-spi@vger.kernel.org 7769L: linux-spi@vger.kernel.org
7751T: git git://git.kernel.org/pub/scm/linux/kernel/git/broonie/spi.git 7770T: git git://git.kernel.org/pub/scm/linux/kernel/git/broonie/spi.git
7752Q: http://patchwork.kernel.org/project/spi-devel-general/list/ 7771Q: http://patchwork.kernel.org/project/spi-devel-general/list/
@@ -7812,7 +7831,7 @@ F: drivers/staging/asus_oled/
7812 7831
7813STAGING - COMEDI 7832STAGING - COMEDI
7814M: Ian Abbott <abbotti@mev.co.uk> 7833M: Ian Abbott <abbotti@mev.co.uk>
7815M: Mori Hess <fmhess@users.sourceforge.net> 7834M: H Hartley Sweeten <hsweeten@visionengravers.com>
7816S: Odd Fixes 7835S: Odd Fixes
7817F: drivers/staging/comedi/ 7836F: drivers/staging/comedi/
7818 7837
@@ -8264,7 +8283,7 @@ S: Maintained
8264F: sound/soc/codecs/twl4030* 8283F: sound/soc/codecs/twl4030*
8265 8284
8266TI WILINK WIRELESS DRIVERS 8285TI WILINK WIRELESS DRIVERS
8267M: Luciano Coelho <coelho@ti.com> 8286M: Luciano Coelho <luca@coelho.fi>
8268L: linux-wireless@vger.kernel.org 8287L: linux-wireless@vger.kernel.org
8269W: http://wireless.kernel.org/en/users/Drivers/wl12xx 8288W: http://wireless.kernel.org/en/users/Drivers/wl12xx
8270W: http://wireless.kernel.org/en/users/Drivers/wl1251 8289W: http://wireless.kernel.org/en/users/Drivers/wl1251
@@ -8650,6 +8669,11 @@ T: git git://git.alsa-project.org/alsa-kernel.git
8650S: Maintained 8669S: Maintained
8651F: sound/usb/midi.* 8670F: sound/usb/midi.*
8652 8671
8672USB NETWORKING DRIVERS
8673L: linux-usb@vger.kernel.org
8674S: Odd Fixes
8675F: drivers/net/usb/
8676
8653USB OHCI DRIVER 8677USB OHCI DRIVER
8654M: Alan Stern <stern@rowland.harvard.edu> 8678M: Alan Stern <stern@rowland.harvard.edu>
8655L: linux-usb@vger.kernel.org 8679L: linux-usb@vger.kernel.org
@@ -9288,7 +9312,7 @@ S: Maintained
9288F: drivers/net/ethernet/xilinx/xilinx_axienet* 9312F: drivers/net/ethernet/xilinx/xilinx_axienet*
9289 9313
9290XILINX SYSTEMACE DRIVER 9314XILINX SYSTEMACE DRIVER
9291S: Unmaintained 9315S: Orphan
9292F: drivers/block/xsysace.c 9316F: drivers/block/xsysace.c
9293 9317
9294XILINX UARTLITE SERIAL DRIVER 9318XILINX UARTLITE SERIAL DRIVER
diff --git a/Makefile b/Makefile
index 9262ba8da4f9..6e488480bff3 100644
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
1VERSION = 3 1VERSION = 3
2PATCHLEVEL = 11 2PATCHLEVEL = 11
3SUBLEVEL = 0 3SUBLEVEL = 0
4EXTRAVERSION = -rc1 4EXTRAVERSION = -rc5
5NAME = Linux for Workgroups 5NAME = Linux for Workgroups
6 6
7# *DOCUMENTATION* 7# *DOCUMENTATION*
diff --git a/arch/alpha/Kconfig b/arch/alpha/Kconfig
index 837a1f2d8b96..082d9b4b5472 100644
--- a/arch/alpha/Kconfig
+++ b/arch/alpha/Kconfig
@@ -15,6 +15,7 @@ config ALPHA
15 select ARCH_WANT_OPTIONAL_GPIOLIB 15 select ARCH_WANT_OPTIONAL_GPIOLIB
16 select ARCH_WANT_IPC_PARSE_VERSION 16 select ARCH_WANT_IPC_PARSE_VERSION
17 select ARCH_HAVE_NMI_SAFE_CMPXCHG 17 select ARCH_HAVE_NMI_SAFE_CMPXCHG
18 select ARCH_HAS_ATOMIC64_DEC_IF_POSITIVE
18 select GENERIC_SMP_IDLE_THREAD 19 select GENERIC_SMP_IDLE_THREAD
19 select GENERIC_CMOS_UPDATE 20 select GENERIC_CMOS_UPDATE
20 select GENERIC_STRNCPY_FROM_USER 21 select GENERIC_STRNCPY_FROM_USER
diff --git a/arch/alpha/include/asm/atomic.h b/arch/alpha/include/asm/atomic.h
index c2cbe4fc391c..78b03ef39f6f 100644
--- a/arch/alpha/include/asm/atomic.h
+++ b/arch/alpha/include/asm/atomic.h
@@ -186,17 +186,24 @@ static __inline__ long atomic64_sub_return(long i, atomic64_t * v)
186 */ 186 */
187static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u) 187static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
188{ 188{
189 int c, old; 189 int c, new, old;
190 c = atomic_read(v); 190 smp_mb();
191 for (;;) { 191 __asm__ __volatile__(
192 if (unlikely(c == (u))) 192 "1: ldl_l %[old],%[mem]\n"
193 break; 193 " cmpeq %[old],%[u],%[c]\n"
194 old = atomic_cmpxchg((v), c, c + (a)); 194 " addl %[old],%[a],%[new]\n"
195 if (likely(old == c)) 195 " bne %[c],2f\n"
196 break; 196 " stl_c %[new],%[mem]\n"
197 c = old; 197 " beq %[new],3f\n"
198 } 198 "2:\n"
199 return c; 199 ".subsection 2\n"
200 "3: br 1b\n"
201 ".previous"
202 : [old] "=&r"(old), [new] "=&r"(new), [c] "=&r"(c)
203 : [mem] "m"(*v), [a] "rI"(a), [u] "rI"((long)u)
204 : "memory");
205 smp_mb();
206 return old;
200} 207}
201 208
202 209
@@ -207,21 +214,56 @@ static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
207 * @u: ...unless v is equal to u. 214 * @u: ...unless v is equal to u.
208 * 215 *
209 * Atomically adds @a to @v, so long as it was not @u. 216 * Atomically adds @a to @v, so long as it was not @u.
210 * Returns the old value of @v. 217 * Returns true iff @v was not @u.
211 */ 218 */
212static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u) 219static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
213{ 220{
214 long c, old; 221 long c, tmp;
215 c = atomic64_read(v); 222 smp_mb();
216 for (;;) { 223 __asm__ __volatile__(
217 if (unlikely(c == (u))) 224 "1: ldq_l %[tmp],%[mem]\n"
218 break; 225 " cmpeq %[tmp],%[u],%[c]\n"
219 old = atomic64_cmpxchg((v), c, c + (a)); 226 " addq %[tmp],%[a],%[tmp]\n"
220 if (likely(old == c)) 227 " bne %[c],2f\n"
221 break; 228 " stq_c %[tmp],%[mem]\n"
222 c = old; 229 " beq %[tmp],3f\n"
223 } 230 "2:\n"
224 return c != (u); 231 ".subsection 2\n"
232 "3: br 1b\n"
233 ".previous"
234 : [tmp] "=&r"(tmp), [c] "=&r"(c)
235 : [mem] "m"(*v), [a] "rI"(a), [u] "rI"(u)
236 : "memory");
237 smp_mb();
238 return !c;
239}
240
241/*
242 * atomic64_dec_if_positive - decrement by 1 if old value positive
243 * @v: pointer of type atomic_t
244 *
245 * The function returns the old value of *v minus 1, even if
246 * the atomic variable, v, was not decremented.
247 */
248static inline long atomic64_dec_if_positive(atomic64_t *v)
249{
250 long old, tmp;
251 smp_mb();
252 __asm__ __volatile__(
253 "1: ldq_l %[old],%[mem]\n"
254 " subq %[old],1,%[tmp]\n"
255 " ble %[old],2f\n"
256 " stq_c %[tmp],%[mem]\n"
257 " beq %[tmp],3f\n"
258 "2:\n"
259 ".subsection 2\n"
260 "3: br 1b\n"
261 ".previous"
262 : [old] "=&r"(old), [tmp] "=&r"(tmp)
263 : [mem] "m"(*v)
264 : "memory");
265 smp_mb();
266 return old - 1;
225} 267}
226 268
227#define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0) 269#define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
diff --git a/arch/alpha/include/asm/param.h b/arch/alpha/include/asm/param.h
index bf46af51941b..a5b68b268bcf 100644
--- a/arch/alpha/include/asm/param.h
+++ b/arch/alpha/include/asm/param.h
@@ -3,7 +3,9 @@
3 3
4#include <uapi/asm/param.h> 4#include <uapi/asm/param.h>
5 5
6#define HZ CONFIG_HZ 6# undef HZ
7#define USER_HZ HZ 7# define HZ CONFIG_HZ
8# define CLOCKS_PER_SEC HZ /* frequency at which times() counts */ 8# define USER_HZ 1024
9# define CLOCKS_PER_SEC USER_HZ /* frequency at which times() counts */
10
9#endif /* _ASM_ALPHA_PARAM_H */ 11#endif /* _ASM_ALPHA_PARAM_H */
diff --git a/arch/alpha/include/asm/spinlock.h b/arch/alpha/include/asm/spinlock.h
index 3bba21e41b81..37b570d01202 100644
--- a/arch/alpha/include/asm/spinlock.h
+++ b/arch/alpha/include/asm/spinlock.h
@@ -168,8 +168,4 @@ static inline void arch_write_unlock(arch_rwlock_t * lock)
168#define arch_read_lock_flags(lock, flags) arch_read_lock(lock) 168#define arch_read_lock_flags(lock, flags) arch_read_lock(lock)
169#define arch_write_lock_flags(lock, flags) arch_write_lock(lock) 169#define arch_write_lock_flags(lock, flags) arch_write_lock(lock)
170 170
171#define arch_spin_relax(lock) cpu_relax()
172#define arch_read_relax(lock) cpu_relax()
173#define arch_write_relax(lock) cpu_relax()
174
175#endif /* _ALPHA_SPINLOCK_H */ 171#endif /* _ALPHA_SPINLOCK_H */
diff --git a/arch/alpha/include/asm/unistd.h b/arch/alpha/include/asm/unistd.h
index 43baee17acdf..f2c94402e2c8 100644
--- a/arch/alpha/include/asm/unistd.h
+++ b/arch/alpha/include/asm/unistd.h
@@ -3,8 +3,7 @@
3 3
4#include <uapi/asm/unistd.h> 4#include <uapi/asm/unistd.h>
5 5
6 6#define NR_SYSCALLS 508
7#define NR_SYSCALLS 506
8 7
9#define __ARCH_WANT_OLD_READDIR 8#define __ARCH_WANT_OLD_READDIR
10#define __ARCH_WANT_STAT64 9#define __ARCH_WANT_STAT64
diff --git a/arch/alpha/include/uapi/asm/param.h b/arch/alpha/include/uapi/asm/param.h
index 29daed819ebd..dbcd9834af6d 100644
--- a/arch/alpha/include/uapi/asm/param.h
+++ b/arch/alpha/include/uapi/asm/param.h
@@ -1,13 +1,7 @@
1#ifndef _UAPI_ASM_ALPHA_PARAM_H 1#ifndef _UAPI_ASM_ALPHA_PARAM_H
2#define _UAPI_ASM_ALPHA_PARAM_H 2#define _UAPI_ASM_ALPHA_PARAM_H
3 3
4/* ??? Gross. I don't want to parameterize this, and supposedly the
5 hardware ignores reprogramming. We also need userland buy-in to the
6 change in HZ, since this is visible in the wait4 resources etc. */
7
8#ifndef __KERNEL__
9#define HZ 1024 4#define HZ 1024
10#endif
11 5
12#define EXEC_PAGESIZE 8192 6#define EXEC_PAGESIZE 8192
13 7
@@ -17,5 +11,4 @@
17 11
18#define MAXHOSTNAMELEN 64 /* max length of hostname */ 12#define MAXHOSTNAMELEN 64 /* max length of hostname */
19 13
20
21#endif /* _UAPI_ASM_ALPHA_PARAM_H */ 14#endif /* _UAPI_ASM_ALPHA_PARAM_H */
diff --git a/arch/alpha/include/uapi/asm/unistd.h b/arch/alpha/include/uapi/asm/unistd.h
index 801d28bcea51..53ae7bb1bfd1 100644
--- a/arch/alpha/include/uapi/asm/unistd.h
+++ b/arch/alpha/include/uapi/asm/unistd.h
@@ -467,5 +467,7 @@
467#define __NR_sendmmsg 503 467#define __NR_sendmmsg 503
468#define __NR_process_vm_readv 504 468#define __NR_process_vm_readv 504
469#define __NR_process_vm_writev 505 469#define __NR_process_vm_writev 505
470#define __NR_kcmp 506
471#define __NR_finit_module 507
470 472
471#endif /* _UAPI_ALPHA_UNISTD_H */ 473#endif /* _UAPI_ALPHA_UNISTD_H */
diff --git a/arch/alpha/kernel/entry.S b/arch/alpha/kernel/entry.S
index f62a994ef126..a969b95ee5ac 100644
--- a/arch/alpha/kernel/entry.S
+++ b/arch/alpha/kernel/entry.S
@@ -12,11 +12,32 @@
12 12
13 .text 13 .text
14 .set noat 14 .set noat
15 .cfi_sections .debug_frame
15 16
16/* Stack offsets. */ 17/* Stack offsets. */
17#define SP_OFF 184 18#define SP_OFF 184
18#define SWITCH_STACK_SIZE 320 19#define SWITCH_STACK_SIZE 320
19 20
21.macro CFI_START_OSF_FRAME func
22 .align 4
23 .globl \func
24 .type \func,@function
25\func:
26 .cfi_startproc simple
27 .cfi_return_column 64
28 .cfi_def_cfa $sp, 48
29 .cfi_rel_offset 64, 8
30 .cfi_rel_offset $gp, 16
31 .cfi_rel_offset $16, 24
32 .cfi_rel_offset $17, 32
33 .cfi_rel_offset $18, 40
34.endm
35
36.macro CFI_END_OSF_FRAME func
37 .cfi_endproc
38 .size \func, . - \func
39.endm
40
20/* 41/*
21 * This defines the normal kernel pt-regs layout. 42 * This defines the normal kernel pt-regs layout.
22 * 43 *
@@ -27,100 +48,158 @@
27 * the palcode-provided values are available to the signal handler. 48 * the palcode-provided values are available to the signal handler.
28 */ 49 */
29 50
30#define SAVE_ALL \ 51.macro SAVE_ALL
31 subq $sp, SP_OFF, $sp; \ 52 subq $sp, SP_OFF, $sp
32 stq $0, 0($sp); \ 53 .cfi_adjust_cfa_offset SP_OFF
33 stq $1, 8($sp); \ 54 stq $0, 0($sp)
34 stq $2, 16($sp); \ 55 stq $1, 8($sp)
35 stq $3, 24($sp); \ 56 stq $2, 16($sp)
36 stq $4, 32($sp); \ 57 stq $3, 24($sp)
37 stq $28, 144($sp); \ 58 stq $4, 32($sp)
38 lda $2, alpha_mv; \ 59 stq $28, 144($sp)
39 stq $5, 40($sp); \ 60 .cfi_rel_offset $0, 0
40 stq $6, 48($sp); \ 61 .cfi_rel_offset $1, 8
41 stq $7, 56($sp); \ 62 .cfi_rel_offset $2, 16
42 stq $8, 64($sp); \ 63 .cfi_rel_offset $3, 24
43 stq $19, 72($sp); \ 64 .cfi_rel_offset $4, 32
44 stq $20, 80($sp); \ 65 .cfi_rel_offset $28, 144
45 stq $21, 88($sp); \ 66 lda $2, alpha_mv
46 ldq $2, HAE_CACHE($2); \ 67 stq $5, 40($sp)
47 stq $22, 96($sp); \ 68 stq $6, 48($sp)
48 stq $23, 104($sp); \ 69 stq $7, 56($sp)
49 stq $24, 112($sp); \ 70 stq $8, 64($sp)
50 stq $25, 120($sp); \ 71 stq $19, 72($sp)
51 stq $26, 128($sp); \ 72 stq $20, 80($sp)
52 stq $27, 136($sp); \ 73 stq $21, 88($sp)
53 stq $2, 152($sp); \ 74 ldq $2, HAE_CACHE($2)
54 stq $16, 160($sp); \ 75 stq $22, 96($sp)
55 stq $17, 168($sp); \ 76 stq $23, 104($sp)
77 stq $24, 112($sp)
78 stq $25, 120($sp)
79 stq $26, 128($sp)
80 stq $27, 136($sp)
81 stq $2, 152($sp)
82 stq $16, 160($sp)
83 stq $17, 168($sp)
56 stq $18, 176($sp) 84 stq $18, 176($sp)
85 .cfi_rel_offset $5, 40
86 .cfi_rel_offset $6, 48
87 .cfi_rel_offset $7, 56
88 .cfi_rel_offset $8, 64
89 .cfi_rel_offset $19, 72
90 .cfi_rel_offset $20, 80
91 .cfi_rel_offset $21, 88
92 .cfi_rel_offset $22, 96
93 .cfi_rel_offset $23, 104
94 .cfi_rel_offset $24, 112
95 .cfi_rel_offset $25, 120
96 .cfi_rel_offset $26, 128
97 .cfi_rel_offset $27, 136
98.endm
57 99
58#define RESTORE_ALL \ 100.macro RESTORE_ALL
59 lda $19, alpha_mv; \ 101 lda $19, alpha_mv
60 ldq $0, 0($sp); \ 102 ldq $0, 0($sp)
61 ldq $1, 8($sp); \ 103 ldq $1, 8($sp)
62 ldq $2, 16($sp); \ 104 ldq $2, 16($sp)
63 ldq $3, 24($sp); \ 105 ldq $3, 24($sp)
64 ldq $21, 152($sp); \ 106 ldq $21, 152($sp)
65 ldq $20, HAE_CACHE($19); \ 107 ldq $20, HAE_CACHE($19)
66 ldq $4, 32($sp); \ 108 ldq $4, 32($sp)
67 ldq $5, 40($sp); \ 109 ldq $5, 40($sp)
68 ldq $6, 48($sp); \ 110 ldq $6, 48($sp)
69 ldq $7, 56($sp); \ 111 ldq $7, 56($sp)
70 subq $20, $21, $20; \ 112 subq $20, $21, $20
71 ldq $8, 64($sp); \ 113 ldq $8, 64($sp)
72 beq $20, 99f; \ 114 beq $20, 99f
73 ldq $20, HAE_REG($19); \ 115 ldq $20, HAE_REG($19)
74 stq $21, HAE_CACHE($19); \ 116 stq $21, HAE_CACHE($19)
75 stq $21, 0($20); \ 117 stq $21, 0($20)
7699:; \ 11899: ldq $19, 72($sp)
77 ldq $19, 72($sp); \ 119 ldq $20, 80($sp)
78 ldq $20, 80($sp); \ 120 ldq $21, 88($sp)
79 ldq $21, 88($sp); \ 121 ldq $22, 96($sp)
80 ldq $22, 96($sp); \ 122 ldq $23, 104($sp)
81 ldq $23, 104($sp); \ 123 ldq $24, 112($sp)
82 ldq $24, 112($sp); \ 124 ldq $25, 120($sp)
83 ldq $25, 120($sp); \ 125 ldq $26, 128($sp)
84 ldq $26, 128($sp); \ 126 ldq $27, 136($sp)
85 ldq $27, 136($sp); \ 127 ldq $28, 144($sp)
86 ldq $28, 144($sp); \
87 addq $sp, SP_OFF, $sp 128 addq $sp, SP_OFF, $sp
129 .cfi_restore $0
130 .cfi_restore $1
131 .cfi_restore $2
132 .cfi_restore $3
133 .cfi_restore $4
134 .cfi_restore $5
135 .cfi_restore $6
136 .cfi_restore $7
137 .cfi_restore $8
138 .cfi_restore $19
139 .cfi_restore $20
140 .cfi_restore $21
141 .cfi_restore $22
142 .cfi_restore $23
143 .cfi_restore $24
144 .cfi_restore $25
145 .cfi_restore $26
146 .cfi_restore $27
147 .cfi_restore $28
148 .cfi_adjust_cfa_offset -SP_OFF
149.endm
150
151.macro DO_SWITCH_STACK
152 bsr $1, do_switch_stack
153 .cfi_adjust_cfa_offset SWITCH_STACK_SIZE
154 .cfi_rel_offset $9, 0
155 .cfi_rel_offset $10, 8
156 .cfi_rel_offset $11, 16
157 .cfi_rel_offset $12, 24
158 .cfi_rel_offset $13, 32
159 .cfi_rel_offset $14, 40
160 .cfi_rel_offset $15, 48
161 /* We don't really care about the FP registers for debugging. */
162.endm
163
164.macro UNDO_SWITCH_STACK
165 bsr $1, undo_switch_stack
166 .cfi_restore $9
167 .cfi_restore $10
168 .cfi_restore $11
169 .cfi_restore $12
170 .cfi_restore $13
171 .cfi_restore $14
172 .cfi_restore $15
173 .cfi_adjust_cfa_offset -SWITCH_STACK_SIZE
174.endm
88 175
89/* 176/*
90 * Non-syscall kernel entry points. 177 * Non-syscall kernel entry points.
91 */ 178 */
92 179
93 .align 4 180CFI_START_OSF_FRAME entInt
94 .globl entInt
95 .ent entInt
96entInt:
97 SAVE_ALL 181 SAVE_ALL
98 lda $8, 0x3fff 182 lda $8, 0x3fff
99 lda $26, ret_from_sys_call 183 lda $26, ret_from_sys_call
100 bic $sp, $8, $8 184 bic $sp, $8, $8
101 mov $sp, $19 185 mov $sp, $19
102 jsr $31, do_entInt 186 jsr $31, do_entInt
103.end entInt 187CFI_END_OSF_FRAME entInt
104 188
105 .align 4 189CFI_START_OSF_FRAME entArith
106 .globl entArith
107 .ent entArith
108entArith:
109 SAVE_ALL 190 SAVE_ALL
110 lda $8, 0x3fff 191 lda $8, 0x3fff
111 lda $26, ret_from_sys_call 192 lda $26, ret_from_sys_call
112 bic $sp, $8, $8 193 bic $sp, $8, $8
113 mov $sp, $18 194 mov $sp, $18
114 jsr $31, do_entArith 195 jsr $31, do_entArith
115.end entArith 196CFI_END_OSF_FRAME entArith
116 197
117 .align 4 198CFI_START_OSF_FRAME entMM
118 .globl entMM
119 .ent entMM
120entMM:
121 SAVE_ALL 199 SAVE_ALL
122/* save $9 - $15 so the inline exception code can manipulate them. */ 200/* save $9 - $15 so the inline exception code can manipulate them. */
123 subq $sp, 56, $sp 201 subq $sp, 56, $sp
202 .cfi_adjust_cfa_offset 56
124 stq $9, 0($sp) 203 stq $9, 0($sp)
125 stq $10, 8($sp) 204 stq $10, 8($sp)
126 stq $11, 16($sp) 205 stq $11, 16($sp)
@@ -128,6 +207,13 @@ entMM:
128 stq $13, 32($sp) 207 stq $13, 32($sp)
129 stq $14, 40($sp) 208 stq $14, 40($sp)
130 stq $15, 48($sp) 209 stq $15, 48($sp)
210 .cfi_rel_offset $9, 0
211 .cfi_rel_offset $10, 8
212 .cfi_rel_offset $11, 16
213 .cfi_rel_offset $12, 24
214 .cfi_rel_offset $13, 32
215 .cfi_rel_offset $14, 40
216 .cfi_rel_offset $15, 48
131 addq $sp, 56, $19 217 addq $sp, 56, $19
132/* handle the fault */ 218/* handle the fault */
133 lda $8, 0x3fff 219 lda $8, 0x3fff
@@ -142,28 +228,33 @@ entMM:
142 ldq $14, 40($sp) 228 ldq $14, 40($sp)
143 ldq $15, 48($sp) 229 ldq $15, 48($sp)
144 addq $sp, 56, $sp 230 addq $sp, 56, $sp
231 .cfi_restore $9
232 .cfi_restore $10
233 .cfi_restore $11
234 .cfi_restore $12
235 .cfi_restore $13
236 .cfi_restore $14
237 .cfi_restore $15
238 .cfi_adjust_cfa_offset -56
145/* finish up the syscall as normal. */ 239/* finish up the syscall as normal. */
146 br ret_from_sys_call 240 br ret_from_sys_call
147.end entMM 241CFI_END_OSF_FRAME entMM
148 242
149 .align 4 243CFI_START_OSF_FRAME entIF
150 .globl entIF
151 .ent entIF
152entIF:
153 SAVE_ALL 244 SAVE_ALL
154 lda $8, 0x3fff 245 lda $8, 0x3fff
155 lda $26, ret_from_sys_call 246 lda $26, ret_from_sys_call
156 bic $sp, $8, $8 247 bic $sp, $8, $8
157 mov $sp, $17 248 mov $sp, $17
158 jsr $31, do_entIF 249 jsr $31, do_entIF
159.end entIF 250CFI_END_OSF_FRAME entIF
160 251
161 .align 4 252CFI_START_OSF_FRAME entUna
162 .globl entUna
163 .ent entUna
164entUna:
165 lda $sp, -256($sp) 253 lda $sp, -256($sp)
254 .cfi_adjust_cfa_offset 256
166 stq $0, 0($sp) 255 stq $0, 0($sp)
256 .cfi_rel_offset $0, 0
257 .cfi_remember_state
167 ldq $0, 256($sp) /* get PS */ 258 ldq $0, 256($sp) /* get PS */
168 stq $1, 8($sp) 259 stq $1, 8($sp)
169 stq $2, 16($sp) 260 stq $2, 16($sp)
@@ -195,6 +286,32 @@ entUna:
195 stq $28, 224($sp) 286 stq $28, 224($sp)
196 mov $sp, $19 287 mov $sp, $19
197 stq $gp, 232($sp) 288 stq $gp, 232($sp)
289 .cfi_rel_offset $1, 1*8
290 .cfi_rel_offset $2, 2*8
291 .cfi_rel_offset $3, 3*8
292 .cfi_rel_offset $4, 4*8
293 .cfi_rel_offset $5, 5*8
294 .cfi_rel_offset $6, 6*8
295 .cfi_rel_offset $7, 7*8
296 .cfi_rel_offset $8, 8*8
297 .cfi_rel_offset $9, 9*8
298 .cfi_rel_offset $10, 10*8
299 .cfi_rel_offset $11, 11*8
300 .cfi_rel_offset $12, 12*8
301 .cfi_rel_offset $13, 13*8
302 .cfi_rel_offset $14, 14*8
303 .cfi_rel_offset $15, 15*8
304 .cfi_rel_offset $19, 19*8
305 .cfi_rel_offset $20, 20*8
306 .cfi_rel_offset $21, 21*8
307 .cfi_rel_offset $22, 22*8
308 .cfi_rel_offset $23, 23*8
309 .cfi_rel_offset $24, 24*8
310 .cfi_rel_offset $25, 25*8
311 .cfi_rel_offset $26, 26*8
312 .cfi_rel_offset $27, 27*8
313 .cfi_rel_offset $28, 28*8
314 .cfi_rel_offset $29, 29*8
198 lda $8, 0x3fff 315 lda $8, 0x3fff
199 stq $31, 248($sp) 316 stq $31, 248($sp)
200 bic $sp, $8, $8 317 bic $sp, $8, $8
@@ -228,16 +345,45 @@ entUna:
228 ldq $28, 224($sp) 345 ldq $28, 224($sp)
229 ldq $gp, 232($sp) 346 ldq $gp, 232($sp)
230 lda $sp, 256($sp) 347 lda $sp, 256($sp)
348 .cfi_restore $1
349 .cfi_restore $2
350 .cfi_restore $3
351 .cfi_restore $4
352 .cfi_restore $5
353 .cfi_restore $6
354 .cfi_restore $7
355 .cfi_restore $8
356 .cfi_restore $9
357 .cfi_restore $10
358 .cfi_restore $11
359 .cfi_restore $12
360 .cfi_restore $13
361 .cfi_restore $14
362 .cfi_restore $15
363 .cfi_restore $19
364 .cfi_restore $20
365 .cfi_restore $21
366 .cfi_restore $22
367 .cfi_restore $23
368 .cfi_restore $24
369 .cfi_restore $25
370 .cfi_restore $26
371 .cfi_restore $27
372 .cfi_restore $28
373 .cfi_restore $29
374 .cfi_adjust_cfa_offset -256
231 call_pal PAL_rti 375 call_pal PAL_rti
232.end entUna
233 376
234 .align 4 377 .align 4
235 .ent entUnaUser
236entUnaUser: 378entUnaUser:
379 .cfi_restore_state
237 ldq $0, 0($sp) /* restore original $0 */ 380 ldq $0, 0($sp) /* restore original $0 */
238 lda $sp, 256($sp) /* pop entUna's stack frame */ 381 lda $sp, 256($sp) /* pop entUna's stack frame */
382 .cfi_restore $0
383 .cfi_adjust_cfa_offset -256
239 SAVE_ALL /* setup normal kernel stack */ 384 SAVE_ALL /* setup normal kernel stack */
240 lda $sp, -56($sp) 385 lda $sp, -56($sp)
386 .cfi_adjust_cfa_offset 56
241 stq $9, 0($sp) 387 stq $9, 0($sp)
242 stq $10, 8($sp) 388 stq $10, 8($sp)
243 stq $11, 16($sp) 389 stq $11, 16($sp)
@@ -245,6 +391,13 @@ entUnaUser:
245 stq $13, 32($sp) 391 stq $13, 32($sp)
246 stq $14, 40($sp) 392 stq $14, 40($sp)
247 stq $15, 48($sp) 393 stq $15, 48($sp)
394 .cfi_rel_offset $9, 0
395 .cfi_rel_offset $10, 8
396 .cfi_rel_offset $11, 16
397 .cfi_rel_offset $12, 24
398 .cfi_rel_offset $13, 32
399 .cfi_rel_offset $14, 40
400 .cfi_rel_offset $15, 48
248 lda $8, 0x3fff 401 lda $8, 0x3fff
249 addq $sp, 56, $19 402 addq $sp, 56, $19
250 bic $sp, $8, $8 403 bic $sp, $8, $8
@@ -257,20 +410,25 @@ entUnaUser:
257 ldq $14, 40($sp) 410 ldq $14, 40($sp)
258 ldq $15, 48($sp) 411 ldq $15, 48($sp)
259 lda $sp, 56($sp) 412 lda $sp, 56($sp)
413 .cfi_restore $9
414 .cfi_restore $10
415 .cfi_restore $11
416 .cfi_restore $12
417 .cfi_restore $13
418 .cfi_restore $14
419 .cfi_restore $15
420 .cfi_adjust_cfa_offset -56
260 br ret_from_sys_call 421 br ret_from_sys_call
261.end entUnaUser 422CFI_END_OSF_FRAME entUna
262 423
263 .align 4 424CFI_START_OSF_FRAME entDbg
264 .globl entDbg
265 .ent entDbg
266entDbg:
267 SAVE_ALL 425 SAVE_ALL
268 lda $8, 0x3fff 426 lda $8, 0x3fff
269 lda $26, ret_from_sys_call 427 lda $26, ret_from_sys_call
270 bic $sp, $8, $8 428 bic $sp, $8, $8
271 mov $sp, $16 429 mov $sp, $16
272 jsr $31, do_entDbg 430 jsr $31, do_entDbg
273.end entDbg 431CFI_END_OSF_FRAME entDbg
274 432
275/* 433/*
276 * The system call entry point is special. Most importantly, it looks 434 * The system call entry point is special. Most importantly, it looks
@@ -285,8 +443,12 @@ entDbg:
285 443
286 .align 4 444 .align 4
287 .globl entSys 445 .globl entSys
288 .globl ret_from_sys_call 446 .type entSys, @function
289 .ent entSys 447 .cfi_startproc simple
448 .cfi_return_column 64
449 .cfi_def_cfa $sp, 48
450 .cfi_rel_offset 64, 8
451 .cfi_rel_offset $gp, 16
290entSys: 452entSys:
291 SAVE_ALL 453 SAVE_ALL
292 lda $8, 0x3fff 454 lda $8, 0x3fff
@@ -300,6 +462,9 @@ entSys:
300 stq $17, SP_OFF+32($sp) 462 stq $17, SP_OFF+32($sp)
301 s8addq $0, $5, $5 463 s8addq $0, $5, $5
302 stq $18, SP_OFF+40($sp) 464 stq $18, SP_OFF+40($sp)
465 .cfi_rel_offset $16, SP_OFF+24
466 .cfi_rel_offset $17, SP_OFF+32
467 .cfi_rel_offset $18, SP_OFF+40
303 blbs $3, strace 468 blbs $3, strace
304 beq $4, 1f 469 beq $4, 1f
305 ldq $27, 0($5) 470 ldq $27, 0($5)
@@ -310,6 +475,7 @@ entSys:
310 stq $31, 72($sp) /* a3=0 => no error */ 475 stq $31, 72($sp) /* a3=0 => no error */
311 476
312 .align 4 477 .align 4
478 .globl ret_from_sys_call
313ret_from_sys_call: 479ret_from_sys_call:
314 cmovne $26, 0, $18 /* $18 = 0 => non-restartable */ 480 cmovne $26, 0, $18 /* $18 = 0 => non-restartable */
315 ldq $0, SP_OFF($sp) 481 ldq $0, SP_OFF($sp)
@@ -324,10 +490,12 @@ ret_to_user:
324 and $17, _TIF_WORK_MASK, $2 490 and $17, _TIF_WORK_MASK, $2
325 bne $2, work_pending 491 bne $2, work_pending
326restore_all: 492restore_all:
493 .cfi_remember_state
327 RESTORE_ALL 494 RESTORE_ALL
328 call_pal PAL_rti 495 call_pal PAL_rti
329 496
330ret_to_kernel: 497ret_to_kernel:
498 .cfi_restore_state
331 lda $16, 7 499 lda $16, 7
332 call_pal PAL_swpipl 500 call_pal PAL_swpipl
333 br restore_all 501 br restore_all
@@ -356,7 +524,6 @@ $ret_success:
356 stq $0, 0($sp) 524 stq $0, 0($sp)
357 stq $31, 72($sp) /* a3=0 => no error */ 525 stq $31, 72($sp) /* a3=0 => no error */
358 br ret_from_sys_call 526 br ret_from_sys_call
359.end entSys
360 527
361/* 528/*
362 * Do all cleanup when returning from all interrupts and system calls. 529 * Do all cleanup when returning from all interrupts and system calls.
@@ -370,7 +537,7 @@ $ret_success:
370 */ 537 */
371 538
372 .align 4 539 .align 4
373 .ent work_pending 540 .type work_pending, @function
374work_pending: 541work_pending:
375 and $17, _TIF_NOTIFY_RESUME | _TIF_SIGPENDING, $2 542 and $17, _TIF_NOTIFY_RESUME | _TIF_SIGPENDING, $2
376 bne $2, $work_notifysig 543 bne $2, $work_notifysig
@@ -387,23 +554,22 @@ $work_resched:
387 554
388$work_notifysig: 555$work_notifysig:
389 mov $sp, $16 556 mov $sp, $16
390 bsr $1, do_switch_stack 557 DO_SWITCH_STACK
391 jsr $26, do_work_pending 558 jsr $26, do_work_pending
392 bsr $1, undo_switch_stack 559 UNDO_SWITCH_STACK
393 br restore_all 560 br restore_all
394.end work_pending
395 561
396/* 562/*
397 * PTRACE syscall handler 563 * PTRACE syscall handler
398 */ 564 */
399 565
400 .align 4 566 .align 4
401 .ent strace 567 .type strace, @function
402strace: 568strace:
403 /* set up signal stack, call syscall_trace */ 569 /* set up signal stack, call syscall_trace */
404 bsr $1, do_switch_stack 570 DO_SWITCH_STACK
405 jsr $26, syscall_trace_enter /* returns the syscall number */ 571 jsr $26, syscall_trace_enter /* returns the syscall number */
406 bsr $1, undo_switch_stack 572 UNDO_SWITCH_STACK
407 573
408 /* get the arguments back.. */ 574 /* get the arguments back.. */
409 ldq $16, SP_OFF+24($sp) 575 ldq $16, SP_OFF+24($sp)
@@ -431,9 +597,9 @@ ret_from_straced:
431$strace_success: 597$strace_success:
432 stq $0, 0($sp) /* save return value */ 598 stq $0, 0($sp) /* save return value */
433 599
434 bsr $1, do_switch_stack 600 DO_SWITCH_STACK
435 jsr $26, syscall_trace_leave 601 jsr $26, syscall_trace_leave
436 bsr $1, undo_switch_stack 602 UNDO_SWITCH_STACK
437 br $31, ret_from_sys_call 603 br $31, ret_from_sys_call
438 604
439 .align 3 605 .align 3
@@ -447,26 +613,31 @@ $strace_error:
447 stq $0, 0($sp) 613 stq $0, 0($sp)
448 stq $1, 72($sp) /* a3 for return */ 614 stq $1, 72($sp) /* a3 for return */
449 615
450 bsr $1, do_switch_stack 616 DO_SWITCH_STACK
451 mov $18, $9 /* save old syscall number */ 617 mov $18, $9 /* save old syscall number */
452 mov $19, $10 /* save old a3 */ 618 mov $19, $10 /* save old a3 */
453 jsr $26, syscall_trace_leave 619 jsr $26, syscall_trace_leave
454 mov $9, $18 620 mov $9, $18
455 mov $10, $19 621 mov $10, $19
456 bsr $1, undo_switch_stack 622 UNDO_SWITCH_STACK
457 623
458 mov $31, $26 /* tell "ret_from_sys_call" we can restart */ 624 mov $31, $26 /* tell "ret_from_sys_call" we can restart */
459 br ret_from_sys_call 625 br ret_from_sys_call
460.end strace 626CFI_END_OSF_FRAME entSys
461 627
462/* 628/*
463 * Save and restore the switch stack -- aka the balance of the user context. 629 * Save and restore the switch stack -- aka the balance of the user context.
464 */ 630 */
465 631
466 .align 4 632 .align 4
467 .ent do_switch_stack 633 .type do_switch_stack, @function
634 .cfi_startproc simple
635 .cfi_return_column 64
636 .cfi_def_cfa $sp, 0
637 .cfi_register 64, $1
468do_switch_stack: 638do_switch_stack:
469 lda $sp, -SWITCH_STACK_SIZE($sp) 639 lda $sp, -SWITCH_STACK_SIZE($sp)
640 .cfi_adjust_cfa_offset SWITCH_STACK_SIZE
470 stq $9, 0($sp) 641 stq $9, 0($sp)
471 stq $10, 8($sp) 642 stq $10, 8($sp)
472 stq $11, 16($sp) 643 stq $11, 16($sp)
@@ -510,10 +681,14 @@ do_switch_stack:
510 stt $f0, 312($sp) # save fpcr in slot of $f31 681 stt $f0, 312($sp) # save fpcr in slot of $f31
511 ldt $f0, 64($sp) # dont let "do_switch_stack" change fp state. 682 ldt $f0, 64($sp) # dont let "do_switch_stack" change fp state.
512 ret $31, ($1), 1 683 ret $31, ($1), 1
513.end do_switch_stack 684 .cfi_endproc
685 .size do_switch_stack, .-do_switch_stack
514 686
515 .align 4 687 .align 4
516 .ent undo_switch_stack 688 .type undo_switch_stack, @function
689 .cfi_startproc simple
690 .cfi_def_cfa $sp, 0
691 .cfi_register 64, $1
517undo_switch_stack: 692undo_switch_stack:
518 ldq $9, 0($sp) 693 ldq $9, 0($sp)
519 ldq $10, 8($sp) 694 ldq $10, 8($sp)
@@ -558,7 +733,8 @@ undo_switch_stack:
558 ldt $f30, 304($sp) 733 ldt $f30, 304($sp)
559 lda $sp, SWITCH_STACK_SIZE($sp) 734 lda $sp, SWITCH_STACK_SIZE($sp)
560 ret $31, ($1), 1 735 ret $31, ($1), 1
561.end undo_switch_stack 736 .cfi_endproc
737 .size undo_switch_stack, .-undo_switch_stack
562 738
563/* 739/*
564 * The meat of the context switch code. 740 * The meat of the context switch code.
@@ -566,17 +742,18 @@ undo_switch_stack:
566 742
567 .align 4 743 .align 4
568 .globl alpha_switch_to 744 .globl alpha_switch_to
569 .ent alpha_switch_to 745 .type alpha_switch_to, @function
746 .cfi_startproc
570alpha_switch_to: 747alpha_switch_to:
571 .prologue 0 748 DO_SWITCH_STACK
572 bsr $1, do_switch_stack
573 call_pal PAL_swpctx 749 call_pal PAL_swpctx
574 lda $8, 0x3fff 750 lda $8, 0x3fff
575 bsr $1, undo_switch_stack 751 UNDO_SWITCH_STACK
576 bic $sp, $8, $8 752 bic $sp, $8, $8
577 mov $17, $0 753 mov $17, $0
578 ret 754 ret
579.end alpha_switch_to 755 .cfi_endproc
756 .size alpha_switch_to, .-alpha_switch_to
580 757
581/* 758/*
582 * New processes begin life here. 759 * New processes begin life here.
diff --git a/arch/alpha/kernel/irq_alpha.c b/arch/alpha/kernel/irq_alpha.c
index f433fc11877a..28e4429596f3 100644
--- a/arch/alpha/kernel/irq_alpha.c
+++ b/arch/alpha/kernel/irq_alpha.c
@@ -236,7 +236,7 @@ void __init
236init_rtc_irq(void) 236init_rtc_irq(void)
237{ 237{
238 irq_set_chip_and_handler_name(RTC_IRQ, &dummy_irq_chip, 238 irq_set_chip_and_handler_name(RTC_IRQ, &dummy_irq_chip,
239 handle_simple_irq, "RTC"); 239 handle_percpu_irq, "RTC");
240 setup_irq(RTC_IRQ, &timer_irqaction); 240 setup_irq(RTC_IRQ, &timer_irqaction);
241} 241}
242 242
diff --git a/arch/alpha/kernel/smp.c b/arch/alpha/kernel/smp.c
index 7b60834fb4b2..9dbbcb3b9146 100644
--- a/arch/alpha/kernel/smp.c
+++ b/arch/alpha/kernel/smp.c
@@ -116,7 +116,7 @@ wait_boot_cpu_to_stop(int cpuid)
116/* 116/*
117 * Where secondaries begin a life of C. 117 * Where secondaries begin a life of C.
118 */ 118 */
119void __cpuinit 119void
120smp_callin(void) 120smp_callin(void)
121{ 121{
122 int cpuid = hard_smp_processor_id(); 122 int cpuid = hard_smp_processor_id();
@@ -194,7 +194,7 @@ wait_for_txrdy (unsigned long cpumask)
194 * Send a message to a secondary's console. "START" is one such 194 * Send a message to a secondary's console. "START" is one such
195 * interesting message. ;-) 195 * interesting message. ;-)
196 */ 196 */
197static void __cpuinit 197static void
198send_secondary_console_msg(char *str, int cpuid) 198send_secondary_console_msg(char *str, int cpuid)
199{ 199{
200 struct percpu_struct *cpu; 200 struct percpu_struct *cpu;
@@ -264,9 +264,10 @@ recv_secondary_console_msg(void)
264 if (cnt <= 0 || cnt >= 80) 264 if (cnt <= 0 || cnt >= 80)
265 strcpy(buf, "<<< BOGUS MSG >>>"); 265 strcpy(buf, "<<< BOGUS MSG >>>");
266 else { 266 else {
267 cp1 = (char *) &cpu->ipc_buffer[11]; 267 cp1 = (char *) &cpu->ipc_buffer[1];
268 cp2 = buf; 268 cp2 = buf;
269 strcpy(cp2, cp1); 269 memcpy(cp2, cp1, cnt);
270 cp2[cnt] = '\0';
270 271
271 while ((cp2 = strchr(cp2, '\r')) != 0) { 272 while ((cp2 = strchr(cp2, '\r')) != 0) {
272 *cp2 = ' '; 273 *cp2 = ' ';
@@ -285,7 +286,7 @@ recv_secondary_console_msg(void)
285/* 286/*
286 * Convince the console to have a secondary cpu begin execution. 287 * Convince the console to have a secondary cpu begin execution.
287 */ 288 */
288static int __cpuinit 289static int
289secondary_cpu_start(int cpuid, struct task_struct *idle) 290secondary_cpu_start(int cpuid, struct task_struct *idle)
290{ 291{
291 struct percpu_struct *cpu; 292 struct percpu_struct *cpu;
@@ -356,7 +357,7 @@ secondary_cpu_start(int cpuid, struct task_struct *idle)
356/* 357/*
357 * Bring one cpu online. 358 * Bring one cpu online.
358 */ 359 */
359static int __cpuinit 360static int
360smp_boot_one_cpu(int cpuid, struct task_struct *idle) 361smp_boot_one_cpu(int cpuid, struct task_struct *idle)
361{ 362{
362 unsigned long timeout; 363 unsigned long timeout;
@@ -472,7 +473,7 @@ smp_prepare_boot_cpu(void)
472{ 473{
473} 474}
474 475
475int __cpuinit 476int
476__cpu_up(unsigned int cpu, struct task_struct *tidle) 477__cpu_up(unsigned int cpu, struct task_struct *tidle)
477{ 478{
478 smp_boot_one_cpu(cpu, tidle); 479 smp_boot_one_cpu(cpu, tidle);
diff --git a/arch/alpha/kernel/sys_dp264.c b/arch/alpha/kernel/sys_dp264.c
index 5bf401f7ea97..6c35159bc00e 100644
--- a/arch/alpha/kernel/sys_dp264.c
+++ b/arch/alpha/kernel/sys_dp264.c
@@ -190,9 +190,6 @@ static struct irq_chip clipper_irq_type = {
190static void 190static void
191dp264_device_interrupt(unsigned long vector) 191dp264_device_interrupt(unsigned long vector)
192{ 192{
193#if 1
194 printk("dp264_device_interrupt: NOT IMPLEMENTED YET!!\n");
195#else
196 unsigned long pld; 193 unsigned long pld;
197 unsigned int i; 194 unsigned int i;
198 195
@@ -210,12 +207,7 @@ dp264_device_interrupt(unsigned long vector)
210 isa_device_interrupt(vector); 207 isa_device_interrupt(vector);
211 else 208 else
212 handle_irq(16 + i); 209 handle_irq(16 + i);
213#if 0
214 TSUNAMI_cchip->dir0.csr = 1UL << i; mb();
215 tmp = TSUNAMI_cchip->dir0.csr;
216#endif
217 } 210 }
218#endif
219} 211}
220 212
221static void 213static void
diff --git a/arch/alpha/kernel/sys_marvel.c b/arch/alpha/kernel/sys_marvel.c
index 407accc80877..c92e389ff219 100644
--- a/arch/alpha/kernel/sys_marvel.c
+++ b/arch/alpha/kernel/sys_marvel.c
@@ -317,8 +317,9 @@ marvel_init_irq(void)
317} 317}
318 318
319static int 319static int
320marvel_map_irq(struct pci_dev *dev, u8 slot, u8 pin) 320marvel_map_irq(const struct pci_dev *cdev, u8 slot, u8 pin)
321{ 321{
322 struct pci_dev *dev = (struct pci_dev *)cdev;
322 struct pci_controller *hose = dev->sysdata; 323 struct pci_controller *hose = dev->sysdata;
323 struct io7_port *io7_port = hose->sysdata; 324 struct io7_port *io7_port = hose->sysdata;
324 struct io7 *io7 = io7_port->io7; 325 struct io7 *io7 = io7_port->io7;
diff --git a/arch/alpha/kernel/systbls.S b/arch/alpha/kernel/systbls.S
index 4284ec798ec9..dca9b3fb0071 100644
--- a/arch/alpha/kernel/systbls.S
+++ b/arch/alpha/kernel/systbls.S
@@ -524,6 +524,8 @@ sys_call_table:
524 .quad sys_sendmmsg 524 .quad sys_sendmmsg
525 .quad sys_process_vm_readv 525 .quad sys_process_vm_readv
526 .quad sys_process_vm_writev /* 505 */ 526 .quad sys_process_vm_writev /* 505 */
527 .quad sys_kcmp
528 .quad sys_finit_module
527 529
528 .size sys_call_table, . - sys_call_table 530 .size sys_call_table, . - sys_call_table
529 .type sys_call_table, @object 531 .type sys_call_table, @object
diff --git a/arch/alpha/kernel/time.c b/arch/alpha/kernel/time.c
index e336694ca042..ea3395036556 100644
--- a/arch/alpha/kernel/time.c
+++ b/arch/alpha/kernel/time.c
@@ -105,9 +105,7 @@ void arch_irq_work_raise(void)
105 105
106static inline __u32 rpcc(void) 106static inline __u32 rpcc(void)
107{ 107{
108 __u32 result; 108 return __builtin_alpha_rpcc();
109 asm volatile ("rpcc %0" : "=r"(result));
110 return result;
111} 109}
112 110
113int update_persistent_clock(struct timespec now) 111int update_persistent_clock(struct timespec now)
diff --git a/arch/alpha/kernel/traps.c b/arch/alpha/kernel/traps.c
index affccb959a9e..bd0665cdc840 100644
--- a/arch/alpha/kernel/traps.c
+++ b/arch/alpha/kernel/traps.c
@@ -32,7 +32,7 @@
32 32
33static int opDEC_fix; 33static int opDEC_fix;
34 34
35static void __cpuinit 35static void
36opDEC_check(void) 36opDEC_check(void)
37{ 37{
38 __asm__ __volatile__ ( 38 __asm__ __volatile__ (
@@ -66,8 +66,8 @@ dik_show_regs(struct pt_regs *regs, unsigned long *r9_15)
66{ 66{
67 printk("pc = [<%016lx>] ra = [<%016lx>] ps = %04lx %s\n", 67 printk("pc = [<%016lx>] ra = [<%016lx>] ps = %04lx %s\n",
68 regs->pc, regs->r26, regs->ps, print_tainted()); 68 regs->pc, regs->r26, regs->ps, print_tainted());
69 print_symbol("pc is at %s\n", regs->pc); 69 printk("pc is at %pSR\n", (void *)regs->pc);
70 print_symbol("ra is at %s\n", regs->r26 ); 70 printk("ra is at %pSR\n", (void *)regs->r26);
71 printk("v0 = %016lx t0 = %016lx t1 = %016lx\n", 71 printk("v0 = %016lx t0 = %016lx t1 = %016lx\n",
72 regs->r0, regs->r1, regs->r2); 72 regs->r0, regs->r1, regs->r2);
73 printk("t2 = %016lx t3 = %016lx t4 = %016lx\n", 73 printk("t2 = %016lx t3 = %016lx t4 = %016lx\n",
@@ -132,9 +132,7 @@ dik_show_trace(unsigned long *sp)
132 continue; 132 continue;
133 if (tmp >= (unsigned long) &_etext) 133 if (tmp >= (unsigned long) &_etext)
134 continue; 134 continue;
135 printk("[<%lx>]", tmp); 135 printk("[<%lx>] %pSR\n", tmp, (void *)tmp);
136 print_symbol(" %s", tmp);
137 printk("\n");
138 if (i > 40) { 136 if (i > 40) {
139 printk(" ..."); 137 printk(" ...");
140 break; 138 break;
@@ -1059,7 +1057,7 @@ give_sigbus:
1059 return; 1057 return;
1060} 1058}
1061 1059
1062void __cpuinit 1060void
1063trap_init(void) 1061trap_init(void)
1064{ 1062{
1065 /* Tell PAL-code what global pointer we want in the kernel. */ 1063 /* Tell PAL-code what global pointer we want in the kernel. */
diff --git a/arch/arc/include/asm/entry.h b/arch/arc/include/asm/entry.h
index 8943c028d4bb..df57611652e5 100644
--- a/arch/arc/include/asm/entry.h
+++ b/arch/arc/include/asm/entry.h
@@ -38,6 +38,7 @@
38#include <asm/ptrace.h> 38#include <asm/ptrace.h>
39#include <asm/processor.h> /* For VMALLOC_START */ 39#include <asm/processor.h> /* For VMALLOC_START */
40#include <asm/thread_info.h> /* For THREAD_SIZE */ 40#include <asm/thread_info.h> /* For THREAD_SIZE */
41#include <asm/mmu.h>
41 42
42/* Note on the LD/ST addr modes with addr reg wback 43/* Note on the LD/ST addr modes with addr reg wback
43 * 44 *
diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
index ba412e02ec0c..43594d5116ef 100644
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -20,7 +20,6 @@ config ARM
20 select GENERIC_STRNCPY_FROM_USER 20 select GENERIC_STRNCPY_FROM_USER
21 select GENERIC_STRNLEN_USER 21 select GENERIC_STRNLEN_USER
22 select HARDIRQS_SW_RESEND 22 select HARDIRQS_SW_RESEND
23 select HAVE_AOUT
24 select HAVE_ARCH_JUMP_LABEL if !XIP_KERNEL 23 select HAVE_ARCH_JUMP_LABEL if !XIP_KERNEL
25 select HAVE_ARCH_KGDB 24 select HAVE_ARCH_KGDB
26 select HAVE_ARCH_SECCOMP_FILTER 25 select HAVE_ARCH_SECCOMP_FILTER
@@ -218,7 +217,8 @@ config VECTORS_BASE
218 default DRAM_BASE if REMAP_VECTORS_TO_RAM 217 default DRAM_BASE if REMAP_VECTORS_TO_RAM
219 default 0x00000000 218 default 0x00000000
220 help 219 help
221 The base address of exception vectors. 220 The base address of exception vectors. This must be two pages
221 in size.
222 222
223config ARM_PATCH_PHYS_VIRT 223config ARM_PATCH_PHYS_VIRT
224 bool "Patch physical to virtual translations at runtime" if EMBEDDED 224 bool "Patch physical to virtual translations at runtime" if EMBEDDED
@@ -1600,8 +1600,7 @@ config LOCAL_TIMERS
1600config ARCH_NR_GPIO 1600config ARCH_NR_GPIO
1601 int 1601 int
1602 default 1024 if ARCH_SHMOBILE || ARCH_TEGRA 1602 default 1024 if ARCH_SHMOBILE || ARCH_TEGRA
1603 default 512 if SOC_OMAP5 1603 default 512 if ARCH_EXYNOS || ARCH_KEYSTONE || SOC_OMAP5
1604 default 512 if ARCH_KEYSTONE
1605 default 392 if ARCH_U8500 1604 default 392 if ARCH_U8500
1606 default 352 if ARCH_VT8500 1605 default 352 if ARCH_VT8500
1607 default 288 if ARCH_SUNXI 1606 default 288 if ARCH_SUNXI
diff --git a/arch/arm/Kconfig.debug b/arch/arm/Kconfig.debug
index e401a766c0bd..583f4a00ec32 100644
--- a/arch/arm/Kconfig.debug
+++ b/arch/arm/Kconfig.debug
@@ -804,9 +804,19 @@ config DEBUG_LL_INCLUDE
804 804
805config DEBUG_UNCOMPRESS 805config DEBUG_UNCOMPRESS
806 bool 806 bool
807 default y if ARCH_MULTIPLATFORM && DEBUG_LL && \ 807 depends on ARCH_MULTIPLATFORM
808 !DEBUG_OMAP2PLUS_UART && \ 808 default y if DEBUG_LL && !DEBUG_OMAP2PLUS_UART && \
809 !DEBUG_TEGRA_UART 809 !DEBUG_TEGRA_UART
810 help
811 This option influences the normal decompressor output for
812 multiplatform kernels. Normally, multiplatform kernels disable
813 decompressor output because it is not possible to know where to
814 send the decompressor output.
815
816 When this option is set, the selected DEBUG_LL output method
817 will be re-used for normal decompressor output on multiplatform
818 kernels.
819
810 820
811config UNCOMPRESS_INCLUDE 821config UNCOMPRESS_INCLUDE
812 string 822 string
diff --git a/arch/arm/Makefile b/arch/arm/Makefile
index c0ac0f5e5e5c..6fd2ceae305a 100644
--- a/arch/arm/Makefile
+++ b/arch/arm/Makefile
@@ -153,6 +153,7 @@ machine-$(CONFIG_ARCH_DAVINCI) += davinci
153machine-$(CONFIG_ARCH_DOVE) += dove 153machine-$(CONFIG_ARCH_DOVE) += dove
154machine-$(CONFIG_ARCH_EBSA110) += ebsa110 154machine-$(CONFIG_ARCH_EBSA110) += ebsa110
155machine-$(CONFIG_ARCH_EP93XX) += ep93xx 155machine-$(CONFIG_ARCH_EP93XX) += ep93xx
156machine-$(CONFIG_ARCH_EXYNOS) += exynos
156machine-$(CONFIG_ARCH_GEMINI) += gemini 157machine-$(CONFIG_ARCH_GEMINI) += gemini
157machine-$(CONFIG_ARCH_HIGHBANK) += highbank 158machine-$(CONFIG_ARCH_HIGHBANK) += highbank
158machine-$(CONFIG_ARCH_INTEGRATOR) += integrator 159machine-$(CONFIG_ARCH_INTEGRATOR) += integrator
@@ -160,15 +161,16 @@ machine-$(CONFIG_ARCH_IOP13XX) += iop13xx
160machine-$(CONFIG_ARCH_IOP32X) += iop32x 161machine-$(CONFIG_ARCH_IOP32X) += iop32x
161machine-$(CONFIG_ARCH_IOP33X) += iop33x 162machine-$(CONFIG_ARCH_IOP33X) += iop33x
162machine-$(CONFIG_ARCH_IXP4XX) += ixp4xx 163machine-$(CONFIG_ARCH_IXP4XX) += ixp4xx
164machine-$(CONFIG_ARCH_KEYSTONE) += keystone
163machine-$(CONFIG_ARCH_KIRKWOOD) += kirkwood 165machine-$(CONFIG_ARCH_KIRKWOOD) += kirkwood
164machine-$(CONFIG_ARCH_KS8695) += ks8695 166machine-$(CONFIG_ARCH_KS8695) += ks8695
165machine-$(CONFIG_ARCH_LPC32XX) += lpc32xx 167machine-$(CONFIG_ARCH_LPC32XX) += lpc32xx
166machine-$(CONFIG_ARCH_MMP) += mmp 168machine-$(CONFIG_ARCH_MMP) += mmp
167machine-$(CONFIG_ARCH_MSM) += msm 169machine-$(CONFIG_ARCH_MSM) += msm
168machine-$(CONFIG_ARCH_MV78XX0) += mv78xx0 170machine-$(CONFIG_ARCH_MV78XX0) += mv78xx0
171machine-$(CONFIG_ARCH_MVEBU) += mvebu
169machine-$(CONFIG_ARCH_MXC) += imx 172machine-$(CONFIG_ARCH_MXC) += imx
170machine-$(CONFIG_ARCH_MXS) += mxs 173machine-$(CONFIG_ARCH_MXS) += mxs
171machine-$(CONFIG_ARCH_MVEBU) += mvebu
172machine-$(CONFIG_ARCH_NETX) += netx 174machine-$(CONFIG_ARCH_NETX) += netx
173machine-$(CONFIG_ARCH_NOMADIK) += nomadik 175machine-$(CONFIG_ARCH_NOMADIK) += nomadik
174machine-$(CONFIG_ARCH_NSPIRE) += nspire 176machine-$(CONFIG_ARCH_NSPIRE) += nspire
@@ -176,7 +178,6 @@ machine-$(CONFIG_ARCH_OMAP1) += omap1
176machine-$(CONFIG_ARCH_OMAP2PLUS) += omap2 178machine-$(CONFIG_ARCH_OMAP2PLUS) += omap2
177machine-$(CONFIG_ARCH_ORION5X) += orion5x 179machine-$(CONFIG_ARCH_ORION5X) += orion5x
178machine-$(CONFIG_ARCH_PICOXCELL) += picoxcell 180machine-$(CONFIG_ARCH_PICOXCELL) += picoxcell
179machine-$(CONFIG_ARCH_SIRF) += prima2
180machine-$(CONFIG_ARCH_PXA) += pxa 181machine-$(CONFIG_ARCH_PXA) += pxa
181machine-$(CONFIG_ARCH_REALVIEW) += realview 182machine-$(CONFIG_ARCH_REALVIEW) += realview
182machine-$(CONFIG_ARCH_ROCKCHIP) += rockchip 183machine-$(CONFIG_ARCH_ROCKCHIP) += rockchip
@@ -186,25 +187,24 @@ machine-$(CONFIG_ARCH_S3C64XX) += s3c64xx
186machine-$(CONFIG_ARCH_S5P64X0) += s5p64x0 187machine-$(CONFIG_ARCH_S5P64X0) += s5p64x0
187machine-$(CONFIG_ARCH_S5PC100) += s5pc100 188machine-$(CONFIG_ARCH_S5PC100) += s5pc100
188machine-$(CONFIG_ARCH_S5PV210) += s5pv210 189machine-$(CONFIG_ARCH_S5PV210) += s5pv210
189machine-$(CONFIG_ARCH_EXYNOS) += exynos
190machine-$(CONFIG_ARCH_SA1100) += sa1100 190machine-$(CONFIG_ARCH_SA1100) += sa1100
191machine-$(CONFIG_ARCH_SHARK) += shark 191machine-$(CONFIG_ARCH_SHARK) += shark
192machine-$(CONFIG_ARCH_SHMOBILE) += shmobile 192machine-$(CONFIG_ARCH_SHMOBILE) += shmobile
193machine-$(CONFIG_ARCH_SIRF) += prima2
194machine-$(CONFIG_ARCH_SOCFPGA) += socfpga
195machine-$(CONFIG_ARCH_STI) += sti
196machine-$(CONFIG_ARCH_SUNXI) += sunxi
193machine-$(CONFIG_ARCH_TEGRA) += tegra 197machine-$(CONFIG_ARCH_TEGRA) += tegra
194machine-$(CONFIG_ARCH_U300) += u300 198machine-$(CONFIG_ARCH_U300) += u300
195machine-$(CONFIG_ARCH_U8500) += ux500 199machine-$(CONFIG_ARCH_U8500) += ux500
196machine-$(CONFIG_ARCH_VERSATILE) += versatile 200machine-$(CONFIG_ARCH_VERSATILE) += versatile
197machine-$(CONFIG_ARCH_VEXPRESS) += vexpress 201machine-$(CONFIG_ARCH_VEXPRESS) += vexpress
202machine-$(CONFIG_ARCH_VIRT) += virt
198machine-$(CONFIG_ARCH_VT8500) += vt8500 203machine-$(CONFIG_ARCH_VT8500) += vt8500
199machine-$(CONFIG_ARCH_W90X900) += w90x900 204machine-$(CONFIG_ARCH_W90X900) += w90x900
205machine-$(CONFIG_ARCH_ZYNQ) += zynq
200machine-$(CONFIG_FOOTBRIDGE) += footbridge 206machine-$(CONFIG_FOOTBRIDGE) += footbridge
201machine-$(CONFIG_ARCH_SOCFPGA) += socfpga
202machine-$(CONFIG_PLAT_SPEAR) += spear 207machine-$(CONFIG_PLAT_SPEAR) += spear
203machine-$(CONFIG_ARCH_STI) += sti
204machine-$(CONFIG_ARCH_VIRT) += virt
205machine-$(CONFIG_ARCH_ZYNQ) += zynq
206machine-$(CONFIG_ARCH_SUNXI) += sunxi
207machine-$(CONFIG_ARCH_KEYSTONE) += keystone
208 208
209# Platform directory name. This list is sorted alphanumerically 209# Platform directory name. This list is sorted alphanumerically
210# by CONFIG_* macro name. 210# by CONFIG_* macro name.
diff --git a/arch/arm/boot/dts/atlas6.dtsi b/arch/arm/boot/dts/atlas6.dtsi
index 9866cd736dee..a0f2721ea583 100644
--- a/arch/arm/boot/dts/atlas6.dtsi
+++ b/arch/arm/boot/dts/atlas6.dtsi
@@ -485,6 +485,12 @@
485 sirf,function = "usp0"; 485 sirf,function = "usp0";
486 }; 486 };
487 }; 487 };
488 usp0_uart_nostreamctrl_pins_a: usp0@1 {
489 usp0 {
490 sirf,pins = "usp0_uart_nostreamctrl_grp";
491 sirf,function = "usp0_uart_nostreamctrl";
492 };
493 };
488 usp1_pins_a: usp1@0 { 494 usp1_pins_a: usp1@0 {
489 usp1 { 495 usp1 {
490 sirf,pins = "usp1grp"; 496 sirf,pins = "usp1grp";
@@ -515,16 +521,16 @@
515 sirf,function = "pulse_count"; 521 sirf,function = "pulse_count";
516 }; 522 };
517 }; 523 };
518 cko0_rst_pins_a: cko0_rst@0 { 524 cko0_pins_a: cko0@0 {
519 cko0_rst { 525 cko0 {
520 sirf,pins = "cko0_rstgrp"; 526 sirf,pins = "cko0grp";
521 sirf,function = "cko0_rst"; 527 sirf,function = "cko0";
522 }; 528 };
523 }; 529 };
524 cko1_rst_pins_a: cko1_rst@0 { 530 cko1_pins_a: cko1@0 {
525 cko1_rst { 531 cko1 {
526 sirf,pins = "cko1_rstgrp"; 532 sirf,pins = "cko1grp";
527 sirf,function = "cko1_rst"; 533 sirf,function = "cko1";
528 }; 534 };
529 }; 535 };
530 }; 536 };
diff --git a/arch/arm/boot/dts/imx28-apx4devkit.dts b/arch/arm/boot/dts/imx28-apx4devkit.dts
index 43bf3c796cba..0e7fed47bd8d 100644
--- a/arch/arm/boot/dts/imx28-apx4devkit.dts
+++ b/arch/arm/boot/dts/imx28-apx4devkit.dts
@@ -147,7 +147,7 @@
147 reg = <0x0a>; 147 reg = <0x0a>;
148 VDDA-supply = <&reg_3p3v>; 148 VDDA-supply = <&reg_3p3v>;
149 VDDIO-supply = <&reg_3p3v>; 149 VDDIO-supply = <&reg_3p3v>;
150 150 clocks = <&saif0>;
151 }; 151 };
152 152
153 pcf8563: rtc@51 { 153 pcf8563: rtc@51 {
diff --git a/arch/arm/boot/dts/imx28-evk.dts b/arch/arm/boot/dts/imx28-evk.dts
index 1f0d38d7b16f..e035f4664b97 100644
--- a/arch/arm/boot/dts/imx28-evk.dts
+++ b/arch/arm/boot/dts/imx28-evk.dts
@@ -195,7 +195,7 @@
195 reg = <0x0a>; 195 reg = <0x0a>;
196 VDDA-supply = <&reg_3p3v>; 196 VDDA-supply = <&reg_3p3v>;
197 VDDIO-supply = <&reg_3p3v>; 197 VDDIO-supply = <&reg_3p3v>;
198 198 clocks = <&saif0>;
199 }; 199 };
200 200
201 at24@51 { 201 at24@51 {
diff --git a/arch/arm/boot/dts/imx28-m28evk.dts b/arch/arm/boot/dts/imx28-m28evk.dts
index 880df2f13be8..44d9da57736e 100644
--- a/arch/arm/boot/dts/imx28-m28evk.dts
+++ b/arch/arm/boot/dts/imx28-m28evk.dts
@@ -184,7 +184,7 @@
184 reg = <0x0a>; 184 reg = <0x0a>;
185 VDDA-supply = <&reg_3p3v>; 185 VDDA-supply = <&reg_3p3v>;
186 VDDIO-supply = <&reg_3p3v>; 186 VDDIO-supply = <&reg_3p3v>;
187 187 clocks = <&saif0>;
188 }; 188 };
189 189
190 eeprom: eeprom@51 { 190 eeprom: eeprom@51 {
diff --git a/arch/arm/boot/dts/imx28.dtsi b/arch/arm/boot/dts/imx28.dtsi
index 6a8acb01b1d3..9524a0571281 100644
--- a/arch/arm/boot/dts/imx28.dtsi
+++ b/arch/arm/boot/dts/imx28.dtsi
@@ -837,6 +837,7 @@
837 compatible = "fsl,imx28-saif"; 837 compatible = "fsl,imx28-saif";
838 reg = <0x80042000 0x2000>; 838 reg = <0x80042000 0x2000>;
839 interrupts = <59 80>; 839 interrupts = <59 80>;
840 #clock-cells = <0>;
840 clocks = <&clks 53>; 841 clocks = <&clks 53>;
841 dmas = <&dma_apbx 4>; 842 dmas = <&dma_apbx 4>;
842 dma-names = "rx-tx"; 843 dma-names = "rx-tx";
diff --git a/arch/arm/boot/dts/imx51-babbage.dts b/arch/arm/boot/dts/imx51-babbage.dts
index 6dd9486c755b..ad3471ca17c7 100644
--- a/arch/arm/boot/dts/imx51-babbage.dts
+++ b/arch/arm/boot/dts/imx51-babbage.dts
@@ -61,6 +61,16 @@
61 mux-int-port = <2>; 61 mux-int-port = <2>;
62 mux-ext-port = <3>; 62 mux-ext-port = <3>;
63 }; 63 };
64
65 clocks {
66 clk_26M: codec_clock {
67 compatible = "fixed-clock";
68 reg=<0>;
69 #clock-cells = <0>;
70 clock-frequency = <26000000>;
71 gpios = <&gpio4 26 1>;
72 };
73 };
64}; 74};
65 75
66&esdhc1 { 76&esdhc1 {
@@ -229,6 +239,7 @@
229 MX51_PAD_EIM_A27__GPIO2_21 0x5 239 MX51_PAD_EIM_A27__GPIO2_21 0x5
230 MX51_PAD_CSPI1_SS0__GPIO4_24 0x85 240 MX51_PAD_CSPI1_SS0__GPIO4_24 0x85
231 MX51_PAD_CSPI1_SS1__GPIO4_25 0x85 241 MX51_PAD_CSPI1_SS1__GPIO4_25 0x85
242 MX51_PAD_CSPI1_RDY__GPIO4_26 0x80000000
232 >; 243 >;
233 }; 244 };
234 }; 245 };
@@ -255,7 +266,7 @@
255 sgtl5000: codec@0a { 266 sgtl5000: codec@0a {
256 compatible = "fsl,sgtl5000"; 267 compatible = "fsl,sgtl5000";
257 reg = <0x0a>; 268 reg = <0x0a>;
258 clock-frequency = <26000000>; 269 clocks = <&clk_26M>;
259 VDDA-supply = <&vdig_reg>; 270 VDDA-supply = <&vdig_reg>;
260 VDDIO-supply = <&vvideo_reg>; 271 VDDIO-supply = <&vvideo_reg>;
261 }; 272 };
diff --git a/arch/arm/boot/dts/imx53-mba53.dts b/arch/arm/boot/dts/imx53-mba53.dts
index aaa33bc99f78..a63090267941 100644
--- a/arch/arm/boot/dts/imx53-mba53.dts
+++ b/arch/arm/boot/dts/imx53-mba53.dts
@@ -27,7 +27,7 @@
27 27
28 backlight { 28 backlight {
29 compatible = "pwm-backlight"; 29 compatible = "pwm-backlight";
30 pwms = <&pwm2 0 50000 0 0>; 30 pwms = <&pwm2 0 50000>;
31 brightness-levels = <0 24 28 32 36 40 44 48 52 56 60 64 68 72 76 80 84 88 92 96 100>; 31 brightness-levels = <0 24 28 32 36 40 44 48 52 56 60 64 68 72 76 80 84 88 92 96 100>;
32 default-brightness-level = <10>; 32 default-brightness-level = <10>;
33 enable-gpios = <&gpio7 7 0>; 33 enable-gpios = <&gpio7 7 0>;
diff --git a/arch/arm/boot/dts/imx53.dtsi b/arch/arm/boot/dts/imx53.dtsi
index 3895fbba8fce..569aa9f2c4ed 100644
--- a/arch/arm/boot/dts/imx53.dtsi
+++ b/arch/arm/boot/dts/imx53.dtsi
@@ -725,15 +725,15 @@
725 uart1 { 725 uart1 {
726 pinctrl_uart1_1: uart1grp-1 { 726 pinctrl_uart1_1: uart1grp-1 {
727 fsl,pins = < 727 fsl,pins = <
728 MX53_PAD_CSI0_DAT10__UART1_TXD_MUX 0x1c5 728 MX53_PAD_CSI0_DAT10__UART1_TXD_MUX 0x1e4
729 MX53_PAD_CSI0_DAT11__UART1_RXD_MUX 0x1c5 729 MX53_PAD_CSI0_DAT11__UART1_RXD_MUX 0x1e4
730 >; 730 >;
731 }; 731 };
732 732
733 pinctrl_uart1_2: uart1grp-2 { 733 pinctrl_uart1_2: uart1grp-2 {
734 fsl,pins = < 734 fsl,pins = <
735 MX53_PAD_PATA_DIOW__UART1_TXD_MUX 0x1c5 735 MX53_PAD_PATA_DIOW__UART1_TXD_MUX 0x1e4
736 MX53_PAD_PATA_DMACK__UART1_RXD_MUX 0x1c5 736 MX53_PAD_PATA_DMACK__UART1_RXD_MUX 0x1e4
737 >; 737 >;
738 }; 738 };
739 739
@@ -748,8 +748,8 @@
748 uart2 { 748 uart2 {
749 pinctrl_uart2_1: uart2grp-1 { 749 pinctrl_uart2_1: uart2grp-1 {
750 fsl,pins = < 750 fsl,pins = <
751 MX53_PAD_PATA_BUFFER_EN__UART2_RXD_MUX 0x1c5 751 MX53_PAD_PATA_BUFFER_EN__UART2_RXD_MUX 0x1e4
752 MX53_PAD_PATA_DMARQ__UART2_TXD_MUX 0x1c5 752 MX53_PAD_PATA_DMARQ__UART2_TXD_MUX 0x1e4
753 >; 753 >;
754 }; 754 };
755 755
@@ -766,17 +766,17 @@
766 uart3 { 766 uart3 {
767 pinctrl_uart3_1: uart3grp-1 { 767 pinctrl_uart3_1: uart3grp-1 {
768 fsl,pins = < 768 fsl,pins = <
769 MX53_PAD_PATA_CS_0__UART3_TXD_MUX 0x1c5 769 MX53_PAD_PATA_CS_0__UART3_TXD_MUX 0x1e4
770 MX53_PAD_PATA_CS_1__UART3_RXD_MUX 0x1c5 770 MX53_PAD_PATA_CS_1__UART3_RXD_MUX 0x1e4
771 MX53_PAD_PATA_DA_1__UART3_CTS 0x1c5 771 MX53_PAD_PATA_DA_1__UART3_CTS 0x1e4
772 MX53_PAD_PATA_DA_2__UART3_RTS 0x1c5 772 MX53_PAD_PATA_DA_2__UART3_RTS 0x1e4
773 >; 773 >;
774 }; 774 };
775 775
776 pinctrl_uart3_2: uart3grp-2 { 776 pinctrl_uart3_2: uart3grp-2 {
777 fsl,pins = < 777 fsl,pins = <
778 MX53_PAD_PATA_CS_0__UART3_TXD_MUX 0x1c5 778 MX53_PAD_PATA_CS_0__UART3_TXD_MUX 0x1e4
779 MX53_PAD_PATA_CS_1__UART3_RXD_MUX 0x1c5 779 MX53_PAD_PATA_CS_1__UART3_RXD_MUX 0x1e4
780 >; 780 >;
781 }; 781 };
782 782
@@ -785,8 +785,8 @@
785 uart4 { 785 uart4 {
786 pinctrl_uart4_1: uart4grp-1 { 786 pinctrl_uart4_1: uart4grp-1 {
787 fsl,pins = < 787 fsl,pins = <
788 MX53_PAD_KEY_COL0__UART4_TXD_MUX 0x1c5 788 MX53_PAD_KEY_COL0__UART4_TXD_MUX 0x1e4
789 MX53_PAD_KEY_ROW0__UART4_RXD_MUX 0x1c5 789 MX53_PAD_KEY_ROW0__UART4_RXD_MUX 0x1e4
790 >; 790 >;
791 }; 791 };
792 }; 792 };
@@ -794,8 +794,8 @@
794 uart5 { 794 uart5 {
795 pinctrl_uart5_1: uart5grp-1 { 795 pinctrl_uart5_1: uart5grp-1 {
796 fsl,pins = < 796 fsl,pins = <
797 MX53_PAD_KEY_COL1__UART5_TXD_MUX 0x1c5 797 MX53_PAD_KEY_COL1__UART5_TXD_MUX 0x1e4
798 MX53_PAD_KEY_ROW1__UART5_RXD_MUX 0x1c5 798 MX53_PAD_KEY_ROW1__UART5_RXD_MUX 0x1e4
799 >; 799 >;
800 }; 800 };
801 }; 801 };
diff --git a/arch/arm/boot/dts/msm8960-cdp.dts b/arch/arm/boot/dts/msm8960-cdp.dts
index db2060c46540..9c1167b0459b 100644
--- a/arch/arm/boot/dts/msm8960-cdp.dts
+++ b/arch/arm/boot/dts/msm8960-cdp.dts
@@ -26,7 +26,7 @@
26 cpu-offset = <0x80000>; 26 cpu-offset = <0x80000>;
27 }; 27 };
28 28
29 msmgpio: gpio@fd510000 { 29 msmgpio: gpio@800000 {
30 compatible = "qcom,msm-gpio"; 30 compatible = "qcom,msm-gpio";
31 gpio-controller; 31 gpio-controller;
32 #gpio-cells = <2>; 32 #gpio-cells = <2>;
@@ -34,7 +34,7 @@
34 interrupts = <0 32 0x4>; 34 interrupts = <0 32 0x4>;
35 interrupt-controller; 35 interrupt-controller;
36 #interrupt-cells = <2>; 36 #interrupt-cells = <2>;
37 reg = <0xfd510000 0x4000>; 37 reg = <0x800000 0x4000>;
38 }; 38 };
39 39
40 serial@16440000 { 40 serial@16440000 {
diff --git a/arch/arm/boot/dts/omap5-uevm.dts b/arch/arm/boot/dts/omap5-uevm.dts
index 08b72678abff..65d7b601651c 100644
--- a/arch/arm/boot/dts/omap5-uevm.dts
+++ b/arch/arm/boot/dts/omap5-uevm.dts
@@ -235,7 +235,7 @@
235}; 235};
236 236
237&mmc1 { 237&mmc1 {
238 vmmc-supply = <&vmmcsd_fixed>; 238 vmmc-supply = <&ldo9_reg>;
239 bus-width = <4>; 239 bus-width = <4>;
240}; 240};
241 241
@@ -282,6 +282,7 @@
282 282
283 regulators { 283 regulators {
284 smps123_reg: smps123 { 284 smps123_reg: smps123 {
285 /* VDD_OPP_MPU */
285 regulator-name = "smps123"; 286 regulator-name = "smps123";
286 regulator-min-microvolt = < 600000>; 287 regulator-min-microvolt = < 600000>;
287 regulator-max-microvolt = <1500000>; 288 regulator-max-microvolt = <1500000>;
@@ -290,6 +291,7 @@
290 }; 291 };
291 292
292 smps45_reg: smps45 { 293 smps45_reg: smps45 {
294 /* VDD_OPP_MM */
293 regulator-name = "smps45"; 295 regulator-name = "smps45";
294 regulator-min-microvolt = < 600000>; 296 regulator-min-microvolt = < 600000>;
295 regulator-max-microvolt = <1310000>; 297 regulator-max-microvolt = <1310000>;
@@ -298,6 +300,7 @@
298 }; 300 };
299 301
300 smps6_reg: smps6 { 302 smps6_reg: smps6 {
303 /* VDD_DDR3 - over VDD_SMPS6 */
301 regulator-name = "smps6"; 304 regulator-name = "smps6";
302 regulator-min-microvolt = <1200000>; 305 regulator-min-microvolt = <1200000>;
303 regulator-max-microvolt = <1200000>; 306 regulator-max-microvolt = <1200000>;
@@ -306,6 +309,7 @@
306 }; 309 };
307 310
308 smps7_reg: smps7 { 311 smps7_reg: smps7 {
312 /* VDDS_1v8_OMAP over VDDS_1v8_MAIN */
309 regulator-name = "smps7"; 313 regulator-name = "smps7";
310 regulator-min-microvolt = <1800000>; 314 regulator-min-microvolt = <1800000>;
311 regulator-max-microvolt = <1800000>; 315 regulator-max-microvolt = <1800000>;
@@ -314,6 +318,7 @@
314 }; 318 };
315 319
316 smps8_reg: smps8 { 320 smps8_reg: smps8 {
321 /* VDD_OPP_CORE */
317 regulator-name = "smps8"; 322 regulator-name = "smps8";
318 regulator-min-microvolt = < 600000>; 323 regulator-min-microvolt = < 600000>;
319 regulator-max-microvolt = <1310000>; 324 regulator-max-microvolt = <1310000>;
@@ -322,15 +327,15 @@
322 }; 327 };
323 328
324 smps9_reg: smps9 { 329 smps9_reg: smps9 {
330 /* VDDA_2v1_AUD over VDD_2v1 */
325 regulator-name = "smps9"; 331 regulator-name = "smps9";
326 regulator-min-microvolt = <2100000>; 332 regulator-min-microvolt = <2100000>;
327 regulator-max-microvolt = <2100000>; 333 regulator-max-microvolt = <2100000>;
328 regulator-always-on;
329 regulator-boot-on;
330 ti,smps-range = <0x80>; 334 ti,smps-range = <0x80>;
331 }; 335 };
332 336
333 smps10_reg: smps10 { 337 smps10_reg: smps10 {
338 /* VBUS_5V_OTG */
334 regulator-name = "smps10"; 339 regulator-name = "smps10";
335 regulator-min-microvolt = <5000000>; 340 regulator-min-microvolt = <5000000>;
336 regulator-max-microvolt = <5000000>; 341 regulator-max-microvolt = <5000000>;
@@ -339,38 +344,40 @@
339 }; 344 };
340 345
341 ldo1_reg: ldo1 { 346 ldo1_reg: ldo1 {
347 /* VDDAPHY_CAM: vdda_csiport */
342 regulator-name = "ldo1"; 348 regulator-name = "ldo1";
343 regulator-min-microvolt = <2800000>; 349 regulator-min-microvolt = <1500000>;
344 regulator-max-microvolt = <2800000>; 350 regulator-max-microvolt = <1800000>;
345 regulator-always-on;
346 regulator-boot-on;
347 }; 351 };
348 352
349 ldo2_reg: ldo2 { 353 ldo2_reg: ldo2 {
354 /* VCC_2V8_DISP: Does not go anywhere */
350 regulator-name = "ldo2"; 355 regulator-name = "ldo2";
351 regulator-min-microvolt = <2900000>; 356 regulator-min-microvolt = <2800000>;
352 regulator-max-microvolt = <2900000>; 357 regulator-max-microvolt = <2800000>;
353 regulator-always-on; 358 /* Unused */
354 regulator-boot-on; 359 status = "disabled";
355 }; 360 };
356 361
357 ldo3_reg: ldo3 { 362 ldo3_reg: ldo3 {
363 /* VDDAPHY_MDM: vdda_lli */
358 regulator-name = "ldo3"; 364 regulator-name = "ldo3";
359 regulator-min-microvolt = <3000000>; 365 regulator-min-microvolt = <1500000>;
360 regulator-max-microvolt = <3000000>; 366 regulator-max-microvolt = <1500000>;
361 regulator-always-on;
362 regulator-boot-on; 367 regulator-boot-on;
368 /* Only if Modem is used */
369 status = "disabled";
363 }; 370 };
364 371
365 ldo4_reg: ldo4 { 372 ldo4_reg: ldo4 {
373 /* VDDAPHY_DISP: vdda_dsiport/hdmi */
366 regulator-name = "ldo4"; 374 regulator-name = "ldo4";
367 regulator-min-microvolt = <2200000>; 375 regulator-min-microvolt = <1500000>;
368 regulator-max-microvolt = <2200000>; 376 regulator-max-microvolt = <1800000>;
369 regulator-always-on;
370 regulator-boot-on;
371 }; 377 };
372 378
373 ldo5_reg: ldo5 { 379 ldo5_reg: ldo5 {
380 /* VDDA_1V8_PHY: usb/sata/hdmi.. */
374 regulator-name = "ldo5"; 381 regulator-name = "ldo5";
375 regulator-min-microvolt = <1800000>; 382 regulator-min-microvolt = <1800000>;
376 regulator-max-microvolt = <1800000>; 383 regulator-max-microvolt = <1800000>;
@@ -379,38 +386,43 @@
379 }; 386 };
380 387
381 ldo6_reg: ldo6 { 388 ldo6_reg: ldo6 {
389 /* VDDS_1V2_WKUP: hsic/ldo_emu_wkup */
382 regulator-name = "ldo6"; 390 regulator-name = "ldo6";
383 regulator-min-microvolt = <1500000>; 391 regulator-min-microvolt = <1200000>;
384 regulator-max-microvolt = <1500000>; 392 regulator-max-microvolt = <1200000>;
385 regulator-always-on; 393 regulator-always-on;
386 regulator-boot-on; 394 regulator-boot-on;
387 }; 395 };
388 396
389 ldo7_reg: ldo7 { 397 ldo7_reg: ldo7 {
398 /* VDD_VPP: vpp1 */
390 regulator-name = "ldo7"; 399 regulator-name = "ldo7";
391 regulator-min-microvolt = <1500000>; 400 regulator-min-microvolt = <2000000>;
392 regulator-max-microvolt = <1500000>; 401 regulator-max-microvolt = <2000000>;
393 regulator-always-on; 402 /* Only for efuse reprograming! */
394 regulator-boot-on; 403 status = "disabled";
395 }; 404 };
396 405
397 ldo8_reg: ldo8 { 406 ldo8_reg: ldo8 {
407 /* VDD_3v0: Does not go anywhere */
398 regulator-name = "ldo8"; 408 regulator-name = "ldo8";
399 regulator-min-microvolt = <1500000>; 409 regulator-min-microvolt = <3000000>;
400 regulator-max-microvolt = <1500000>; 410 regulator-max-microvolt = <3000000>;
401 regulator-always-on;
402 regulator-boot-on; 411 regulator-boot-on;
412 /* Unused */
413 status = "disabled";
403 }; 414 };
404 415
405 ldo9_reg: ldo9 { 416 ldo9_reg: ldo9 {
417 /* VCC_DV_SDIO: vdds_sdcard */
406 regulator-name = "ldo9"; 418 regulator-name = "ldo9";
407 regulator-min-microvolt = <1800000>; 419 regulator-min-microvolt = <1800000>;
408 regulator-max-microvolt = <3300000>; 420 regulator-max-microvolt = <3000000>;
409 regulator-always-on;
410 regulator-boot-on; 421 regulator-boot-on;
411 }; 422 };
412 423
413 ldoln_reg: ldoln { 424 ldoln_reg: ldoln {
425 /* VDDA_1v8_REF: vdds_osc/mm_l4per.. */
414 regulator-name = "ldoln"; 426 regulator-name = "ldoln";
415 regulator-min-microvolt = <1800000>; 427 regulator-min-microvolt = <1800000>;
416 regulator-max-microvolt = <1800000>; 428 regulator-max-microvolt = <1800000>;
@@ -419,12 +431,20 @@
419 }; 431 };
420 432
421 ldousb_reg: ldousb { 433 ldousb_reg: ldousb {
434 /* VDDA_3V_USB: VDDA_USBHS33 */
422 regulator-name = "ldousb"; 435 regulator-name = "ldousb";
423 regulator-min-microvolt = <3250000>; 436 regulator-min-microvolt = <3250000>;
424 regulator-max-microvolt = <3250000>; 437 regulator-max-microvolt = <3250000>;
425 regulator-always-on; 438 regulator-always-on;
426 regulator-boot-on; 439 regulator-boot-on;
427 }; 440 };
441
442 regen3_reg: regen3 {
443 /* REGEN3 controls LDO9 supply to card */
444 regulator-name = "regen3";
445 regulator-always-on;
446 regulator-boot-on;
447 };
428 }; 448 };
429 }; 449 };
430 }; 450 };
diff --git a/arch/arm/boot/dts/prima2.dtsi b/arch/arm/boot/dts/prima2.dtsi
index 05e9489cf95c..bbeb623fc2c6 100644
--- a/arch/arm/boot/dts/prima2.dtsi
+++ b/arch/arm/boot/dts/prima2.dtsi
@@ -515,16 +515,16 @@
515 sirf,function = "pulse_count"; 515 sirf,function = "pulse_count";
516 }; 516 };
517 }; 517 };
518 cko0_rst_pins_a: cko0_rst@0 { 518 cko0_pins_a: cko0@0 {
519 cko0_rst { 519 cko0 {
520 sirf,pins = "cko0_rstgrp"; 520 sirf,pins = "cko0grp";
521 sirf,function = "cko0_rst"; 521 sirf,function = "cko0";
522 }; 522 };
523 }; 523 };
524 cko1_rst_pins_a: cko1_rst@0 { 524 cko1_pins_a: cko1@0 {
525 cko1_rst { 525 cko1 {
526 sirf,pins = "cko1_rstgrp"; 526 sirf,pins = "cko1grp";
527 sirf,function = "cko1_rst"; 527 sirf,function = "cko1";
528 }; 528 };
529 }; 529 };
530 }; 530 };
diff --git a/arch/arm/boot/dts/stih416-pinctrl.dtsi b/arch/arm/boot/dts/stih416-pinctrl.dtsi
index 957b21a71b4b..0f246c979262 100644
--- a/arch/arm/boot/dts/stih416-pinctrl.dtsi
+++ b/arch/arm/boot/dts/stih416-pinctrl.dtsi
@@ -166,6 +166,15 @@
166 reg = <0x9000 0x100>; 166 reg = <0x9000 0x100>;
167 st,bank-name = "PIO31"; 167 st,bank-name = "PIO31";
168 }; 168 };
169
170 serial2-oe {
171 pinctrl_serial2_oe: serial2-1 {
172 st,pins {
173 output-enable = <&PIO11 3 ALT2 OUT>;
174 };
175 };
176 };
177
169 }; 178 };
170 179
171 pin-controller-rear { 180 pin-controller-rear {
@@ -218,7 +227,6 @@
218 st,pins { 227 st,pins {
219 tx = <&PIO17 4 ALT2 OUT>; 228 tx = <&PIO17 4 ALT2 OUT>;
220 rx = <&PIO17 5 ALT2 IN>; 229 rx = <&PIO17 5 ALT2 IN>;
221 output-enable = <&PIO11 3 ALT2 OUT>;
222 }; 230 };
223 }; 231 };
224 }; 232 };
diff --git a/arch/arm/boot/dts/stih416.dtsi b/arch/arm/boot/dts/stih416.dtsi
index 3cecd9689a49..1a0326ea7d07 100644
--- a/arch/arm/boot/dts/stih416.dtsi
+++ b/arch/arm/boot/dts/stih416.dtsi
@@ -79,7 +79,7 @@
79 interrupts = <0 197 0>; 79 interrupts = <0 197 0>;
80 clocks = <&CLK_S_ICN_REG_0>; 80 clocks = <&CLK_S_ICN_REG_0>;
81 pinctrl-names = "default"; 81 pinctrl-names = "default";
82 pinctrl-0 = <&pinctrl_serial2>; 82 pinctrl-0 = <&pinctrl_serial2 &pinctrl_serial2_oe>;
83 }; 83 };
84 84
85 /* SBC_UART1 */ 85 /* SBC_UART1 */
diff --git a/arch/arm/boot/dts/stih41x.dtsi b/arch/arm/boot/dts/stih41x.dtsi
index 7321403cab8a..f5b9898d9c6e 100644
--- a/arch/arm/boot/dts/stih41x.dtsi
+++ b/arch/arm/boot/dts/stih41x.dtsi
@@ -6,10 +6,12 @@
6 #address-cells = <1>; 6 #address-cells = <1>;
7 #size-cells = <0>; 7 #size-cells = <0>;
8 cpu@0 { 8 cpu@0 {
9 device_type = "cpu";
9 compatible = "arm,cortex-a9"; 10 compatible = "arm,cortex-a9";
10 reg = <0>; 11 reg = <0>;
11 }; 12 };
12 cpu@1 { 13 cpu@1 {
14 device_type = "cpu";
13 compatible = "arm,cortex-a9"; 15 compatible = "arm,cortex-a9";
14 reg = <1>; 16 reg = <1>;
15 }; 17 };
diff --git a/arch/arm/boot/dts/tegra20-colibri-512.dtsi b/arch/arm/boot/dts/tegra20-colibri-512.dtsi
index 2fcb3f2ca160..5592be6f2f7a 100644
--- a/arch/arm/boot/dts/tegra20-colibri-512.dtsi
+++ b/arch/arm/boot/dts/tegra20-colibri-512.dtsi
@@ -457,6 +457,7 @@
457 }; 457 };
458 458
459 usb-phy@c5004000 { 459 usb-phy@c5004000 {
460 status = "okay";
460 nvidia,phy-reset-gpio = <&gpio TEGRA_GPIO(V, 1) 461 nvidia,phy-reset-gpio = <&gpio TEGRA_GPIO(V, 1)
461 GPIO_ACTIVE_LOW>; 462 GPIO_ACTIVE_LOW>;
462 }; 463 };
diff --git a/arch/arm/boot/dts/twl4030.dtsi b/arch/arm/boot/dts/twl4030.dtsi
index b3034da00a37..ae6a17aed9ee 100644
--- a/arch/arm/boot/dts/twl4030.dtsi
+++ b/arch/arm/boot/dts/twl4030.dtsi
@@ -47,6 +47,12 @@
47 regulator-max-microvolt = <3150000>; 47 regulator-max-microvolt = <3150000>;
48 }; 48 };
49 49
50 vmmc2: regulator-vmmc2 {
51 compatible = "ti,twl4030-vmmc2";
52 regulator-min-microvolt = <1850000>;
53 regulator-max-microvolt = <3150000>;
54 };
55
50 vusb1v5: regulator-vusb1v5 { 56 vusb1v5: regulator-vusb1v5 {
51 compatible = "ti,twl4030-vusb1v5"; 57 compatible = "ti,twl4030-vusb1v5";
52 }; 58 };
diff --git a/arch/arm/boot/dts/vf610.dtsi b/arch/arm/boot/dts/vf610.dtsi
index e1eb7dadda80..67d929cf9804 100644
--- a/arch/arm/boot/dts/vf610.dtsi
+++ b/arch/arm/boot/dts/vf610.dtsi
@@ -442,8 +442,8 @@
442 compatible = "fsl,mvf600-fec"; 442 compatible = "fsl,mvf600-fec";
443 reg = <0x400d0000 0x1000>; 443 reg = <0x400d0000 0x1000>;
444 interrupts = <0 78 0x04>; 444 interrupts = <0 78 0x04>;
445 clocks = <&clks VF610_CLK_ENET>, 445 clocks = <&clks VF610_CLK_ENET0>,
446 <&clks VF610_CLK_ENET>, 446 <&clks VF610_CLK_ENET0>,
447 <&clks VF610_CLK_ENET>; 447 <&clks VF610_CLK_ENET>;
448 clock-names = "ipg", "ahb", "ptp"; 448 clock-names = "ipg", "ahb", "ptp";
449 status = "disabled"; 449 status = "disabled";
@@ -453,8 +453,8 @@
453 compatible = "fsl,mvf600-fec"; 453 compatible = "fsl,mvf600-fec";
454 reg = <0x400d1000 0x1000>; 454 reg = <0x400d1000 0x1000>;
455 interrupts = <0 79 0x04>; 455 interrupts = <0 79 0x04>;
456 clocks = <&clks VF610_CLK_ENET>, 456 clocks = <&clks VF610_CLK_ENET1>,
457 <&clks VF610_CLK_ENET>, 457 <&clks VF610_CLK_ENET1>,
458 <&clks VF610_CLK_ENET>; 458 <&clks VF610_CLK_ENET>;
459 clock-names = "ipg", "ahb", "ptp"; 459 clock-names = "ipg", "ahb", "ptp";
460 status = "disabled"; 460 status = "disabled";
diff --git a/arch/arm/common/edma.c b/arch/arm/common/edma.c
index a432e6c1dac1..39ad030ac0c7 100644
--- a/arch/arm/common/edma.c
+++ b/arch/arm/common/edma.c
@@ -26,7 +26,6 @@
26#include <linux/io.h> 26#include <linux/io.h>
27#include <linux/slab.h> 27#include <linux/slab.h>
28#include <linux/edma.h> 28#include <linux/edma.h>
29#include <linux/err.h>
30#include <linux/of_address.h> 29#include <linux/of_address.h>
31#include <linux/of_device.h> 30#include <linux/of_device.h>
32#include <linux/of_dma.h> 31#include <linux/of_dma.h>
diff --git a/arch/arm/common/mcpm_platsmp.c b/arch/arm/common/mcpm_platsmp.c
index 510e5b13aa2e..1bc34c7567fd 100644
--- a/arch/arm/common/mcpm_platsmp.c
+++ b/arch/arm/common/mcpm_platsmp.c
@@ -19,7 +19,7 @@
19#include <asm/smp.h> 19#include <asm/smp.h>
20#include <asm/smp_plat.h> 20#include <asm/smp_plat.h>
21 21
22static int __cpuinit mcpm_boot_secondary(unsigned int cpu, struct task_struct *idle) 22static int mcpm_boot_secondary(unsigned int cpu, struct task_struct *idle)
23{ 23{
24 unsigned int mpidr, pcpu, pcluster, ret; 24 unsigned int mpidr, pcpu, pcluster, ret;
25 extern void secondary_startup(void); 25 extern void secondary_startup(void);
@@ -40,7 +40,7 @@ static int __cpuinit mcpm_boot_secondary(unsigned int cpu, struct task_struct *i
40 return 0; 40 return 0;
41} 41}
42 42
43static void __cpuinit mcpm_secondary_init(unsigned int cpu) 43static void mcpm_secondary_init(unsigned int cpu)
44{ 44{
45 mcpm_cpu_powered_up(); 45 mcpm_cpu_powered_up();
46} 46}
diff --git a/arch/arm/configs/da8xx_omapl_defconfig b/arch/arm/configs/da8xx_omapl_defconfig
index 7c868139bdb0..1571bea48bed 100644
--- a/arch/arm/configs/da8xx_omapl_defconfig
+++ b/arch/arm/configs/da8xx_omapl_defconfig
@@ -102,6 +102,8 @@ CONFIG_SND_SOC=m
102CONFIG_SND_DAVINCI_SOC=m 102CONFIG_SND_DAVINCI_SOC=m
103# CONFIG_HID_SUPPORT is not set 103# CONFIG_HID_SUPPORT is not set
104# CONFIG_USB_SUPPORT is not set 104# CONFIG_USB_SUPPORT is not set
105CONFIG_DMADEVICES=y
106CONFIG_TI_EDMA=y
105CONFIG_EXT2_FS=y 107CONFIG_EXT2_FS=y
106CONFIG_EXT3_FS=y 108CONFIG_EXT3_FS=y
107CONFIG_XFS_FS=m 109CONFIG_XFS_FS=m
diff --git a/arch/arm/configs/davinci_all_defconfig b/arch/arm/configs/davinci_all_defconfig
index c86fd75e181a..ab2f7378352c 100644
--- a/arch/arm/configs/davinci_all_defconfig
+++ b/arch/arm/configs/davinci_all_defconfig
@@ -162,6 +162,8 @@ CONFIG_LEDS_TRIGGERS=y
162CONFIG_LEDS_TRIGGER_TIMER=m 162CONFIG_LEDS_TRIGGER_TIMER=m
163CONFIG_LEDS_TRIGGER_HEARTBEAT=m 163CONFIG_LEDS_TRIGGER_HEARTBEAT=m
164CONFIG_RTC_CLASS=y 164CONFIG_RTC_CLASS=y
165CONFIG_DMADEVICES=y
166CONFIG_TI_EDMA=y
165CONFIG_EXT2_FS=y 167CONFIG_EXT2_FS=y
166CONFIG_EXT3_FS=y 168CONFIG_EXT3_FS=y
167CONFIG_XFS_FS=m 169CONFIG_XFS_FS=m
diff --git a/arch/arm/configs/multi_v7_defconfig b/arch/arm/configs/multi_v7_defconfig
index fe0bdc361d2c..6e572c64cf5a 100644
--- a/arch/arm/configs/multi_v7_defconfig
+++ b/arch/arm/configs/multi_v7_defconfig
@@ -53,6 +53,7 @@ CONFIG_IP_PNP=y
53CONFIG_IP_PNP_DHCP=y 53CONFIG_IP_PNP_DHCP=y
54CONFIG_DEVTMPFS=y 54CONFIG_DEVTMPFS=y
55CONFIG_DEVTMPFS_MOUNT=y 55CONFIG_DEVTMPFS_MOUNT=y
56CONFIG_OMAP_OCP2SCP=y
56CONFIG_BLK_DEV_SD=y 57CONFIG_BLK_DEV_SD=y
57CONFIG_ATA=y 58CONFIG_ATA=y
58CONFIG_SATA_AHCI_PLATFORM=y 59CONFIG_SATA_AHCI_PLATFORM=y
@@ -61,6 +62,7 @@ CONFIG_SATA_MV=y
61CONFIG_NETDEVICES=y 62CONFIG_NETDEVICES=y
62CONFIG_SUN4I_EMAC=y 63CONFIG_SUN4I_EMAC=y
63CONFIG_NET_CALXEDA_XGMAC=y 64CONFIG_NET_CALXEDA_XGMAC=y
65CONFIG_KS8851=y
64CONFIG_SMSC911X=y 66CONFIG_SMSC911X=y
65CONFIG_STMMAC_ETH=y 67CONFIG_STMMAC_ETH=y
66CONFIG_MDIO_SUN4I=y 68CONFIG_MDIO_SUN4I=y
@@ -89,6 +91,7 @@ CONFIG_I2C_DESIGNWARE_PLATFORM=y
89CONFIG_I2C_SIRF=y 91CONFIG_I2C_SIRF=y
90CONFIG_I2C_TEGRA=y 92CONFIG_I2C_TEGRA=y
91CONFIG_SPI=y 93CONFIG_SPI=y
94CONFIG_SPI_OMAP24XX=y
92CONFIG_SPI_PL022=y 95CONFIG_SPI_PL022=y
93CONFIG_SPI_SIRF=y 96CONFIG_SPI_SIRF=y
94CONFIG_SPI_TEGRA114=y 97CONFIG_SPI_TEGRA114=y
@@ -111,11 +114,12 @@ CONFIG_FB_SIMPLE=y
111CONFIG_USB=y 114CONFIG_USB=y
112CONFIG_USB_XHCI_HCD=y 115CONFIG_USB_XHCI_HCD=y
113CONFIG_USB_EHCI_HCD=y 116CONFIG_USB_EHCI_HCD=y
114CONFIG_USB_EHCI_MXC=y
115CONFIG_USB_EHCI_TEGRA=y 117CONFIG_USB_EHCI_TEGRA=y
116CONFIG_USB_EHCI_HCD_PLATFORM=y 118CONFIG_USB_EHCI_HCD_PLATFORM=y
117CONFIG_USB_ISP1760_HCD=y 119CONFIG_USB_ISP1760_HCD=y
118CONFIG_USB_STORAGE=y 120CONFIG_USB_STORAGE=y
121CONFIG_USB_CHIPIDEA=y
122CONFIG_USB_CHIPIDEA_HOST=y
119CONFIG_AB8500_USB=y 123CONFIG_AB8500_USB=y
120CONFIG_NOP_USB_XCEIV=y 124CONFIG_NOP_USB_XCEIV=y
121CONFIG_OMAP_USB2=y 125CONFIG_OMAP_USB2=y
diff --git a/arch/arm/configs/nhk8815_defconfig b/arch/arm/configs/nhk8815_defconfig
index 35f8cf299fa2..263ae3869e32 100644
--- a/arch/arm/configs/nhk8815_defconfig
+++ b/arch/arm/configs/nhk8815_defconfig
@@ -1,6 +1,8 @@
1# CONFIG_LOCALVERSION_AUTO is not set 1# CONFIG_LOCALVERSION_AUTO is not set
2# CONFIG_SWAP is not set 2# CONFIG_SWAP is not set
3CONFIG_SYSVIPC=y 3CONFIG_SYSVIPC=y
4CONFIG_NO_HZ_IDLE=y
5CONFIG_HIGH_RES_TIMERS=y
4CONFIG_IKCONFIG=y 6CONFIG_IKCONFIG=y
5CONFIG_IKCONFIG_PROC=y 7CONFIG_IKCONFIG_PROC=y
6CONFIG_LOG_BUF_SHIFT=14 8CONFIG_LOG_BUF_SHIFT=14
@@ -48,7 +50,6 @@ CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
48CONFIG_MTD=y 50CONFIG_MTD=y
49CONFIG_MTD_TESTS=m 51CONFIG_MTD_TESTS=m
50CONFIG_MTD_CMDLINE_PARTS=y 52CONFIG_MTD_CMDLINE_PARTS=y
51CONFIG_MTD_CHAR=y
52CONFIG_MTD_BLOCK=y 53CONFIG_MTD_BLOCK=y
53CONFIG_MTD_NAND_ECC_SMC=y 54CONFIG_MTD_NAND_ECC_SMC=y
54CONFIG_MTD_NAND=y 55CONFIG_MTD_NAND=y
@@ -94,8 +95,10 @@ CONFIG_I2C_GPIO=y
94CONFIG_I2C_NOMADIK=y 95CONFIG_I2C_NOMADIK=y
95CONFIG_DEBUG_GPIO=y 96CONFIG_DEBUG_GPIO=y
96# CONFIG_HWMON is not set 97# CONFIG_HWMON is not set
98CONFIG_REGULATOR=y
97CONFIG_MMC=y 99CONFIG_MMC=y
98CONFIG_MMC_CLKGATE=y 100CONFIG_MMC_UNSAFE_RESUME=y
101# CONFIG_MMC_BLOCK_BOUNCE is not set
99CONFIG_MMC_ARMMMCI=y 102CONFIG_MMC_ARMMMCI=y
100CONFIG_NEW_LEDS=y 103CONFIG_NEW_LEDS=y
101CONFIG_LEDS_CLASS=y 104CONFIG_LEDS_CLASS=y
diff --git a/arch/arm/include/asm/a.out-core.h b/arch/arm/include/asm/a.out-core.h
deleted file mode 100644
index 92f10cb5c70c..000000000000
--- a/arch/arm/include/asm/a.out-core.h
+++ /dev/null
@@ -1,45 +0,0 @@
1/* a.out coredump register dumper
2 *
3 * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
4 * Written by David Howells (dhowells@redhat.com)
5 *
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public Licence
8 * as published by the Free Software Foundation; either version
9 * 2 of the Licence, or (at your option) any later version.
10 */
11
12#ifndef _ASM_A_OUT_CORE_H
13#define _ASM_A_OUT_CORE_H
14
15#ifdef __KERNEL__
16
17#include <linux/user.h>
18#include <linux/elfcore.h>
19
20/*
21 * fill in the user structure for an a.out core dump
22 */
23static inline void aout_dump_thread(struct pt_regs *regs, struct user *dump)
24{
25 struct task_struct *tsk = current;
26
27 dump->magic = CMAGIC;
28 dump->start_code = tsk->mm->start_code;
29 dump->start_stack = regs->ARM_sp & ~(PAGE_SIZE - 1);
30
31 dump->u_tsize = (tsk->mm->end_code - tsk->mm->start_code) >> PAGE_SHIFT;
32 dump->u_dsize = (tsk->mm->brk - tsk->mm->start_data + PAGE_SIZE - 1) >> PAGE_SHIFT;
33 dump->u_ssize = 0;
34
35 memset(dump->u_debugreg, 0, sizeof(dump->u_debugreg));
36
37 if (dump->start_stack < 0x04000000)
38 dump->u_ssize = (0x04000000 - dump->start_stack) >> PAGE_SHIFT;
39
40 dump->regs = *regs;
41 dump->u_fpvalid = dump_fpu (regs, &dump->u_fp);
42}
43
44#endif /* __KERNEL__ */
45#endif /* _ASM_A_OUT_CORE_H */
diff --git a/arch/arm/include/asm/arch_timer.h b/arch/arm/include/asm/arch_timer.h
index accefe099182..e406d575c94f 100644
--- a/arch/arm/include/asm/arch_timer.h
+++ b/arch/arm/include/asm/arch_timer.h
@@ -89,7 +89,7 @@ static inline u64 arch_counter_get_cntvct(void)
89 return cval; 89 return cval;
90} 90}
91 91
92static inline void __cpuinit arch_counter_set_user_access(void) 92static inline void arch_counter_set_user_access(void)
93{ 93{
94 u32 cntkctl; 94 u32 cntkctl;
95 95
diff --git a/arch/arm/include/asm/cputype.h b/arch/arm/include/asm/cputype.h
index 8c25dc4e9851..9672e978d50d 100644
--- a/arch/arm/include/asm/cputype.h
+++ b/arch/arm/include/asm/cputype.h
@@ -89,13 +89,18 @@ extern unsigned int processor_id;
89 __val; \ 89 __val; \
90 }) 90 })
91 91
92/*
93 * The memory clobber prevents gcc 4.5 from reordering the mrc before
94 * any is_smp() tests, which can cause undefined instruction aborts on
95 * ARM1136 r0 due to the missing extended CP15 registers.
96 */
92#define read_cpuid_ext(ext_reg) \ 97#define read_cpuid_ext(ext_reg) \
93 ({ \ 98 ({ \
94 unsigned int __val; \ 99 unsigned int __val; \
95 asm("mrc p15, 0, %0, c0, " ext_reg \ 100 asm("mrc p15, 0, %0, c0, " ext_reg \
96 : "=r" (__val) \ 101 : "=r" (__val) \
97 : \ 102 : \
98 : "cc"); \ 103 : "memory"); \
99 __val; \ 104 __val; \
100 }) 105 })
101 106
diff --git a/arch/arm/include/asm/elf.h b/arch/arm/include/asm/elf.h
index 38050b1c4800..56211f2084ef 100644
--- a/arch/arm/include/asm/elf.h
+++ b/arch/arm/include/asm/elf.h
@@ -130,4 +130,10 @@ struct mm_struct;
130extern unsigned long arch_randomize_brk(struct mm_struct *mm); 130extern unsigned long arch_randomize_brk(struct mm_struct *mm);
131#define arch_randomize_brk arch_randomize_brk 131#define arch_randomize_brk arch_randomize_brk
132 132
133#ifdef CONFIG_MMU
134#define ARCH_HAS_SETUP_ADDITIONAL_PAGES 1
135struct linux_binprm;
136int arch_setup_additional_pages(struct linux_binprm *, int);
137#endif
138
133#endif 139#endif
diff --git a/arch/arm/include/asm/mmu.h b/arch/arm/include/asm/mmu.h
index e3d55547e755..6f18da09668b 100644
--- a/arch/arm/include/asm/mmu.h
+++ b/arch/arm/include/asm/mmu.h
@@ -6,8 +6,11 @@
6typedef struct { 6typedef struct {
7#ifdef CONFIG_CPU_HAS_ASID 7#ifdef CONFIG_CPU_HAS_ASID
8 atomic64_t id; 8 atomic64_t id;
9#else
10 int switch_pending;
9#endif 11#endif
10 unsigned int vmalloc_seq; 12 unsigned int vmalloc_seq;
13 unsigned long sigpage;
11} mm_context_t; 14} mm_context_t;
12 15
13#ifdef CONFIG_CPU_HAS_ASID 16#ifdef CONFIG_CPU_HAS_ASID
diff --git a/arch/arm/include/asm/mmu_context.h b/arch/arm/include/asm/mmu_context.h
index b5792b7fd8d3..9b32f76bb0dd 100644
--- a/arch/arm/include/asm/mmu_context.h
+++ b/arch/arm/include/asm/mmu_context.h
@@ -56,7 +56,7 @@ static inline void check_and_switch_context(struct mm_struct *mm,
56 * on non-ASID CPUs, the old mm will remain valid until the 56 * on non-ASID CPUs, the old mm will remain valid until the
57 * finish_arch_post_lock_switch() call. 57 * finish_arch_post_lock_switch() call.
58 */ 58 */
59 set_ti_thread_flag(task_thread_info(tsk), TIF_SWITCH_MM); 59 mm->context.switch_pending = 1;
60 else 60 else
61 cpu_switch_mm(mm->pgd, mm); 61 cpu_switch_mm(mm->pgd, mm);
62} 62}
@@ -65,9 +65,21 @@ static inline void check_and_switch_context(struct mm_struct *mm,
65 finish_arch_post_lock_switch 65 finish_arch_post_lock_switch
66static inline void finish_arch_post_lock_switch(void) 66static inline void finish_arch_post_lock_switch(void)
67{ 67{
68 if (test_and_clear_thread_flag(TIF_SWITCH_MM)) { 68 struct mm_struct *mm = current->mm;
69 struct mm_struct *mm = current->mm; 69
70 cpu_switch_mm(mm->pgd, mm); 70 if (mm && mm->context.switch_pending) {
71 /*
72 * Preemption must be disabled during cpu_switch_mm() as we
73 * have some stateful cache flush implementations. Check
74 * switch_pending again in case we were preempted and the
75 * switch to this mm was already done.
76 */
77 preempt_disable();
78 if (mm->context.switch_pending) {
79 mm->context.switch_pending = 0;
80 cpu_switch_mm(mm->pgd, mm);
81 }
82 preempt_enable_no_resched();
71 } 83 }
72} 84}
73 85
diff --git a/arch/arm/include/asm/page.h b/arch/arm/include/asm/page.h
index 6363f3d1d505..4355f0ec44d6 100644
--- a/arch/arm/include/asm/page.h
+++ b/arch/arm/include/asm/page.h
@@ -142,7 +142,9 @@ extern void __cpu_copy_user_highpage(struct page *to, struct page *from,
142#define clear_page(page) memset((void *)(page), 0, PAGE_SIZE) 142#define clear_page(page) memset((void *)(page), 0, PAGE_SIZE)
143extern void copy_page(void *to, const void *from); 143extern void copy_page(void *to, const void *from);
144 144
145#ifdef CONFIG_KUSER_HELPERS
145#define __HAVE_ARCH_GATE_AREA 1 146#define __HAVE_ARCH_GATE_AREA 1
147#endif
146 148
147#ifdef CONFIG_ARM_LPAE 149#ifdef CONFIG_ARM_LPAE
148#include <asm/pgtable-3level-types.h> 150#include <asm/pgtable-3level-types.h>
diff --git a/arch/arm/include/asm/processor.h b/arch/arm/include/asm/processor.h
index 06e7d509eaac..413f3876341c 100644
--- a/arch/arm/include/asm/processor.h
+++ b/arch/arm/include/asm/processor.h
@@ -54,7 +54,6 @@ struct thread_struct {
54 54
55#define start_thread(regs,pc,sp) \ 55#define start_thread(regs,pc,sp) \
56({ \ 56({ \
57 unsigned long *stack = (unsigned long *)sp; \
58 memset(regs->uregs, 0, sizeof(regs->uregs)); \ 57 memset(regs->uregs, 0, sizeof(regs->uregs)); \
59 if (current->personality & ADDR_LIMIT_32BIT) \ 58 if (current->personality & ADDR_LIMIT_32BIT) \
60 regs->ARM_cpsr = USR_MODE; \ 59 regs->ARM_cpsr = USR_MODE; \
@@ -65,9 +64,6 @@ struct thread_struct {
65 regs->ARM_cpsr |= PSR_ENDSTATE; \ 64 regs->ARM_cpsr |= PSR_ENDSTATE; \
66 regs->ARM_pc = pc & ~1; /* pc */ \ 65 regs->ARM_pc = pc & ~1; /* pc */ \
67 regs->ARM_sp = sp; /* sp */ \ 66 regs->ARM_sp = sp; /* sp */ \
68 regs->ARM_r2 = stack[2]; /* r2 (envp) */ \
69 regs->ARM_r1 = stack[1]; /* r1 (argv) */ \
70 regs->ARM_r0 = stack[0]; /* r0 (argc) */ \
71 nommu_start_thread(regs); \ 67 nommu_start_thread(regs); \
72}) 68})
73 69
diff --git a/arch/arm/include/asm/thread_info.h b/arch/arm/include/asm/thread_info.h
index 214d4158089a..2b8114fcba09 100644
--- a/arch/arm/include/asm/thread_info.h
+++ b/arch/arm/include/asm/thread_info.h
@@ -156,7 +156,6 @@ extern int vfp_restore_user_hwstate(struct user_vfp __user *,
156#define TIF_USING_IWMMXT 17 156#define TIF_USING_IWMMXT 17
157#define TIF_MEMDIE 18 /* is terminating due to OOM killer */ 157#define TIF_MEMDIE 18 /* is terminating due to OOM killer */
158#define TIF_RESTORE_SIGMASK 20 158#define TIF_RESTORE_SIGMASK 20
159#define TIF_SWITCH_MM 22 /* deferred switch_mm */
160 159
161#define _TIF_SIGPENDING (1 << TIF_SIGPENDING) 160#define _TIF_SIGPENDING (1 << TIF_SIGPENDING)
162#define _TIF_NEED_RESCHED (1 << TIF_NEED_RESCHED) 161#define _TIF_NEED_RESCHED (1 << TIF_NEED_RESCHED)
diff --git a/arch/arm/include/asm/tlbflush.h b/arch/arm/include/asm/tlbflush.h
index fdbb9e369745..f467e9b3f8d5 100644
--- a/arch/arm/include/asm/tlbflush.h
+++ b/arch/arm/include/asm/tlbflush.h
@@ -443,7 +443,18 @@ static inline void local_flush_bp_all(void)
443 isb(); 443 isb();
444} 444}
445 445
446#include <asm/cputype.h>
446#ifdef CONFIG_ARM_ERRATA_798181 447#ifdef CONFIG_ARM_ERRATA_798181
448static inline int erratum_a15_798181(void)
449{
450 unsigned int midr = read_cpuid_id();
451
452 /* Cortex-A15 r0p0..r3p2 affected */
453 if ((midr & 0xff0ffff0) != 0x410fc0f0 || midr > 0x413fc0f2)
454 return 0;
455 return 1;
456}
457
447static inline void dummy_flush_tlb_a15_erratum(void) 458static inline void dummy_flush_tlb_a15_erratum(void)
448{ 459{
449 /* 460 /*
@@ -453,6 +464,11 @@ static inline void dummy_flush_tlb_a15_erratum(void)
453 dsb(); 464 dsb();
454} 465}
455#else 466#else
467static inline int erratum_a15_798181(void)
468{
469 return 0;
470}
471
456static inline void dummy_flush_tlb_a15_erratum(void) 472static inline void dummy_flush_tlb_a15_erratum(void)
457{ 473{
458} 474}
diff --git a/arch/arm/include/asm/virt.h b/arch/arm/include/asm/virt.h
index 50af92bac737..4371f45c5784 100644
--- a/arch/arm/include/asm/virt.h
+++ b/arch/arm/include/asm/virt.h
@@ -29,6 +29,7 @@
29#define BOOT_CPU_MODE_MISMATCH PSR_N_BIT 29#define BOOT_CPU_MODE_MISMATCH PSR_N_BIT
30 30
31#ifndef __ASSEMBLY__ 31#ifndef __ASSEMBLY__
32#include <asm/cacheflush.h>
32 33
33#ifdef CONFIG_ARM_VIRT_EXT 34#ifdef CONFIG_ARM_VIRT_EXT
34/* 35/*
@@ -41,10 +42,21 @@
41 */ 42 */
42extern int __boot_cpu_mode; 43extern int __boot_cpu_mode;
43 44
45static inline void sync_boot_mode(void)
46{
47 /*
48 * As secondaries write to __boot_cpu_mode with caches disabled, we
49 * must flush the corresponding cache entries to ensure the visibility
50 * of their writes.
51 */
52 sync_cache_r(&__boot_cpu_mode);
53}
54
44void __hyp_set_vectors(unsigned long phys_vector_base); 55void __hyp_set_vectors(unsigned long phys_vector_base);
45unsigned long __hyp_get_vectors(void); 56unsigned long __hyp_get_vectors(void);
46#else 57#else
47#define __boot_cpu_mode (SVC_MODE) 58#define __boot_cpu_mode (SVC_MODE)
59#define sync_boot_mode()
48#endif 60#endif
49 61
50#ifndef ZIMAGE 62#ifndef ZIMAGE
diff --git a/arch/arm/include/uapi/asm/Kbuild b/arch/arm/include/uapi/asm/Kbuild
index 47bcb2d254af..18d76fd5a2af 100644
--- a/arch/arm/include/uapi/asm/Kbuild
+++ b/arch/arm/include/uapi/asm/Kbuild
@@ -1,7 +1,6 @@
1# UAPI Header export list 1# UAPI Header export list
2include include/uapi/asm-generic/Kbuild.asm 2include include/uapi/asm-generic/Kbuild.asm
3 3
4header-y += a.out.h
5header-y += byteorder.h 4header-y += byteorder.h
6header-y += fcntl.h 5header-y += fcntl.h
7header-y += hwcap.h 6header-y += hwcap.h
diff --git a/arch/arm/include/uapi/asm/a.out.h b/arch/arm/include/uapi/asm/a.out.h
deleted file mode 100644
index 083894b2e3bc..000000000000
--- a/arch/arm/include/uapi/asm/a.out.h
+++ /dev/null
@@ -1,34 +0,0 @@
1#ifndef __ARM_A_OUT_H__
2#define __ARM_A_OUT_H__
3
4#include <linux/personality.h>
5#include <linux/types.h>
6
7struct exec
8{
9 __u32 a_info; /* Use macros N_MAGIC, etc for access */
10 __u32 a_text; /* length of text, in bytes */
11 __u32 a_data; /* length of data, in bytes */
12 __u32 a_bss; /* length of uninitialized data area for file, in bytes */
13 __u32 a_syms; /* length of symbol table data in file, in bytes */
14 __u32 a_entry; /* start address */
15 __u32 a_trsize; /* length of relocation info for text, in bytes */
16 __u32 a_drsize; /* length of relocation info for data, in bytes */
17};
18
19/*
20 * This is always the same
21 */
22#define N_TXTADDR(a) (0x00008000)
23
24#define N_TRSIZE(a) ((a).a_trsize)
25#define N_DRSIZE(a) ((a).a_drsize)
26#define N_SYMSIZE(a) ((a).a_syms)
27
28#define M_ARM 103
29
30#ifndef LIBRARY_START_TEXT
31#define LIBRARY_START_TEXT (0x00c00000)
32#endif
33
34#endif /* __A_OUT_GNU_H__ */
diff --git a/arch/arm/kernel/entry-armv.S b/arch/arm/kernel/entry-armv.S
index a39cfc2a1f90..d40d0ef389db 100644
--- a/arch/arm/kernel/entry-armv.S
+++ b/arch/arm/kernel/entry-armv.S
@@ -742,6 +742,18 @@ ENDPROC(__switch_to)
742#endif 742#endif
743 .endm 743 .endm
744 744
745 .macro kuser_pad, sym, size
746 .if (. - \sym) & 3
747 .rept 4 - (. - \sym) & 3
748 .byte 0
749 .endr
750 .endif
751 .rept (\size - (. - \sym)) / 4
752 .word 0xe7fddef1
753 .endr
754 .endm
755
756#ifdef CONFIG_KUSER_HELPERS
745 .align 5 757 .align 5
746 .globl __kuser_helper_start 758 .globl __kuser_helper_start
747__kuser_helper_start: 759__kuser_helper_start:
@@ -832,18 +844,13 @@ kuser_cmpxchg64_fixup:
832#error "incoherent kernel configuration" 844#error "incoherent kernel configuration"
833#endif 845#endif
834 846
835 /* pad to next slot */ 847 kuser_pad __kuser_cmpxchg64, 64
836 .rept (16 - (. - __kuser_cmpxchg64)/4)
837 .word 0
838 .endr
839
840 .align 5
841 848
842__kuser_memory_barrier: @ 0xffff0fa0 849__kuser_memory_barrier: @ 0xffff0fa0
843 smp_dmb arm 850 smp_dmb arm
844 usr_ret lr 851 usr_ret lr
845 852
846 .align 5 853 kuser_pad __kuser_memory_barrier, 32
847 854
848__kuser_cmpxchg: @ 0xffff0fc0 855__kuser_cmpxchg: @ 0xffff0fc0
849 856
@@ -916,13 +923,14 @@ kuser_cmpxchg32_fixup:
916 923
917#endif 924#endif
918 925
919 .align 5 926 kuser_pad __kuser_cmpxchg, 32
920 927
921__kuser_get_tls: @ 0xffff0fe0 928__kuser_get_tls: @ 0xffff0fe0
922 ldr r0, [pc, #(16 - 8)] @ read TLS, set in kuser_get_tls_init 929 ldr r0, [pc, #(16 - 8)] @ read TLS, set in kuser_get_tls_init
923 usr_ret lr 930 usr_ret lr
924 mrc p15, 0, r0, c13, c0, 3 @ 0xffff0fe8 hardware TLS code 931 mrc p15, 0, r0, c13, c0, 3 @ 0xffff0fe8 hardware TLS code
925 .rep 4 932 kuser_pad __kuser_get_tls, 16
933 .rep 3
926 .word 0 @ 0xffff0ff0 software TLS value, then 934 .word 0 @ 0xffff0ff0 software TLS value, then
927 .endr @ pad up to __kuser_helper_version 935 .endr @ pad up to __kuser_helper_version
928 936
@@ -932,14 +940,16 @@ __kuser_helper_version: @ 0xffff0ffc
932 .globl __kuser_helper_end 940 .globl __kuser_helper_end
933__kuser_helper_end: 941__kuser_helper_end:
934 942
943#endif
944
935 THUMB( .thumb ) 945 THUMB( .thumb )
936 946
937/* 947/*
938 * Vector stubs. 948 * Vector stubs.
939 * 949 *
940 * This code is copied to 0xffff0200 so we can use branches in the 950 * This code is copied to 0xffff1000 so we can use branches in the
941 * vectors, rather than ldr's. Note that this code must not 951 * vectors, rather than ldr's. Note that this code must not exceed
942 * exceed 0x300 bytes. 952 * a page size.
943 * 953 *
944 * Common stub entry macro: 954 * Common stub entry macro:
945 * Enter in IRQ mode, spsr = SVC/USR CPSR, lr = SVC/USR PC 955 * Enter in IRQ mode, spsr = SVC/USR CPSR, lr = SVC/USR PC
@@ -986,8 +996,17 @@ ENDPROC(vector_\name)
9861: 9961:
987 .endm 997 .endm
988 998
989 .globl __stubs_start 999 .section .stubs, "ax", %progbits
990__stubs_start: 1000__stubs_start:
1001 @ This must be the first word
1002 .word vector_swi
1003
1004vector_rst:
1005 ARM( swi SYS_ERROR0 )
1006 THUMB( svc #0 )
1007 THUMB( nop )
1008 b vector_und
1009
991/* 1010/*
992 * Interrupt dispatcher 1011 * Interrupt dispatcher
993 */ 1012 */
@@ -1082,6 +1101,16 @@ __stubs_start:
1082 .align 5 1101 .align 5
1083 1102
1084/*============================================================================= 1103/*=============================================================================
1104 * Address exception handler
1105 *-----------------------------------------------------------------------------
1106 * These aren't too critical.
1107 * (they're not supposed to happen, and won't happen in 32-bit data mode).
1108 */
1109
1110vector_addrexcptn:
1111 b vector_addrexcptn
1112
1113/*=============================================================================
1085 * Undefined FIQs 1114 * Undefined FIQs
1086 *----------------------------------------------------------------------------- 1115 *-----------------------------------------------------------------------------
1087 * Enter in FIQ mode, spsr = ANY CPSR, lr = ANY PC 1116 * Enter in FIQ mode, spsr = ANY CPSR, lr = ANY PC
@@ -1094,45 +1123,19 @@ __stubs_start:
1094vector_fiq: 1123vector_fiq:
1095 subs pc, lr, #4 1124 subs pc, lr, #4
1096 1125
1097/*============================================================================= 1126 .globl vector_fiq_offset
1098 * Address exception handler 1127 .equ vector_fiq_offset, vector_fiq
1099 *-----------------------------------------------------------------------------
1100 * These aren't too critical.
1101 * (they're not supposed to happen, and won't happen in 32-bit data mode).
1102 */
1103
1104vector_addrexcptn:
1105 b vector_addrexcptn
1106
1107/*
1108 * We group all the following data together to optimise
1109 * for CPUs with separate I & D caches.
1110 */
1111 .align 5
1112
1113.LCvswi:
1114 .word vector_swi
1115
1116 .globl __stubs_end
1117__stubs_end:
1118
1119 .equ stubs_offset, __vectors_start + 0x200 - __stubs_start
1120 1128
1121 .globl __vectors_start 1129 .section .vectors, "ax", %progbits
1122__vectors_start: 1130__vectors_start:
1123 ARM( swi SYS_ERROR0 ) 1131 W(b) vector_rst
1124 THUMB( svc #0 ) 1132 W(b) vector_und
1125 THUMB( nop ) 1133 W(ldr) pc, __vectors_start + 0x1000
1126 W(b) vector_und + stubs_offset 1134 W(b) vector_pabt
1127 W(ldr) pc, .LCvswi + stubs_offset 1135 W(b) vector_dabt
1128 W(b) vector_pabt + stubs_offset 1136 W(b) vector_addrexcptn
1129 W(b) vector_dabt + stubs_offset 1137 W(b) vector_irq
1130 W(b) vector_addrexcptn + stubs_offset 1138 W(b) vector_fiq
1131 W(b) vector_irq + stubs_offset
1132 W(b) vector_fiq + stubs_offset
1133
1134 .globl __vectors_end
1135__vectors_end:
1136 1139
1137 .data 1140 .data
1138 1141
diff --git a/arch/arm/kernel/entry-v7m.S b/arch/arm/kernel/entry-v7m.S
index e00621f1403f..52b26432c9a9 100644
--- a/arch/arm/kernel/entry-v7m.S
+++ b/arch/arm/kernel/entry-v7m.S
@@ -49,7 +49,7 @@ __irq_entry:
49 mov r1, sp 49 mov r1, sp
50 stmdb sp!, {lr} 50 stmdb sp!, {lr}
51 @ routine called with r0 = irq number, r1 = struct pt_regs * 51 @ routine called with r0 = irq number, r1 = struct pt_regs *
52 bl nvic_do_IRQ 52 bl nvic_handle_irq
53 53
54 pop {lr} 54 pop {lr}
55 @ 55 @
diff --git a/arch/arm/kernel/fiq.c b/arch/arm/kernel/fiq.c
index 2adda11f712f..25442f451148 100644
--- a/arch/arm/kernel/fiq.c
+++ b/arch/arm/kernel/fiq.c
@@ -47,6 +47,11 @@
47#include <asm/irq.h> 47#include <asm/irq.h>
48#include <asm/traps.h> 48#include <asm/traps.h>
49 49
50#define FIQ_OFFSET ({ \
51 extern void *vector_fiq_offset; \
52 (unsigned)&vector_fiq_offset; \
53 })
54
50static unsigned long no_fiq_insn; 55static unsigned long no_fiq_insn;
51 56
52/* Default reacquire function 57/* Default reacquire function
@@ -80,13 +85,16 @@ int show_fiq_list(struct seq_file *p, int prec)
80void set_fiq_handler(void *start, unsigned int length) 85void set_fiq_handler(void *start, unsigned int length)
81{ 86{
82#if defined(CONFIG_CPU_USE_DOMAINS) 87#if defined(CONFIG_CPU_USE_DOMAINS)
83 memcpy((void *)0xffff001c, start, length); 88 void *base = (void *)0xffff0000;
84#else 89#else
85 memcpy(vectors_page + 0x1c, start, length); 90 void *base = vectors_page;
86#endif 91#endif
87 flush_icache_range(0xffff001c, 0xffff001c + length); 92 unsigned offset = FIQ_OFFSET;
93
94 memcpy(base + offset, start, length);
95 flush_icache_range(0xffff0000 + offset, 0xffff0000 + offset + length);
88 if (!vectors_high()) 96 if (!vectors_high())
89 flush_icache_range(0x1c, 0x1c + length); 97 flush_icache_range(offset, offset + length);
90} 98}
91 99
92int claim_fiq(struct fiq_handler *f) 100int claim_fiq(struct fiq_handler *f)
@@ -144,6 +152,7 @@ EXPORT_SYMBOL(disable_fiq);
144 152
145void __init init_FIQ(int start) 153void __init init_FIQ(int start)
146{ 154{
147 no_fiq_insn = *(unsigned long *)0xffff001c; 155 unsigned offset = FIQ_OFFSET;
156 no_fiq_insn = *(unsigned long *)(0xffff0000 + offset);
148 fiq_start = start; 157 fiq_start = start;
149} 158}
diff --git a/arch/arm/kernel/head-common.S b/arch/arm/kernel/head-common.S
index 76ab5ca50610..47cd974e57ea 100644
--- a/arch/arm/kernel/head-common.S
+++ b/arch/arm/kernel/head-common.S
@@ -149,7 +149,6 @@ ENDPROC(lookup_processor_type)
149 * r5 = proc_info pointer in physical address space 149 * r5 = proc_info pointer in physical address space
150 * r9 = cpuid (preserved) 150 * r9 = cpuid (preserved)
151 */ 151 */
152 __CPUINIT
153__lookup_processor_type: 152__lookup_processor_type:
154 adr r3, __lookup_processor_type_data 153 adr r3, __lookup_processor_type_data
155 ldmia r3, {r4 - r6} 154 ldmia r3, {r4 - r6}
diff --git a/arch/arm/kernel/head-nommu.S b/arch/arm/kernel/head-nommu.S
index 75f14cc3e073..14235ba64a90 100644
--- a/arch/arm/kernel/head-nommu.S
+++ b/arch/arm/kernel/head-nommu.S
@@ -87,7 +87,7 @@ ENTRY(stext)
87ENDPROC(stext) 87ENDPROC(stext)
88 88
89#ifdef CONFIG_SMP 89#ifdef CONFIG_SMP
90 __CPUINIT 90 .text
91ENTRY(secondary_startup) 91ENTRY(secondary_startup)
92 /* 92 /*
93 * Common entry point for secondary CPUs. 93 * Common entry point for secondary CPUs.
diff --git a/arch/arm/kernel/head.S b/arch/arm/kernel/head.S
index 45e8935cae4e..2c7cc1e03473 100644
--- a/arch/arm/kernel/head.S
+++ b/arch/arm/kernel/head.S
@@ -343,7 +343,7 @@ __turn_mmu_on_loc:
343 .long __turn_mmu_on_end 343 .long __turn_mmu_on_end
344 344
345#if defined(CONFIG_SMP) 345#if defined(CONFIG_SMP)
346 __CPUINIT 346 .text
347ENTRY(secondary_startup) 347ENTRY(secondary_startup)
348 /* 348 /*
349 * Common entry point for secondary CPUs. 349 * Common entry point for secondary CPUs.
diff --git a/arch/arm/kernel/hw_breakpoint.c b/arch/arm/kernel/hw_breakpoint.c
index 1fd749ee4a1b..7b95de601357 100644
--- a/arch/arm/kernel/hw_breakpoint.c
+++ b/arch/arm/kernel/hw_breakpoint.c
@@ -1020,7 +1020,7 @@ out_mdbgen:
1020 cpumask_or(&debug_err_mask, &debug_err_mask, cpumask_of(cpu)); 1020 cpumask_or(&debug_err_mask, &debug_err_mask, cpumask_of(cpu));
1021} 1021}
1022 1022
1023static int __cpuinit dbg_reset_notify(struct notifier_block *self, 1023static int dbg_reset_notify(struct notifier_block *self,
1024 unsigned long action, void *cpu) 1024 unsigned long action, void *cpu)
1025{ 1025{
1026 if ((action & ~CPU_TASKS_FROZEN) == CPU_ONLINE) 1026 if ((action & ~CPU_TASKS_FROZEN) == CPU_ONLINE)
@@ -1029,7 +1029,7 @@ static int __cpuinit dbg_reset_notify(struct notifier_block *self,
1029 return NOTIFY_OK; 1029 return NOTIFY_OK;
1030} 1030}
1031 1031
1032static struct notifier_block __cpuinitdata dbg_reset_nb = { 1032static struct notifier_block dbg_reset_nb = {
1033 .notifier_call = dbg_reset_notify, 1033 .notifier_call = dbg_reset_notify,
1034}; 1034};
1035 1035
diff --git a/arch/arm/kernel/hyp-stub.S b/arch/arm/kernel/hyp-stub.S
index 4910232c4833..797b1a6a4906 100644
--- a/arch/arm/kernel/hyp-stub.S
+++ b/arch/arm/kernel/hyp-stub.S
@@ -56,8 +56,8 @@ ENTRY(__boot_cpu_mode)
56 ldr \reg3, [\reg2] 56 ldr \reg3, [\reg2]
57 ldr \reg1, [\reg2, \reg3] 57 ldr \reg1, [\reg2, \reg3]
58 cmp \mode, \reg1 @ matches primary CPU boot mode? 58 cmp \mode, \reg1 @ matches primary CPU boot mode?
59 orrne r7, r7, #BOOT_CPU_MODE_MISMATCH 59 orrne \reg1, \reg1, #BOOT_CPU_MODE_MISMATCH
60 strne r7, [r5, r6] @ record what happened and give up 60 strne \reg1, [\reg2, \reg3] @ record what happened and give up
61 .endm 61 .endm
62 62
63#else /* ZIMAGE */ 63#else /* ZIMAGE */
diff --git a/arch/arm/kernel/perf_event_cpu.c b/arch/arm/kernel/perf_event_cpu.c
index 1f2740e3dbc0..aebe0e99c153 100644
--- a/arch/arm/kernel/perf_event_cpu.c
+++ b/arch/arm/kernel/perf_event_cpu.c
@@ -157,8 +157,8 @@ static void cpu_pmu_init(struct arm_pmu *cpu_pmu)
157 * UNKNOWN at reset, the PMU must be explicitly reset to avoid reading 157 * UNKNOWN at reset, the PMU must be explicitly reset to avoid reading
158 * junk values out of them. 158 * junk values out of them.
159 */ 159 */
160static int __cpuinit cpu_pmu_notify(struct notifier_block *b, 160static int cpu_pmu_notify(struct notifier_block *b, unsigned long action,
161 unsigned long action, void *hcpu) 161 void *hcpu)
162{ 162{
163 if ((action & ~CPU_TASKS_FROZEN) != CPU_STARTING) 163 if ((action & ~CPU_TASKS_FROZEN) != CPU_STARTING)
164 return NOTIFY_DONE; 164 return NOTIFY_DONE;
@@ -171,7 +171,7 @@ static int __cpuinit cpu_pmu_notify(struct notifier_block *b,
171 return NOTIFY_OK; 171 return NOTIFY_OK;
172} 172}
173 173
174static struct notifier_block __cpuinitdata cpu_pmu_hotplug_notifier = { 174static struct notifier_block cpu_pmu_hotplug_notifier = {
175 .notifier_call = cpu_pmu_notify, 175 .notifier_call = cpu_pmu_notify,
176}; 176};
177 177
diff --git a/arch/arm/kernel/process.c b/arch/arm/kernel/process.c
index d3ca4f6915af..536c85fe72a8 100644
--- a/arch/arm/kernel/process.c
+++ b/arch/arm/kernel/process.c
@@ -197,6 +197,7 @@ void machine_shutdown(void)
197 */ 197 */
198void machine_halt(void) 198void machine_halt(void)
199{ 199{
200 local_irq_disable();
200 smp_send_stop(); 201 smp_send_stop();
201 202
202 local_irq_disable(); 203 local_irq_disable();
@@ -211,6 +212,7 @@ void machine_halt(void)
211 */ 212 */
212void machine_power_off(void) 213void machine_power_off(void)
213{ 214{
215 local_irq_disable();
214 smp_send_stop(); 216 smp_send_stop();
215 217
216 if (pm_power_off) 218 if (pm_power_off)
@@ -230,6 +232,7 @@ void machine_power_off(void)
230 */ 232 */
231void machine_restart(char *cmd) 233void machine_restart(char *cmd)
232{ 234{
235 local_irq_disable();
233 smp_send_stop(); 236 smp_send_stop();
234 237
235 arm_pm_restart(reboot_mode, cmd); 238 arm_pm_restart(reboot_mode, cmd);
@@ -426,10 +429,11 @@ unsigned long arch_randomize_brk(struct mm_struct *mm)
426} 429}
427 430
428#ifdef CONFIG_MMU 431#ifdef CONFIG_MMU
432#ifdef CONFIG_KUSER_HELPERS
429/* 433/*
430 * The vectors page is always readable from user space for the 434 * The vectors page is always readable from user space for the
431 * atomic helpers and the signal restart code. Insert it into the 435 * atomic helpers. Insert it into the gate_vma so that it is visible
432 * gate_vma so that it is visible through ptrace and /proc/<pid>/mem. 436 * through ptrace and /proc/<pid>/mem.
433 */ 437 */
434static struct vm_area_struct gate_vma = { 438static struct vm_area_struct gate_vma = {
435 .vm_start = 0xffff0000, 439 .vm_start = 0xffff0000,
@@ -458,9 +462,48 @@ int in_gate_area_no_mm(unsigned long addr)
458{ 462{
459 return in_gate_area(NULL, addr); 463 return in_gate_area(NULL, addr);
460} 464}
465#define is_gate_vma(vma) ((vma) = &gate_vma)
466#else
467#define is_gate_vma(vma) 0
468#endif
461 469
462const char *arch_vma_name(struct vm_area_struct *vma) 470const char *arch_vma_name(struct vm_area_struct *vma)
463{ 471{
464 return (vma == &gate_vma) ? "[vectors]" : NULL; 472 return is_gate_vma(vma) ? "[vectors]" :
473 (vma->vm_mm && vma->vm_start == vma->vm_mm->context.sigpage) ?
474 "[sigpage]" : NULL;
475}
476
477static struct page *signal_page;
478extern struct page *get_signal_page(void);
479
480int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
481{
482 struct mm_struct *mm = current->mm;
483 unsigned long addr;
484 int ret;
485
486 if (!signal_page)
487 signal_page = get_signal_page();
488 if (!signal_page)
489 return -ENOMEM;
490
491 down_write(&mm->mmap_sem);
492 addr = get_unmapped_area(NULL, 0, PAGE_SIZE, 0, 0);
493 if (IS_ERR_VALUE(addr)) {
494 ret = addr;
495 goto up_fail;
496 }
497
498 ret = install_special_mapping(mm, addr, PAGE_SIZE,
499 VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC,
500 &signal_page);
501
502 if (ret == 0)
503 mm->context.sigpage = addr;
504
505 up_fail:
506 up_write(&mm->mmap_sem);
507 return ret;
465} 508}
466#endif 509#endif
diff --git a/arch/arm/kernel/psci_smp.c b/arch/arm/kernel/psci_smp.c
index 219f1d73572a..70ded3fb42d9 100644
--- a/arch/arm/kernel/psci_smp.c
+++ b/arch/arm/kernel/psci_smp.c
@@ -46,8 +46,7 @@
46 46
47extern void secondary_startup(void); 47extern void secondary_startup(void);
48 48
49static int __cpuinit psci_boot_secondary(unsigned int cpu, 49static int psci_boot_secondary(unsigned int cpu, struct task_struct *idle)
50 struct task_struct *idle)
51{ 50{
52 if (psci_ops.cpu_on) 51 if (psci_ops.cpu_on)
53 return psci_ops.cpu_on(cpu_logical_map(cpu), 52 return psci_ops.cpu_on(cpu_logical_map(cpu),
diff --git a/arch/arm/kernel/setup.c b/arch/arm/kernel/setup.c
index 63af9a7ae512..afc2489ee13b 100644
--- a/arch/arm/kernel/setup.c
+++ b/arch/arm/kernel/setup.c
@@ -836,6 +836,8 @@ static int __init meminfo_cmp(const void *_a, const void *_b)
836void __init hyp_mode_check(void) 836void __init hyp_mode_check(void)
837{ 837{
838#ifdef CONFIG_ARM_VIRT_EXT 838#ifdef CONFIG_ARM_VIRT_EXT
839 sync_boot_mode();
840
839 if (is_hyp_mode_available()) { 841 if (is_hyp_mode_available()) {
840 pr_info("CPU: All CPU(s) started in HYP mode.\n"); 842 pr_info("CPU: All CPU(s) started in HYP mode.\n");
841 pr_info("CPU: Virtualization extensions available.\n"); 843 pr_info("CPU: Virtualization extensions available.\n");
@@ -971,6 +973,7 @@ static const char *hwcap_str[] = {
971 "vfpv4", 973 "vfpv4",
972 "idiva", 974 "idiva",
973 "idivt", 975 "idivt",
976 "vfpd32",
974 "lpae", 977 "lpae",
975 NULL 978 NULL
976}; 979};
diff --git a/arch/arm/kernel/signal.c b/arch/arm/kernel/signal.c
index 1c16c35c271a..ab3304225272 100644
--- a/arch/arm/kernel/signal.c
+++ b/arch/arm/kernel/signal.c
@@ -8,6 +8,7 @@
8 * published by the Free Software Foundation. 8 * published by the Free Software Foundation.
9 */ 9 */
10#include <linux/errno.h> 10#include <linux/errno.h>
11#include <linux/random.h>
11#include <linux/signal.h> 12#include <linux/signal.h>
12#include <linux/personality.h> 13#include <linux/personality.h>
13#include <linux/uaccess.h> 14#include <linux/uaccess.h>
@@ -15,12 +16,11 @@
15 16
16#include <asm/elf.h> 17#include <asm/elf.h>
17#include <asm/cacheflush.h> 18#include <asm/cacheflush.h>
19#include <asm/traps.h>
18#include <asm/ucontext.h> 20#include <asm/ucontext.h>
19#include <asm/unistd.h> 21#include <asm/unistd.h>
20#include <asm/vfp.h> 22#include <asm/vfp.h>
21 23
22#include "signal.h"
23
24/* 24/*
25 * For ARM syscalls, we encode the syscall number into the instruction. 25 * For ARM syscalls, we encode the syscall number into the instruction.
26 */ 26 */
@@ -40,11 +40,13 @@
40#define SWI_THUMB_SIGRETURN (0xdf00 << 16 | 0x2700 | (__NR_sigreturn - __NR_SYSCALL_BASE)) 40#define SWI_THUMB_SIGRETURN (0xdf00 << 16 | 0x2700 | (__NR_sigreturn - __NR_SYSCALL_BASE))
41#define SWI_THUMB_RT_SIGRETURN (0xdf00 << 16 | 0x2700 | (__NR_rt_sigreturn - __NR_SYSCALL_BASE)) 41#define SWI_THUMB_RT_SIGRETURN (0xdf00 << 16 | 0x2700 | (__NR_rt_sigreturn - __NR_SYSCALL_BASE))
42 42
43const unsigned long sigreturn_codes[7] = { 43static const unsigned long sigreturn_codes[7] = {
44 MOV_R7_NR_SIGRETURN, SWI_SYS_SIGRETURN, SWI_THUMB_SIGRETURN, 44 MOV_R7_NR_SIGRETURN, SWI_SYS_SIGRETURN, SWI_THUMB_SIGRETURN,
45 MOV_R7_NR_RT_SIGRETURN, SWI_SYS_RT_SIGRETURN, SWI_THUMB_RT_SIGRETURN, 45 MOV_R7_NR_RT_SIGRETURN, SWI_SYS_RT_SIGRETURN, SWI_THUMB_RT_SIGRETURN,
46}; 46};
47 47
48static unsigned long signal_return_offset;
49
48#ifdef CONFIG_CRUNCH 50#ifdef CONFIG_CRUNCH
49static int preserve_crunch_context(struct crunch_sigframe __user *frame) 51static int preserve_crunch_context(struct crunch_sigframe __user *frame)
50{ 52{
@@ -400,14 +402,20 @@ setup_return(struct pt_regs *regs, struct ksignal *ksig,
400 __put_user(sigreturn_codes[idx+1], rc+1)) 402 __put_user(sigreturn_codes[idx+1], rc+1))
401 return 1; 403 return 1;
402 404
403 if ((cpsr & MODE32_BIT) && !IS_ENABLED(CONFIG_ARM_MPU)) { 405#ifdef CONFIG_MMU
406 if (cpsr & MODE32_BIT) {
407 struct mm_struct *mm = current->mm;
408
404 /* 409 /*
405 * 32-bit code can use the new high-page 410 * 32-bit code can use the signal return page
406 * signal return code support except when the MPU has 411 * except when the MPU has protected the vectors
407 * protected the vectors page from PL0 412 * page from PL0
408 */ 413 */
409 retcode = KERN_SIGRETURN_CODE + (idx << 2) + thumb; 414 retcode = mm->context.sigpage + signal_return_offset +
410 } else { 415 (idx << 2) + thumb;
416 } else
417#endif
418 {
411 /* 419 /*
412 * Ensure that the instruction cache sees 420 * Ensure that the instruction cache sees
413 * the return code written onto the stack. 421 * the return code written onto the stack.
@@ -608,3 +616,33 @@ do_work_pending(struct pt_regs *regs, unsigned int thread_flags, int syscall)
608 } while (thread_flags & _TIF_WORK_MASK); 616 } while (thread_flags & _TIF_WORK_MASK);
609 return 0; 617 return 0;
610} 618}
619
620struct page *get_signal_page(void)
621{
622 unsigned long ptr;
623 unsigned offset;
624 struct page *page;
625 void *addr;
626
627 page = alloc_pages(GFP_KERNEL, 0);
628
629 if (!page)
630 return NULL;
631
632 addr = page_address(page);
633
634 /* Give the signal return code some randomness */
635 offset = 0x200 + (get_random_int() & 0x7fc);
636 signal_return_offset = offset;
637
638 /*
639 * Copy signal return handlers into the vector page, and
640 * set sigreturn to be a pointer to these.
641 */
642 memcpy(addr + offset, sigreturn_codes, sizeof(sigreturn_codes));
643
644 ptr = (unsigned long)addr + offset;
645 flush_icache_range(ptr, ptr + sizeof(sigreturn_codes));
646
647 return page;
648}
diff --git a/arch/arm/kernel/signal.h b/arch/arm/kernel/signal.h
deleted file mode 100644
index 5ff067b7c752..000000000000
--- a/arch/arm/kernel/signal.h
+++ /dev/null
@@ -1,12 +0,0 @@
1/*
2 * linux/arch/arm/kernel/signal.h
3 *
4 * Copyright (C) 2005-2009 Russell King.
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 as
8 * published by the Free Software Foundation.
9 */
10#define KERN_SIGRETURN_CODE (CONFIG_VECTORS_BASE + 0x00000500)
11
12extern const unsigned long sigreturn_codes[7];
diff --git a/arch/arm/kernel/smp.c b/arch/arm/kernel/smp.c
index c5fb5469054b..c2b4f8f0be9a 100644
--- a/arch/arm/kernel/smp.c
+++ b/arch/arm/kernel/smp.c
@@ -58,7 +58,7 @@ struct secondary_data secondary_data;
58 * control for which core is the next to come out of the secondary 58 * control for which core is the next to come out of the secondary
59 * boot "holding pen" 59 * boot "holding pen"
60 */ 60 */
61volatile int __cpuinitdata pen_release = -1; 61volatile int pen_release = -1;
62 62
63enum ipi_msg_type { 63enum ipi_msg_type {
64 IPI_WAKEUP, 64 IPI_WAKEUP,
@@ -86,7 +86,7 @@ static unsigned long get_arch_pgd(pgd_t *pgd)
86 return pgdir >> ARCH_PGD_SHIFT; 86 return pgdir >> ARCH_PGD_SHIFT;
87} 87}
88 88
89int __cpuinit __cpu_up(unsigned int cpu, struct task_struct *idle) 89int __cpu_up(unsigned int cpu, struct task_struct *idle)
90{ 90{
91 int ret; 91 int ret;
92 92
@@ -138,7 +138,7 @@ void __init smp_init_cpus(void)
138 smp_ops.smp_init_cpus(); 138 smp_ops.smp_init_cpus();
139} 139}
140 140
141int __cpuinit boot_secondary(unsigned int cpu, struct task_struct *idle) 141int boot_secondary(unsigned int cpu, struct task_struct *idle)
142{ 142{
143 if (smp_ops.smp_boot_secondary) 143 if (smp_ops.smp_boot_secondary)
144 return smp_ops.smp_boot_secondary(cpu, idle); 144 return smp_ops.smp_boot_secondary(cpu, idle);
@@ -170,7 +170,7 @@ static int platform_cpu_disable(unsigned int cpu)
170/* 170/*
171 * __cpu_disable runs on the processor to be shutdown. 171 * __cpu_disable runs on the processor to be shutdown.
172 */ 172 */
173int __cpuinit __cpu_disable(void) 173int __cpu_disable(void)
174{ 174{
175 unsigned int cpu = smp_processor_id(); 175 unsigned int cpu = smp_processor_id();
176 int ret; 176 int ret;
@@ -216,7 +216,7 @@ static DECLARE_COMPLETION(cpu_died);
216 * called on the thread which is asking for a CPU to be shutdown - 216 * called on the thread which is asking for a CPU to be shutdown -
217 * waits until shutdown has completed, or it is timed out. 217 * waits until shutdown has completed, or it is timed out.
218 */ 218 */
219void __cpuinit __cpu_die(unsigned int cpu) 219void __cpu_die(unsigned int cpu)
220{ 220{
221 if (!wait_for_completion_timeout(&cpu_died, msecs_to_jiffies(5000))) { 221 if (!wait_for_completion_timeout(&cpu_died, msecs_to_jiffies(5000))) {
222 pr_err("CPU%u: cpu didn't die\n", cpu); 222 pr_err("CPU%u: cpu didn't die\n", cpu);
@@ -306,7 +306,7 @@ void __ref cpu_die(void)
306 * Called by both boot and secondaries to move global data into 306 * Called by both boot and secondaries to move global data into
307 * per-processor storage. 307 * per-processor storage.
308 */ 308 */
309static void __cpuinit smp_store_cpu_info(unsigned int cpuid) 309static void smp_store_cpu_info(unsigned int cpuid)
310{ 310{
311 struct cpuinfo_arm *cpu_info = &per_cpu(cpu_data, cpuid); 311 struct cpuinfo_arm *cpu_info = &per_cpu(cpu_data, cpuid);
312 312
@@ -322,7 +322,7 @@ static void percpu_timer_setup(void);
322 * This is the secondary CPU boot entry. We're using this CPUs 322 * This is the secondary CPU boot entry. We're using this CPUs
323 * idle thread stack, but a set of temporary page tables. 323 * idle thread stack, but a set of temporary page tables.
324 */ 324 */
325asmlinkage void __cpuinit secondary_start_kernel(void) 325asmlinkage void secondary_start_kernel(void)
326{ 326{
327 struct mm_struct *mm = &init_mm; 327 struct mm_struct *mm = &init_mm;
328 unsigned int cpu; 328 unsigned int cpu;
@@ -521,7 +521,7 @@ static void broadcast_timer_set_mode(enum clock_event_mode mode,
521{ 521{
522} 522}
523 523
524static void __cpuinit broadcast_timer_setup(struct clock_event_device *evt) 524static void broadcast_timer_setup(struct clock_event_device *evt)
525{ 525{
526 evt->name = "dummy_timer"; 526 evt->name = "dummy_timer";
527 evt->features = CLOCK_EVT_FEAT_ONESHOT | 527 evt->features = CLOCK_EVT_FEAT_ONESHOT |
@@ -550,7 +550,7 @@ int local_timer_register(struct local_timer_ops *ops)
550} 550}
551#endif 551#endif
552 552
553static void __cpuinit percpu_timer_setup(void) 553static void percpu_timer_setup(void)
554{ 554{
555 unsigned int cpu = smp_processor_id(); 555 unsigned int cpu = smp_processor_id();
556 struct clock_event_device *evt = &per_cpu(percpu_clockevent, cpu); 556 struct clock_event_device *evt = &per_cpu(percpu_clockevent, cpu);
diff --git a/arch/arm/kernel/smp_tlb.c b/arch/arm/kernel/smp_tlb.c
index a98b62dca2fa..c2edfff573c2 100644
--- a/arch/arm/kernel/smp_tlb.c
+++ b/arch/arm/kernel/smp_tlb.c
@@ -70,23 +70,6 @@ static inline void ipi_flush_bp_all(void *ignored)
70 local_flush_bp_all(); 70 local_flush_bp_all();
71} 71}
72 72
73#ifdef CONFIG_ARM_ERRATA_798181
74static int erratum_a15_798181(void)
75{
76 unsigned int midr = read_cpuid_id();
77
78 /* Cortex-A15 r0p0..r3p2 affected */
79 if ((midr & 0xff0ffff0) != 0x410fc0f0 || midr > 0x413fc0f2)
80 return 0;
81 return 1;
82}
83#else
84static int erratum_a15_798181(void)
85{
86 return 0;
87}
88#endif
89
90static void ipi_flush_tlb_a15_erratum(void *arg) 73static void ipi_flush_tlb_a15_erratum(void *arg)
91{ 74{
92 dmb(); 75 dmb();
diff --git a/arch/arm/kernel/smp_twd.c b/arch/arm/kernel/smp_twd.c
index f6fd1d4398c6..25956204ef23 100644
--- a/arch/arm/kernel/smp_twd.c
+++ b/arch/arm/kernel/smp_twd.c
@@ -187,7 +187,7 @@ core_initcall(twd_cpufreq_init);
187 187
188#endif 188#endif
189 189
190static void __cpuinit twd_calibrate_rate(void) 190static void twd_calibrate_rate(void)
191{ 191{
192 unsigned long count; 192 unsigned long count;
193 u64 waitjiffies; 193 u64 waitjiffies;
@@ -265,7 +265,7 @@ static void twd_get_clock(struct device_node *np)
265/* 265/*
266 * Setup the local clock events for a CPU. 266 * Setup the local clock events for a CPU.
267 */ 267 */
268static int __cpuinit twd_timer_setup(struct clock_event_device *clk) 268static int twd_timer_setup(struct clock_event_device *clk)
269{ 269{
270 struct clock_event_device **this_cpu_clk; 270 struct clock_event_device **this_cpu_clk;
271 int cpu = smp_processor_id(); 271 int cpu = smp_processor_id();
@@ -308,7 +308,7 @@ static int __cpuinit twd_timer_setup(struct clock_event_device *clk)
308 return 0; 308 return 0;
309} 309}
310 310
311static struct local_timer_ops twd_lt_ops __cpuinitdata = { 311static struct local_timer_ops twd_lt_ops = {
312 .setup = twd_timer_setup, 312 .setup = twd_timer_setup,
313 .stop = twd_timer_stop, 313 .stop = twd_timer_stop,
314}; 314};
diff --git a/arch/arm/kernel/traps.c b/arch/arm/kernel/traps.c
index cab094c234ee..ab517fcce21b 100644
--- a/arch/arm/kernel/traps.c
+++ b/arch/arm/kernel/traps.c
@@ -35,8 +35,6 @@
35#include <asm/tls.h> 35#include <asm/tls.h>
36#include <asm/system_misc.h> 36#include <asm/system_misc.h>
37 37
38#include "signal.h"
39
40static const char *handler[]= { "prefetch abort", "data abort", "address exception", "interrupt" }; 38static const char *handler[]= { "prefetch abort", "data abort", "address exception", "interrupt" };
41 39
42void *vectors_page; 40void *vectors_page;
@@ -800,15 +798,26 @@ void __init trap_init(void)
800 return; 798 return;
801} 799}
802 800
803static void __init kuser_get_tls_init(unsigned long vectors) 801#ifdef CONFIG_KUSER_HELPERS
802static void __init kuser_init(void *vectors)
804{ 803{
804 extern char __kuser_helper_start[], __kuser_helper_end[];
805 int kuser_sz = __kuser_helper_end - __kuser_helper_start;
806
807 memcpy(vectors + 0x1000 - kuser_sz, __kuser_helper_start, kuser_sz);
808
805 /* 809 /*
806 * vectors + 0xfe0 = __kuser_get_tls 810 * vectors + 0xfe0 = __kuser_get_tls
807 * vectors + 0xfe8 = hardware TLS instruction at 0xffff0fe8 811 * vectors + 0xfe8 = hardware TLS instruction at 0xffff0fe8
808 */ 812 */
809 if (tls_emu || has_tls_reg) 813 if (tls_emu || has_tls_reg)
810 memcpy((void *)vectors + 0xfe0, (void *)vectors + 0xfe8, 4); 814 memcpy(vectors + 0xfe0, vectors + 0xfe8, 4);
811} 815}
816#else
817static void __init kuser_init(void *vectors)
818{
819}
820#endif
812 821
813void __init early_trap_init(void *vectors_base) 822void __init early_trap_init(void *vectors_base)
814{ 823{
@@ -816,33 +825,30 @@ void __init early_trap_init(void *vectors_base)
816 unsigned long vectors = (unsigned long)vectors_base; 825 unsigned long vectors = (unsigned long)vectors_base;
817 extern char __stubs_start[], __stubs_end[]; 826 extern char __stubs_start[], __stubs_end[];
818 extern char __vectors_start[], __vectors_end[]; 827 extern char __vectors_start[], __vectors_end[];
819 extern char __kuser_helper_start[], __kuser_helper_end[]; 828 unsigned i;
820 int kuser_sz = __kuser_helper_end - __kuser_helper_start;
821 829
822 vectors_page = vectors_base; 830 vectors_page = vectors_base;
823 831
824 /* 832 /*
833 * Poison the vectors page with an undefined instruction. This
834 * instruction is chosen to be undefined for both ARM and Thumb
835 * ISAs. The Thumb version is an undefined instruction with a
836 * branch back to the undefined instruction.
837 */
838 for (i = 0; i < PAGE_SIZE / sizeof(u32); i++)
839 ((u32 *)vectors_base)[i] = 0xe7fddef1;
840
841 /*
825 * Copy the vectors, stubs and kuser helpers (in entry-armv.S) 842 * Copy the vectors, stubs and kuser helpers (in entry-armv.S)
826 * into the vector page, mapped at 0xffff0000, and ensure these 843 * into the vector page, mapped at 0xffff0000, and ensure these
827 * are visible to the instruction stream. 844 * are visible to the instruction stream.
828 */ 845 */
829 memcpy((void *)vectors, __vectors_start, __vectors_end - __vectors_start); 846 memcpy((void *)vectors, __vectors_start, __vectors_end - __vectors_start);
830 memcpy((void *)vectors + 0x200, __stubs_start, __stubs_end - __stubs_start); 847 memcpy((void *)vectors + 0x1000, __stubs_start, __stubs_end - __stubs_start);
831 memcpy((void *)vectors + 0x1000 - kuser_sz, __kuser_helper_start, kuser_sz);
832 848
833 /* 849 kuser_init(vectors_base);
834 * Do processor specific fixups for the kuser helpers
835 */
836 kuser_get_tls_init(vectors);
837
838 /*
839 * Copy signal return handlers into the vector page, and
840 * set sigreturn to be a pointer to these.
841 */
842 memcpy((void *)(vectors + KERN_SIGRETURN_CODE - CONFIG_VECTORS_BASE),
843 sigreturn_codes, sizeof(sigreturn_codes));
844 850
845 flush_icache_range(vectors, vectors + PAGE_SIZE); 851 flush_icache_range(vectors, vectors + PAGE_SIZE * 2);
846 modify_domain(DOMAIN_USER, DOMAIN_CLIENT); 852 modify_domain(DOMAIN_USER, DOMAIN_CLIENT);
847#else /* ifndef CONFIG_CPU_V7M */ 853#else /* ifndef CONFIG_CPU_V7M */
848 /* 854 /*
diff --git a/arch/arm/kernel/vmlinux.lds.S b/arch/arm/kernel/vmlinux.lds.S
index fa25e4e425f6..7bcee5c9b604 100644
--- a/arch/arm/kernel/vmlinux.lds.S
+++ b/arch/arm/kernel/vmlinux.lds.S
@@ -148,6 +148,23 @@ SECTIONS
148 . = ALIGN(PAGE_SIZE); 148 . = ALIGN(PAGE_SIZE);
149 __init_begin = .; 149 __init_begin = .;
150#endif 150#endif
151 /*
152 * The vectors and stubs are relocatable code, and the
153 * only thing that matters is their relative offsets
154 */
155 __vectors_start = .;
156 .vectors 0 : AT(__vectors_start) {
157 *(.vectors)
158 }
159 . = __vectors_start + SIZEOF(.vectors);
160 __vectors_end = .;
161
162 __stubs_start = .;
163 .stubs 0x1000 : AT(__stubs_start) {
164 *(.stubs)
165 }
166 . = __stubs_start + SIZEOF(.stubs);
167 __stubs_end = .;
151 168
152 INIT_TEXT_SECTION(8) 169 INIT_TEXT_SECTION(8)
153 .exit.text : { 170 .exit.text : {
diff --git a/arch/arm/lib/delay.c b/arch/arm/lib/delay.c
index 64dbfa57204a..5306de350133 100644
--- a/arch/arm/lib/delay.c
+++ b/arch/arm/lib/delay.c
@@ -86,7 +86,7 @@ void __init register_current_timer_delay(const struct delay_timer *timer)
86 } 86 }
87} 87}
88 88
89unsigned long __cpuinit calibrate_delay_is_known(void) 89unsigned long calibrate_delay_is_known(void)
90{ 90{
91 delay_calibrated = true; 91 delay_calibrated = true;
92 return lpj_fine; 92 return lpj_fine;
diff --git a/arch/arm/mach-davinci/board-dm365-evm.c b/arch/arm/mach-davinci/board-dm365-evm.c
index afbc439f11d4..4cdb61c54459 100644
--- a/arch/arm/mach-davinci/board-dm365-evm.c
+++ b/arch/arm/mach-davinci/board-dm365-evm.c
@@ -505,7 +505,7 @@ static struct vpbe_output dm365evm_vpbe_outputs[] = {
505/* 505/*
506 * Amplifiers on the board 506 * Amplifiers on the board
507 */ 507 */
508struct ths7303_platform_data ths7303_pdata = { 508static struct ths7303_platform_data ths7303_pdata = {
509 .ch_1 = 3, 509 .ch_1 = 3,
510 .ch_2 = 3, 510 .ch_2 = 3,
511 .ch_3 = 3, 511 .ch_3 = 3,
diff --git a/arch/arm/mach-davinci/dm355.c b/arch/arm/mach-davinci/dm355.c
index 42ef53f62c6c..86100d179694 100644
--- a/arch/arm/mach-davinci/dm355.c
+++ b/arch/arm/mach-davinci/dm355.c
@@ -860,7 +860,7 @@ static struct platform_device dm355_vpbe_display = {
860 }, 860 },
861}; 861};
862 862
863struct venc_platform_data dm355_venc_pdata = { 863static struct venc_platform_data dm355_venc_pdata = {
864 .setup_pinmux = dm355_vpbe_setup_pinmux, 864 .setup_pinmux = dm355_vpbe_setup_pinmux,
865 .setup_clock = dm355_venc_setup_clock, 865 .setup_clock = dm355_venc_setup_clock,
866}; 866};
diff --git a/arch/arm/mach-davinci/dm365.c b/arch/arm/mach-davinci/dm365.c
index fa7af5eda52d..dad28029ba9b 100644
--- a/arch/arm/mach-davinci/dm365.c
+++ b/arch/arm/mach-davinci/dm365.c
@@ -1349,7 +1349,7 @@ static struct platform_device dm365_vpbe_display = {
1349 }, 1349 },
1350}; 1350};
1351 1351
1352struct venc_platform_data dm365_venc_pdata = { 1352static struct venc_platform_data dm365_venc_pdata = {
1353 .setup_pinmux = dm365_vpbe_setup_pinmux, 1353 .setup_pinmux = dm365_vpbe_setup_pinmux,
1354 .setup_clock = dm365_venc_setup_clock, 1354 .setup_clock = dm365_venc_setup_clock,
1355}; 1355};
diff --git a/arch/arm/mach-exynos/Kconfig b/arch/arm/mach-exynos/Kconfig
index 855d4a7b462d..5952e68c76c4 100644
--- a/arch/arm/mach-exynos/Kconfig
+++ b/arch/arm/mach-exynos/Kconfig
@@ -92,6 +92,7 @@ config SOC_EXYNOS5440
92 bool "SAMSUNG EXYNOS5440" 92 bool "SAMSUNG EXYNOS5440"
93 default y 93 default y
94 depends on ARCH_EXYNOS5 94 depends on ARCH_EXYNOS5
95 select ARCH_DMA_ADDR_T_64BIT if ARM_LPAE
95 select ARCH_HAS_OPP 96 select ARCH_HAS_OPP
96 select HAVE_ARM_ARCH_TIMER 97 select HAVE_ARM_ARCH_TIMER
97 select AUTO_ZRELADDR 98 select AUTO_ZRELADDR
diff --git a/arch/arm/mach-exynos/Makefile b/arch/arm/mach-exynos/Makefile
index e970a7a4e278..53696154aead 100644
--- a/arch/arm/mach-exynos/Makefile
+++ b/arch/arm/mach-exynos/Makefile
@@ -14,7 +14,7 @@ obj- :=
14 14
15obj-$(CONFIG_ARCH_EXYNOS) += common.o 15obj-$(CONFIG_ARCH_EXYNOS) += common.o
16 16
17obj-$(CONFIG_PM) += pm.o 17obj-$(CONFIG_S5P_PM) += pm.o
18obj-$(CONFIG_PM_GENERIC_DOMAINS) += pm_domains.o 18obj-$(CONFIG_PM_GENERIC_DOMAINS) += pm_domains.o
19obj-$(CONFIG_CPU_IDLE) += cpuidle.o 19obj-$(CONFIG_CPU_IDLE) += cpuidle.o
20 20
diff --git a/arch/arm/mach-exynos/common.c b/arch/arm/mach-exynos/common.c
index 164685bd25c8..ba95e5db2501 100644
--- a/arch/arm/mach-exynos/common.c
+++ b/arch/arm/mach-exynos/common.c
@@ -58,7 +58,6 @@ static const char name_exynos5440[] = "EXYNOS5440";
58 58
59static void exynos4_map_io(void); 59static void exynos4_map_io(void);
60static void exynos5_map_io(void); 60static void exynos5_map_io(void);
61static void exynos5440_map_io(void);
62static int exynos_init(void); 61static int exynos_init(void);
63 62
64static struct cpu_table cpu_ids[] __initdata = { 63static struct cpu_table cpu_ids[] __initdata = {
@@ -95,7 +94,6 @@ static struct cpu_table cpu_ids[] __initdata = {
95 }, { 94 }, {
96 .idcode = EXYNOS5440_SOC_ID, 95 .idcode = EXYNOS5440_SOC_ID,
97 .idmask = EXYNOS5_SOC_MASK, 96 .idmask = EXYNOS5_SOC_MASK,
98 .map_io = exynos5440_map_io,
99 .init = exynos_init, 97 .init = exynos_init,
100 .name = name_exynos5440, 98 .name = name_exynos5440,
101 }, 99 },
@@ -150,11 +148,6 @@ static struct map_desc exynos4_iodesc[] __initdata = {
150 .length = SZ_64K, 148 .length = SZ_64K,
151 .type = MT_DEVICE, 149 .type = MT_DEVICE,
152 }, { 150 }, {
153 .virtual = (unsigned long)S3C_VA_UART,
154 .pfn = __phys_to_pfn(EXYNOS4_PA_UART),
155 .length = SZ_512K,
156 .type = MT_DEVICE,
157 }, {
158 .virtual = (unsigned long)S5P_VA_CMU, 151 .virtual = (unsigned long)S5P_VA_CMU,
159 .pfn = __phys_to_pfn(EXYNOS4_PA_CMU), 152 .pfn = __phys_to_pfn(EXYNOS4_PA_CMU),
160 .length = SZ_128K, 153 .length = SZ_128K,
@@ -268,20 +261,6 @@ static struct map_desc exynos5_iodesc[] __initdata = {
268 .pfn = __phys_to_pfn(EXYNOS5_PA_PMU), 261 .pfn = __phys_to_pfn(EXYNOS5_PA_PMU),
269 .length = SZ_64K, 262 .length = SZ_64K,
270 .type = MT_DEVICE, 263 .type = MT_DEVICE,
271 }, {
272 .virtual = (unsigned long)S3C_VA_UART,
273 .pfn = __phys_to_pfn(EXYNOS5_PA_UART),
274 .length = SZ_512K,
275 .type = MT_DEVICE,
276 },
277};
278
279static struct map_desc exynos5440_iodesc0[] __initdata = {
280 {
281 .virtual = (unsigned long)S3C_VA_UART,
282 .pfn = __phys_to_pfn(EXYNOS5440_PA_UART0),
283 .length = SZ_512K,
284 .type = MT_DEVICE,
285 }, 264 },
286}; 265};
287 266
@@ -388,11 +367,6 @@ static void __init exynos5_map_io(void)
388 iotable_init(exynos5250_iodesc, ARRAY_SIZE(exynos5250_iodesc)); 367 iotable_init(exynos5250_iodesc, ARRAY_SIZE(exynos5250_iodesc));
389} 368}
390 369
391static void __init exynos5440_map_io(void)
392{
393 iotable_init(exynos5440_iodesc0, ARRAY_SIZE(exynos5440_iodesc0));
394}
395
396void __init exynos_init_time(void) 370void __init exynos_init_time(void)
397{ 371{
398 of_clk_init(NULL); 372 of_clk_init(NULL);
diff --git a/arch/arm/mach-exynos/common.h b/arch/arm/mach-exynos/common.h
index 3e156bcddcb4..972490fc09d6 100644
--- a/arch/arm/mach-exynos/common.h
+++ b/arch/arm/mach-exynos/common.h
@@ -97,6 +97,5 @@ struct exynos_pmu_conf {
97}; 97};
98 98
99extern void exynos_sys_powerdown_conf(enum sys_powerdown mode); 99extern void exynos_sys_powerdown_conf(enum sys_powerdown mode);
100extern void s3c_cpu_resume(void);
101 100
102#endif /* __ARCH_ARM_MACH_EXYNOS_COMMON_H */ 101#endif /* __ARCH_ARM_MACH_EXYNOS_COMMON_H */
diff --git a/arch/arm/mach-exynos/cpuidle.c b/arch/arm/mach-exynos/cpuidle.c
index 17a18ff3d71e..225ee8431c72 100644
--- a/arch/arm/mach-exynos/cpuidle.c
+++ b/arch/arm/mach-exynos/cpuidle.c
@@ -25,6 +25,7 @@
25#include <mach/regs-pmu.h> 25#include <mach/regs-pmu.h>
26 26
27#include <plat/cpu.h> 27#include <plat/cpu.h>
28#include <plat/pm.h>
28 29
29#include "common.h" 30#include "common.h"
30 31
diff --git a/arch/arm/mach-exynos/headsmp.S b/arch/arm/mach-exynos/headsmp.S
index 5364d4bfa8bc..cdd9d91e9933 100644
--- a/arch/arm/mach-exynos/headsmp.S
+++ b/arch/arm/mach-exynos/headsmp.S
@@ -13,8 +13,6 @@
13#include <linux/linkage.h> 13#include <linux/linkage.h>
14#include <linux/init.h> 14#include <linux/init.h>
15 15
16 __CPUINIT
17
18/* 16/*
19 * exynos4 specific entry point for secondary CPUs. This provides 17 * exynos4 specific entry point for secondary CPUs. This provides
20 * a "holding pen" into which all secondary cores are held until we're 18 * a "holding pen" into which all secondary cores are held until we're
diff --git a/arch/arm/mach-exynos/include/mach/memory.h b/arch/arm/mach-exynos/include/mach/memory.h
index 374ef2cf7152..2a4cdb7cb326 100644
--- a/arch/arm/mach-exynos/include/mach/memory.h
+++ b/arch/arm/mach-exynos/include/mach/memory.h
@@ -15,8 +15,13 @@
15 15
16#define PLAT_PHYS_OFFSET UL(0x40000000) 16#define PLAT_PHYS_OFFSET UL(0x40000000)
17 17
18#ifndef CONFIG_ARM_LPAE
18/* Maximum of 256MiB in one bank */ 19/* Maximum of 256MiB in one bank */
19#define MAX_PHYSMEM_BITS 32 20#define MAX_PHYSMEM_BITS 32
20#define SECTION_SIZE_BITS 28 21#define SECTION_SIZE_BITS 28
22#else
23#define MAX_PHYSMEM_BITS 36
24#define SECTION_SIZE_BITS 31
25#endif
21 26
22#endif /* __ASM_ARCH_MEMORY_H */ 27#endif /* __ASM_ARCH_MEMORY_H */
diff --git a/arch/arm/mach-exynos/platsmp.c b/arch/arm/mach-exynos/platsmp.c
index deba1308ff16..58b43e6f9262 100644
--- a/arch/arm/mach-exynos/platsmp.c
+++ b/arch/arm/mach-exynos/platsmp.c
@@ -75,7 +75,7 @@ static void __iomem *scu_base_addr(void)
75 75
76static DEFINE_SPINLOCK(boot_lock); 76static DEFINE_SPINLOCK(boot_lock);
77 77
78static void __cpuinit exynos_secondary_init(unsigned int cpu) 78static void exynos_secondary_init(unsigned int cpu)
79{ 79{
80 /* 80 /*
81 * let the primary processor know we're out of the 81 * let the primary processor know we're out of the
@@ -90,7 +90,7 @@ static void __cpuinit exynos_secondary_init(unsigned int cpu)
90 spin_unlock(&boot_lock); 90 spin_unlock(&boot_lock);
91} 91}
92 92
93static int __cpuinit exynos_boot_secondary(unsigned int cpu, struct task_struct *idle) 93static int exynos_boot_secondary(unsigned int cpu, struct task_struct *idle)
94{ 94{
95 unsigned long timeout; 95 unsigned long timeout;
96 unsigned long phys_cpu = cpu_logical_map(cpu); 96 unsigned long phys_cpu = cpu_logical_map(cpu);
diff --git a/arch/arm/mach-exynos/pm.c b/arch/arm/mach-exynos/pm.c
index 41c20692a13f..c679db577269 100644
--- a/arch/arm/mach-exynos/pm.c
+++ b/arch/arm/mach-exynos/pm.c
@@ -217,6 +217,9 @@ static __init int exynos_pm_drvinit(void)
217 struct clk *pll_base; 217 struct clk *pll_base;
218 unsigned int tmp; 218 unsigned int tmp;
219 219
220 if (soc_is_exynos5440())
221 return 0;
222
220 s3c_pm_init(); 223 s3c_pm_init();
221 224
222 /* All wakeup disable */ 225 /* All wakeup disable */
@@ -340,6 +343,9 @@ static struct syscore_ops exynos_pm_syscore_ops = {
340 343
341static __init int exynos_pm_syscore_init(void) 344static __init int exynos_pm_syscore_init(void)
342{ 345{
346 if (soc_is_exynos5440())
347 return 0;
348
343 register_syscore_ops(&exynos_pm_syscore_ops); 349 register_syscore_ops(&exynos_pm_syscore_ops);
344 return 0; 350 return 0;
345} 351}
diff --git a/arch/arm/mach-footbridge/dc21285.c b/arch/arm/mach-footbridge/dc21285.c
index a7cd2cf5e08d..3490a24f969e 100644
--- a/arch/arm/mach-footbridge/dc21285.c
+++ b/arch/arm/mach-footbridge/dc21285.c
@@ -276,8 +276,6 @@ int __init dc21285_setup(int nr, struct pci_sys_data *sys)
276 276
277 sys->mem_offset = DC21285_PCI_MEM; 277 sys->mem_offset = DC21285_PCI_MEM;
278 278
279 pci_ioremap_io(0, DC21285_PCI_IO);
280
281 pci_add_resource_offset(&sys->resources, &res[0], sys->mem_offset); 279 pci_add_resource_offset(&sys->resources, &res[0], sys->mem_offset);
282 pci_add_resource_offset(&sys->resources, &res[1], sys->mem_offset); 280 pci_add_resource_offset(&sys->resources, &res[1], sys->mem_offset);
283 281
diff --git a/arch/arm/mach-highbank/highbank.c b/arch/arm/mach-highbank/highbank.c
index dc5d6becd8c7..88815795fe26 100644
--- a/arch/arm/mach-highbank/highbank.c
+++ b/arch/arm/mach-highbank/highbank.c
@@ -115,6 +115,7 @@ static int highbank_platform_notifier(struct notifier_block *nb,
115{ 115{
116 struct resource *res; 116 struct resource *res;
117 int reg = -1; 117 int reg = -1;
118 u32 val;
118 struct device *dev = __dev; 119 struct device *dev = __dev;
119 120
120 if (event != BUS_NOTIFY_ADD_DEVICE) 121 if (event != BUS_NOTIFY_ADD_DEVICE)
@@ -141,10 +142,10 @@ static int highbank_platform_notifier(struct notifier_block *nb,
141 return NOTIFY_DONE; 142 return NOTIFY_DONE;
142 143
143 if (of_property_read_bool(dev->of_node, "dma-coherent")) { 144 if (of_property_read_bool(dev->of_node, "dma-coherent")) {
144 writel(0xff31, sregs_base + reg); 145 val = readl(sregs_base + reg);
146 writel(val | 0xff01, sregs_base + reg);
145 set_dma_ops(dev, &arm_coherent_dma_ops); 147 set_dma_ops(dev, &arm_coherent_dma_ops);
146 } else 148 }
147 writel(0, sregs_base + reg);
148 149
149 return NOTIFY_OK; 150 return NOTIFY_OK;
150} 151}
diff --git a/arch/arm/mach-highbank/platsmp.c b/arch/arm/mach-highbank/platsmp.c
index a984573e0d02..32d75cf55cbc 100644
--- a/arch/arm/mach-highbank/platsmp.c
+++ b/arch/arm/mach-highbank/platsmp.c
@@ -24,7 +24,7 @@
24 24
25extern void secondary_startup(void); 25extern void secondary_startup(void);
26 26
27static int __cpuinit highbank_boot_secondary(unsigned int cpu, struct task_struct *idle) 27static int highbank_boot_secondary(unsigned int cpu, struct task_struct *idle)
28{ 28{
29 highbank_set_cpu_jump(cpu, secondary_startup); 29 highbank_set_cpu_jump(cpu, secondary_startup);
30 arch_send_wakeup_ipi_mask(cpumask_of(cpu)); 30 arch_send_wakeup_ipi_mask(cpumask_of(cpu));
diff --git a/arch/arm/mach-imx/clk-imx6q.c b/arch/arm/mach-imx/clk-imx6q.c
index 4282e99f5ca1..86567d980b07 100644
--- a/arch/arm/mach-imx/clk-imx6q.c
+++ b/arch/arm/mach-imx/clk-imx6q.c
@@ -199,7 +199,8 @@ static const char *pcie_axi_sels[] = { "axi", "ahb", };
199static const char *ssi_sels[] = { "pll3_pfd2_508m", "pll3_pfd3_454m", "pll4_post_div", }; 199static const char *ssi_sels[] = { "pll3_pfd2_508m", "pll3_pfd3_454m", "pll4_post_div", };
200static const char *usdhc_sels[] = { "pll2_pfd2_396m", "pll2_pfd0_352m", }; 200static const char *usdhc_sels[] = { "pll2_pfd2_396m", "pll2_pfd0_352m", };
201static const char *enfc_sels[] = { "pll2_pfd0_352m", "pll2_bus", "pll3_usb_otg", "pll2_pfd2_396m", }; 201static const char *enfc_sels[] = { "pll2_pfd0_352m", "pll2_bus", "pll3_usb_otg", "pll2_pfd2_396m", };
202static const char *emi_sels[] = { "axi", "pll3_usb_otg", "pll2_pfd2_396m", "pll2_pfd0_352m", }; 202static const char *emi_sels[] = { "pll2_pfd2_396m", "pll3_usb_otg", "axi", "pll2_pfd0_352m", };
203static const char *emi_slow_sels[] = { "axi", "pll3_usb_otg", "pll2_pfd2_396m", "pll2_pfd0_352m", };
203static const char *vdo_axi_sels[] = { "axi", "ahb", }; 204static const char *vdo_axi_sels[] = { "axi", "ahb", };
204static const char *vpu_axi_sels[] = { "axi", "pll2_pfd2_396m", "pll2_pfd0_352m", }; 205static const char *vpu_axi_sels[] = { "axi", "pll2_pfd2_396m", "pll2_pfd0_352m", };
205static const char *cko1_sels[] = { "pll3_usb_otg", "pll2_bus", "pll1_sys", "pll5_video_div", 206static const char *cko1_sels[] = { "pll3_usb_otg", "pll2_bus", "pll1_sys", "pll5_video_div",
@@ -392,7 +393,7 @@ static void __init imx6q_clocks_init(struct device_node *ccm_node)
392 clk[usdhc4_sel] = imx_clk_mux("usdhc4_sel", base + 0x1c, 19, 1, usdhc_sels, ARRAY_SIZE(usdhc_sels)); 393 clk[usdhc4_sel] = imx_clk_mux("usdhc4_sel", base + 0x1c, 19, 1, usdhc_sels, ARRAY_SIZE(usdhc_sels));
393 clk[enfc_sel] = imx_clk_mux("enfc_sel", base + 0x2c, 16, 2, enfc_sels, ARRAY_SIZE(enfc_sels)); 394 clk[enfc_sel] = imx_clk_mux("enfc_sel", base + 0x2c, 16, 2, enfc_sels, ARRAY_SIZE(enfc_sels));
394 clk[emi_sel] = imx_clk_mux("emi_sel", base + 0x1c, 27, 2, emi_sels, ARRAY_SIZE(emi_sels)); 395 clk[emi_sel] = imx_clk_mux("emi_sel", base + 0x1c, 27, 2, emi_sels, ARRAY_SIZE(emi_sels));
395 clk[emi_slow_sel] = imx_clk_mux("emi_slow_sel", base + 0x1c, 29, 2, emi_sels, ARRAY_SIZE(emi_sels)); 396 clk[emi_slow_sel] = imx_clk_mux("emi_slow_sel", base + 0x1c, 29, 2, emi_slow_sels, ARRAY_SIZE(emi_slow_sels));
396 clk[vdo_axi_sel] = imx_clk_mux("vdo_axi_sel", base + 0x18, 11, 1, vdo_axi_sels, ARRAY_SIZE(vdo_axi_sels)); 397 clk[vdo_axi_sel] = imx_clk_mux("vdo_axi_sel", base + 0x18, 11, 1, vdo_axi_sels, ARRAY_SIZE(vdo_axi_sels));
397 clk[vpu_axi_sel] = imx_clk_mux("vpu_axi_sel", base + 0x18, 14, 2, vpu_axi_sels, ARRAY_SIZE(vpu_axi_sels)); 398 clk[vpu_axi_sel] = imx_clk_mux("vpu_axi_sel", base + 0x18, 14, 2, vpu_axi_sels, ARRAY_SIZE(vpu_axi_sels));
398 clk[cko1_sel] = imx_clk_mux("cko1_sel", base + 0x60, 0, 4, cko1_sels, ARRAY_SIZE(cko1_sels)); 399 clk[cko1_sel] = imx_clk_mux("cko1_sel", base + 0x60, 0, 4, cko1_sels, ARRAY_SIZE(cko1_sels));
diff --git a/arch/arm/mach-imx/clk-vf610.c b/arch/arm/mach-imx/clk-vf610.c
index d617c0b7c809..b169a396d93b 100644
--- a/arch/arm/mach-imx/clk-vf610.c
+++ b/arch/arm/mach-imx/clk-vf610.c
@@ -183,6 +183,8 @@ static void __init vf610_clocks_init(struct device_node *ccm_node)
183 clk[VF610_CLK_ENET_TS_SEL] = imx_clk_mux("enet_ts_sel", CCM_CSCMR2, 0, 3, enet_ts_sels, 7); 183 clk[VF610_CLK_ENET_TS_SEL] = imx_clk_mux("enet_ts_sel", CCM_CSCMR2, 0, 3, enet_ts_sels, 7);
184 clk[VF610_CLK_ENET] = imx_clk_gate("enet", "enet_sel", CCM_CSCDR1, 24); 184 clk[VF610_CLK_ENET] = imx_clk_gate("enet", "enet_sel", CCM_CSCDR1, 24);
185 clk[VF610_CLK_ENET_TS] = imx_clk_gate("enet_ts", "enet_ts_sel", CCM_CSCDR1, 23); 185 clk[VF610_CLK_ENET_TS] = imx_clk_gate("enet_ts", "enet_ts_sel", CCM_CSCDR1, 23);
186 clk[VF610_CLK_ENET0] = imx_clk_gate2("enet0", "ipg_bus", CCM_CCGR9, CCM_CCGRx_CGn(0));
187 clk[VF610_CLK_ENET1] = imx_clk_gate2("enet1", "ipg_bus", CCM_CCGR9, CCM_CCGRx_CGn(1));
186 188
187 clk[VF610_CLK_PIT] = imx_clk_gate2("pit", "ipg_bus", CCM_CCGR1, CCM_CCGRx_CGn(7)); 189 clk[VF610_CLK_PIT] = imx_clk_gate2("pit", "ipg_bus", CCM_CCGR1, CCM_CCGRx_CGn(7));
188 190
diff --git a/arch/arm/mach-imx/mx27.h b/arch/arm/mach-imx/mx27.h
index e074616d54ca..8a65f192e7f3 100644
--- a/arch/arm/mach-imx/mx27.h
+++ b/arch/arm/mach-imx/mx27.h
@@ -135,7 +135,7 @@
135#define MX27_INT_GPT4 (NR_IRQS_LEGACY + 4) 135#define MX27_INT_GPT4 (NR_IRQS_LEGACY + 4)
136#define MX27_INT_RTIC (NR_IRQS_LEGACY + 5) 136#define MX27_INT_RTIC (NR_IRQS_LEGACY + 5)
137#define MX27_INT_CSPI3 (NR_IRQS_LEGACY + 6) 137#define MX27_INT_CSPI3 (NR_IRQS_LEGACY + 6)
138#define MX27_INT_SDHC (NR_IRQS_LEGACY + 7) 138#define MX27_INT_MSHC (NR_IRQS_LEGACY + 7)
139#define MX27_INT_GPIO (NR_IRQS_LEGACY + 8) 139#define MX27_INT_GPIO (NR_IRQS_LEGACY + 8)
140#define MX27_INT_SDHC3 (NR_IRQS_LEGACY + 9) 140#define MX27_INT_SDHC3 (NR_IRQS_LEGACY + 9)
141#define MX27_INT_SDHC2 (NR_IRQS_LEGACY + 10) 141#define MX27_INT_SDHC2 (NR_IRQS_LEGACY + 10)
diff --git a/arch/arm/mach-imx/platsmp.c b/arch/arm/mach-imx/platsmp.c
index c6e1ab544882..1f24c1fdfea4 100644
--- a/arch/arm/mach-imx/platsmp.c
+++ b/arch/arm/mach-imx/platsmp.c
@@ -53,7 +53,7 @@ void imx_scu_standby_enable(void)
53 writel_relaxed(val, scu_base); 53 writel_relaxed(val, scu_base);
54} 54}
55 55
56static int __cpuinit imx_boot_secondary(unsigned int cpu, struct task_struct *idle) 56static int imx_boot_secondary(unsigned int cpu, struct task_struct *idle)
57{ 57{
58 imx_set_cpu_jump(cpu, v7_secondary_startup); 58 imx_set_cpu_jump(cpu, v7_secondary_startup);
59 imx_enable_cpu(cpu, true); 59 imx_enable_cpu(cpu, true);
diff --git a/arch/arm/mach-keystone/keystone.c b/arch/arm/mach-keystone/keystone.c
index fe4d9ff93a7e..b661c5c2870a 100644
--- a/arch/arm/mach-keystone/keystone.c
+++ b/arch/arm/mach-keystone/keystone.c
@@ -49,7 +49,7 @@ static const char *keystone_match[] __initconst = {
49 NULL, 49 NULL,
50}; 50};
51 51
52void keystone_restart(char mode, const char *cmd) 52void keystone_restart(enum reboot_mode mode, const char *cmd)
53{ 53{
54 u32 val; 54 u32 val;
55 55
diff --git a/arch/arm/mach-keystone/platsmp.c b/arch/arm/mach-keystone/platsmp.c
index 1d4181e1daf2..14378e3fef16 100644
--- a/arch/arm/mach-keystone/platsmp.c
+++ b/arch/arm/mach-keystone/platsmp.c
@@ -21,7 +21,7 @@
21 21
22#include "keystone.h" 22#include "keystone.h"
23 23
24static int __cpuinit keystone_smp_boot_secondary(unsigned int cpu, 24static int keystone_smp_boot_secondary(unsigned int cpu,
25 struct task_struct *idle) 25 struct task_struct *idle)
26{ 26{
27 unsigned long start = virt_to_phys(&secondary_startup); 27 unsigned long start = virt_to_phys(&secondary_startup);
diff --git a/arch/arm/mach-msm/Kconfig b/arch/arm/mach-msm/Kconfig
index 614e41e7881b..905efc8cac79 100644
--- a/arch/arm/mach-msm/Kconfig
+++ b/arch/arm/mach-msm/Kconfig
@@ -121,8 +121,7 @@ config MSM_SMD
121 bool 121 bool
122 122
123config MSM_GPIOMUX 123config MSM_GPIOMUX
124 depends on !(ARCH_MSM8X60 || ARCH_MSM8960) 124 bool
125 bool "MSM V1 TLMM GPIOMUX architecture"
126 help 125 help
127 Support for MSM V1 TLMM GPIOMUX architecture. 126 Support for MSM V1 TLMM GPIOMUX architecture.
128 127
diff --git a/arch/arm/mach-msm/gpiomux-v1.c b/arch/arm/mach-msm/gpiomux-v1.c
deleted file mode 100644
index 27de2abd7144..000000000000
--- a/arch/arm/mach-msm/gpiomux-v1.c
+++ /dev/null
@@ -1,33 +0,0 @@
1/* Copyright (c) 2010, Code Aurora Forum. All rights reserved.
2 *
3 * This program is free software; you can redistribute it and/or modify
4 * it under the terms of the GNU General Public License version 2 and
5 * only version 2 as published by the Free Software Foundation.
6 *
7 * This program is distributed in the hope that it will be useful,
8 * but WITHOUT ANY WARRANTY; without even the implied warranty of
9 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
10 * GNU General Public License for more details.
11 *
12 * You should have received a copy of the GNU General Public License
13 * along with this program; if not, write to the Free Software
14 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
15 * 02110-1301, USA.
16 */
17#include <linux/kernel.h>
18#include "gpiomux.h"
19#include "proc_comm.h"
20
21void __msm_gpiomux_write(unsigned gpio, gpiomux_config_t val)
22{
23 unsigned tlmm_config = (val & ~GPIOMUX_CTL_MASK) |
24 ((gpio & 0x3ff) << 4);
25 unsigned tlmm_disable = 0;
26 int rc;
27
28 rc = msm_proc_comm(PCOM_RPC_GPIO_TLMM_CONFIG_EX,
29 &tlmm_config, &tlmm_disable);
30 if (rc)
31 pr_err("%s: unexpected proc_comm failure %d: %08x %08x\n",
32 __func__, rc, tlmm_config, tlmm_disable);
33}
diff --git a/arch/arm/mach-msm/gpiomux.h b/arch/arm/mach-msm/gpiomux.h
index 8e82f41a8923..4410d7766f93 100644
--- a/arch/arm/mach-msm/gpiomux.h
+++ b/arch/arm/mach-msm/gpiomux.h
@@ -73,16 +73,6 @@ extern struct msm_gpiomux_config msm_gpiomux_configs[GPIOMUX_NGPIOS];
73int msm_gpiomux_write(unsigned gpio, 73int msm_gpiomux_write(unsigned gpio,
74 gpiomux_config_t active, 74 gpiomux_config_t active,
75 gpiomux_config_t suspended); 75 gpiomux_config_t suspended);
76
77/* Architecture-internal function for use by the framework only.
78 * This function can assume the following:
79 * - the gpio value has passed a bounds-check
80 * - the gpiomux spinlock has been obtained
81 *
82 * This function is not for public consumption. External users
83 * should use msm_gpiomux_write.
84 */
85void __msm_gpiomux_write(unsigned gpio, gpiomux_config_t val);
86#else 76#else
87static inline int msm_gpiomux_write(unsigned gpio, 77static inline int msm_gpiomux_write(unsigned gpio,
88 gpiomux_config_t active, 78 gpiomux_config_t active,
diff --git a/arch/arm/mach-msm/headsmp.S b/arch/arm/mach-msm/headsmp.S
index bcd5af223dea..6c62c3f82fe6 100644
--- a/arch/arm/mach-msm/headsmp.S
+++ b/arch/arm/mach-msm/headsmp.S
@@ -11,8 +11,6 @@
11#include <linux/linkage.h> 11#include <linux/linkage.h>
12#include <linux/init.h> 12#include <linux/init.h>
13 13
14 __CPUINIT
15
16/* 14/*
17 * MSM specific entry point for secondary CPUs. This provides 15 * MSM specific entry point for secondary CPUs. This provides
18 * a "holding pen" into which all secondary cores are held until we're 16 * a "holding pen" into which all secondary cores are held until we're
diff --git a/arch/arm/mach-msm/platsmp.c b/arch/arm/mach-msm/platsmp.c
index 00cdb0a5dac8..3f06edcdd0ce 100644
--- a/arch/arm/mach-msm/platsmp.c
+++ b/arch/arm/mach-msm/platsmp.c
@@ -38,7 +38,7 @@ static inline int get_core_count(void)
38 return ((read_cpuid_id() >> 4) & 3) + 1; 38 return ((read_cpuid_id() >> 4) & 3) + 1;
39} 39}
40 40
41static void __cpuinit msm_secondary_init(unsigned int cpu) 41static void msm_secondary_init(unsigned int cpu)
42{ 42{
43 /* 43 /*
44 * let the primary processor know we're out of the 44 * let the primary processor know we're out of the
@@ -54,7 +54,7 @@ static void __cpuinit msm_secondary_init(unsigned int cpu)
54 spin_unlock(&boot_lock); 54 spin_unlock(&boot_lock);
55} 55}
56 56
57static __cpuinit void prepare_cold_cpu(unsigned int cpu) 57static void prepare_cold_cpu(unsigned int cpu)
58{ 58{
59 int ret; 59 int ret;
60 ret = scm_set_boot_addr(virt_to_phys(msm_secondary_startup), 60 ret = scm_set_boot_addr(virt_to_phys(msm_secondary_startup),
@@ -73,7 +73,7 @@ static __cpuinit void prepare_cold_cpu(unsigned int cpu)
73 "address\n"); 73 "address\n");
74} 74}
75 75
76static int __cpuinit msm_boot_secondary(unsigned int cpu, struct task_struct *idle) 76static int msm_boot_secondary(unsigned int cpu, struct task_struct *idle)
77{ 77{
78 unsigned long timeout; 78 unsigned long timeout;
79 static int cold_boot_done; 79 static int cold_boot_done;
diff --git a/arch/arm/mach-msm/timer.c b/arch/arm/mach-msm/timer.c
index b6418fd5fe0d..8697cfc0d0b6 100644
--- a/arch/arm/mach-msm/timer.c
+++ b/arch/arm/mach-msm/timer.c
@@ -139,7 +139,7 @@ static struct clocksource msm_clocksource = {
139}; 139};
140 140
141#ifdef CONFIG_LOCAL_TIMERS 141#ifdef CONFIG_LOCAL_TIMERS
142static int __cpuinit msm_local_timer_setup(struct clock_event_device *evt) 142static int msm_local_timer_setup(struct clock_event_device *evt)
143{ 143{
144 /* Use existing clock_event for cpu 0 */ 144 /* Use existing clock_event for cpu 0 */
145 if (!smp_processor_id()) 145 if (!smp_processor_id())
@@ -164,7 +164,7 @@ static void msm_local_timer_stop(struct clock_event_device *evt)
164 disable_percpu_irq(evt->irq); 164 disable_percpu_irq(evt->irq);
165} 165}
166 166
167static struct local_timer_ops msm_local_timer_ops __cpuinitdata = { 167static struct local_timer_ops msm_local_timer_ops = {
168 .setup = msm_local_timer_setup, 168 .setup = msm_local_timer_setup,
169 .stop = msm_local_timer_stop, 169 .stop = msm_local_timer_stop,
170}; 170};
diff --git a/arch/arm/mach-mvebu/coherency.c b/arch/arm/mach-mvebu/coherency.c
index be117591f7f2..4c24303ec481 100644
--- a/arch/arm/mach-mvebu/coherency.c
+++ b/arch/arm/mach-mvebu/coherency.c
@@ -28,7 +28,7 @@
28#include <asm/cacheflush.h> 28#include <asm/cacheflush.h>
29#include "armada-370-xp.h" 29#include "armada-370-xp.h"
30 30
31unsigned long __cpuinitdata coherency_phys_base; 31unsigned long coherency_phys_base;
32static void __iomem *coherency_base; 32static void __iomem *coherency_base;
33static void __iomem *coherency_cpu_base; 33static void __iomem *coherency_cpu_base;
34 34
diff --git a/arch/arm/mach-mvebu/headsmp.S b/arch/arm/mach-mvebu/headsmp.S
index 7147300c8af2..8a1b0c96e9ec 100644
--- a/arch/arm/mach-mvebu/headsmp.S
+++ b/arch/arm/mach-mvebu/headsmp.S
@@ -21,8 +21,6 @@
21#include <linux/linkage.h> 21#include <linux/linkage.h>
22#include <linux/init.h> 22#include <linux/init.h>
23 23
24 __CPUINIT
25
26/* 24/*
27 * Armada XP specific entry point for secondary CPUs. 25 * Armada XP specific entry point for secondary CPUs.
28 * We add the CPU to the coherency fabric and then jump to secondary 26 * We add the CPU to the coherency fabric and then jump to secondary
diff --git a/arch/arm/mach-mvebu/platsmp.c b/arch/arm/mach-mvebu/platsmp.c
index 93f2f3ab45f1..ce81d3031405 100644
--- a/arch/arm/mach-mvebu/platsmp.c
+++ b/arch/arm/mach-mvebu/platsmp.c
@@ -71,13 +71,12 @@ void __init set_secondary_cpus_clock(void)
71 } 71 }
72} 72}
73 73
74static void __cpuinit armada_xp_secondary_init(unsigned int cpu) 74static void armada_xp_secondary_init(unsigned int cpu)
75{ 75{
76 armada_xp_mpic_smp_cpu_init(); 76 armada_xp_mpic_smp_cpu_init();
77} 77}
78 78
79static int __cpuinit armada_xp_boot_secondary(unsigned int cpu, 79static int armada_xp_boot_secondary(unsigned int cpu, struct task_struct *idle)
80 struct task_struct *idle)
81{ 80{
82 pr_info("Booting CPU %d\n", cpu); 81 pr_info("Booting CPU %d\n", cpu);
83 82
diff --git a/arch/arm/mach-omap2/Kconfig b/arch/arm/mach-omap2/Kconfig
index 627fa7e41fba..3eed0006d189 100644
--- a/arch/arm/mach-omap2/Kconfig
+++ b/arch/arm/mach-omap2/Kconfig
@@ -62,7 +62,7 @@ config SOC_OMAP5
62 select HAVE_SMP 62 select HAVE_SMP
63 select COMMON_CLK 63 select COMMON_CLK
64 select HAVE_ARM_ARCH_TIMER 64 select HAVE_ARM_ARCH_TIMER
65 select ARM_ERRATA_798181 65 select ARM_ERRATA_798181 if SMP
66 66
67config SOC_AM33XX 67config SOC_AM33XX
68 bool "AM33XX support" 68 bool "AM33XX support"
diff --git a/arch/arm/mach-omap2/board-generic.c b/arch/arm/mach-omap2/board-generic.c
index e5fbfed69aa2..be5d005ebad2 100644
--- a/arch/arm/mach-omap2/board-generic.c
+++ b/arch/arm/mach-omap2/board-generic.c
@@ -15,6 +15,7 @@
15#include <linux/of_irq.h> 15#include <linux/of_irq.h>
16#include <linux/of_platform.h> 16#include <linux/of_platform.h>
17#include <linux/irqdomain.h> 17#include <linux/irqdomain.h>
18#include <linux/clk.h>
18 19
19#include <asm/mach/arch.h> 20#include <asm/mach/arch.h>
20 21
@@ -35,6 +36,21 @@ static struct of_device_id omap_dt_match_table[] __initdata = {
35 { } 36 { }
36}; 37};
37 38
39/*
40 * Create alias for USB host PHY clock.
41 * Remove this when clock phandle can be provided via DT
42 */
43static void __init legacy_init_ehci_clk(char *clkname)
44{
45 int ret;
46
47 ret = clk_add_alias("main_clk", NULL, clkname, NULL);
48 if (ret) {
49 pr_err("%s:Failed to add main_clk alias to %s :%d\n",
50 __func__, clkname, ret);
51 }
52}
53
38static void __init omap_generic_init(void) 54static void __init omap_generic_init(void)
39{ 55{
40 omap_sdrc_init(NULL, NULL); 56 omap_sdrc_init(NULL, NULL);
@@ -45,10 +61,15 @@ static void __init omap_generic_init(void)
45 * HACK: call display setup code for selected boards to enable omapdss. 61 * HACK: call display setup code for selected boards to enable omapdss.
46 * This will be removed when omapdss supports DT. 62 * This will be removed when omapdss supports DT.
47 */ 63 */
48 if (of_machine_is_compatible("ti,omap4-panda")) 64 if (of_machine_is_compatible("ti,omap4-panda")) {
49 omap4_panda_display_init_of(); 65 omap4_panda_display_init_of();
66 legacy_init_ehci_clk("auxclk3_ck");
67
68 }
50 else if (of_machine_is_compatible("ti,omap4-sdp")) 69 else if (of_machine_is_compatible("ti,omap4-sdp"))
51 omap_4430sdp_display_init_of(); 70 omap_4430sdp_display_init_of();
71 else if (of_machine_is_compatible("ti,omap5-uevm"))
72 legacy_init_ehci_clk("auxclk1_ck");
52} 73}
53 74
54#ifdef CONFIG_SOC_OMAP2420 75#ifdef CONFIG_SOC_OMAP2420
diff --git a/arch/arm/mach-omap2/dss-common.c b/arch/arm/mach-omap2/dss-common.c
index 393aeefaebb0..043e5705f2a6 100644
--- a/arch/arm/mach-omap2/dss-common.c
+++ b/arch/arm/mach-omap2/dss-common.c
@@ -42,7 +42,7 @@
42 42
43/* Using generic display panel */ 43/* Using generic display panel */
44static struct tfp410_platform_data omap4_dvi_panel = { 44static struct tfp410_platform_data omap4_dvi_panel = {
45 .i2c_bus_num = 3, 45 .i2c_bus_num = 2,
46 .power_down_gpio = PANDA_DVI_TFP410_POWER_DOWN_GPIO, 46 .power_down_gpio = PANDA_DVI_TFP410_POWER_DOWN_GPIO,
47}; 47};
48 48
diff --git a/arch/arm/mach-omap2/omap-headsmp.S b/arch/arm/mach-omap2/omap-headsmp.S
index 4ea308114165..75e92952c18e 100644
--- a/arch/arm/mach-omap2/omap-headsmp.S
+++ b/arch/arm/mach-omap2/omap-headsmp.S
@@ -20,8 +20,6 @@
20 20
21#include "omap44xx.h" 21#include "omap44xx.h"
22 22
23 __CPUINIT
24
25/* Physical address needed since MMU not enabled yet on secondary core */ 23/* Physical address needed since MMU not enabled yet on secondary core */
26#define AUX_CORE_BOOT0_PA 0x48281800 24#define AUX_CORE_BOOT0_PA 0x48281800
27 25
diff --git a/arch/arm/mach-omap2/omap-mpuss-lowpower.c b/arch/arm/mach-omap2/omap-mpuss-lowpower.c
index f993a4188701..f991016e2a6a 100644
--- a/arch/arm/mach-omap2/omap-mpuss-lowpower.c
+++ b/arch/arm/mach-omap2/omap-mpuss-lowpower.c
@@ -291,7 +291,7 @@ int omap4_enter_lowpower(unsigned int cpu, unsigned int power_state)
291 * @cpu : CPU ID 291 * @cpu : CPU ID
292 * @power_state: CPU low power state. 292 * @power_state: CPU low power state.
293 */ 293 */
294int __cpuinit omap4_hotplug_cpu(unsigned int cpu, unsigned int power_state) 294int omap4_hotplug_cpu(unsigned int cpu, unsigned int power_state)
295{ 295{
296 struct omap4_cpu_pm_info *pm_info = &per_cpu(omap4_pm_info, cpu); 296 struct omap4_cpu_pm_info *pm_info = &per_cpu(omap4_pm_info, cpu);
297 unsigned int cpu_state = 0; 297 unsigned int cpu_state = 0;
diff --git a/arch/arm/mach-omap2/omap-smp.c b/arch/arm/mach-omap2/omap-smp.c
index 98a11463a843..8708b2a9da45 100644
--- a/arch/arm/mach-omap2/omap-smp.c
+++ b/arch/arm/mach-omap2/omap-smp.c
@@ -51,7 +51,7 @@ void __iomem *omap4_get_scu_base(void)
51 return scu_base; 51 return scu_base;
52} 52}
53 53
54static void __cpuinit omap4_secondary_init(unsigned int cpu) 54static void omap4_secondary_init(unsigned int cpu)
55{ 55{
56 /* 56 /*
57 * Configure ACTRL and enable NS SMP bit access on CPU1 on HS device. 57 * Configure ACTRL and enable NS SMP bit access on CPU1 on HS device.
@@ -72,7 +72,7 @@ static void __cpuinit omap4_secondary_init(unsigned int cpu)
72 spin_unlock(&boot_lock); 72 spin_unlock(&boot_lock);
73} 73}
74 74
75static int __cpuinit omap4_boot_secondary(unsigned int cpu, struct task_struct *idle) 75static int omap4_boot_secondary(unsigned int cpu, struct task_struct *idle)
76{ 76{
77 static struct clockdomain *cpu1_clkdm; 77 static struct clockdomain *cpu1_clkdm;
78 static bool booted; 78 static bool booted;
diff --git a/arch/arm/mach-omap2/omap-wakeupgen.c b/arch/arm/mach-omap2/omap-wakeupgen.c
index f8bb3b9b6a76..813c61558a5f 100644
--- a/arch/arm/mach-omap2/omap-wakeupgen.c
+++ b/arch/arm/mach-omap2/omap-wakeupgen.c
@@ -323,8 +323,8 @@ static void irq_save_secure_context(void)
323#endif 323#endif
324 324
325#ifdef CONFIG_HOTPLUG_CPU 325#ifdef CONFIG_HOTPLUG_CPU
326static int __cpuinit irq_cpu_hotplug_notify(struct notifier_block *self, 326static int irq_cpu_hotplug_notify(struct notifier_block *self,
327 unsigned long action, void *hcpu) 327 unsigned long action, void *hcpu)
328{ 328{
329 unsigned int cpu = (unsigned int)hcpu; 329 unsigned int cpu = (unsigned int)hcpu;
330 330
diff --git a/arch/arm/mach-omap2/omap_device.c b/arch/arm/mach-omap2/omap_device.c
index 5cc92874be7e..f99f68e1e85b 100644
--- a/arch/arm/mach-omap2/omap_device.c
+++ b/arch/arm/mach-omap2/omap_device.c
@@ -129,6 +129,7 @@ static int omap_device_build_from_dt(struct platform_device *pdev)
129 struct device_node *node = pdev->dev.of_node; 129 struct device_node *node = pdev->dev.of_node;
130 const char *oh_name; 130 const char *oh_name;
131 int oh_cnt, i, ret = 0; 131 int oh_cnt, i, ret = 0;
132 bool device_active = false;
132 133
133 oh_cnt = of_property_count_strings(node, "ti,hwmods"); 134 oh_cnt = of_property_count_strings(node, "ti,hwmods");
134 if (oh_cnt <= 0) { 135 if (oh_cnt <= 0) {
@@ -152,6 +153,8 @@ static int omap_device_build_from_dt(struct platform_device *pdev)
152 goto odbfd_exit1; 153 goto odbfd_exit1;
153 } 154 }
154 hwmods[i] = oh; 155 hwmods[i] = oh;
156 if (oh->flags & HWMOD_INIT_NO_IDLE)
157 device_active = true;
155 } 158 }
156 159
157 od = omap_device_alloc(pdev, hwmods, oh_cnt); 160 od = omap_device_alloc(pdev, hwmods, oh_cnt);
@@ -172,6 +175,11 @@ static int omap_device_build_from_dt(struct platform_device *pdev)
172 175
173 pdev->dev.pm_domain = &omap_device_pm_domain; 176 pdev->dev.pm_domain = &omap_device_pm_domain;
174 177
178 if (device_active) {
179 omap_device_enable(pdev);
180 pm_runtime_set_active(&pdev->dev);
181 }
182
175odbfd_exit1: 183odbfd_exit1:
176 kfree(hwmods); 184 kfree(hwmods);
177odbfd_exit: 185odbfd_exit:
@@ -842,6 +850,7 @@ static int __init omap_device_late_idle(struct device *dev, void *data)
842{ 850{
843 struct platform_device *pdev = to_platform_device(dev); 851 struct platform_device *pdev = to_platform_device(dev);
844 struct omap_device *od = to_omap_device(pdev); 852 struct omap_device *od = to_omap_device(pdev);
853 int i;
845 854
846 if (!od) 855 if (!od)
847 return 0; 856 return 0;
@@ -850,6 +859,15 @@ static int __init omap_device_late_idle(struct device *dev, void *data)
850 * If omap_device state is enabled, but has no driver bound, 859 * If omap_device state is enabled, but has no driver bound,
851 * idle it. 860 * idle it.
852 */ 861 */
862
863 /*
864 * Some devices (like memory controllers) are always kept
865 * enabled, and should not be idled even with no drivers.
866 */
867 for (i = 0; i < od->hwmods_cnt; i++)
868 if (od->hwmods[i]->flags & HWMOD_INIT_NO_IDLE)
869 return 0;
870
853 if (od->_driver_status != BUS_NOTIFY_BOUND_DRIVER) { 871 if (od->_driver_status != BUS_NOTIFY_BOUND_DRIVER) {
854 if (od->_state == OMAP_DEVICE_STATE_ENABLED) { 872 if (od->_state == OMAP_DEVICE_STATE_ENABLED) {
855 dev_warn(dev, "%s: enabled but no driver. Idling\n", 873 dev_warn(dev, "%s: enabled but no driver. Idling\n",
diff --git a/arch/arm/mach-omap2/omap_hwmod.c b/arch/arm/mach-omap2/omap_hwmod.c
index 7341eff63f56..7f4db12b1459 100644
--- a/arch/arm/mach-omap2/omap_hwmod.c
+++ b/arch/arm/mach-omap2/omap_hwmod.c
@@ -2386,7 +2386,7 @@ static void __init _init_mpu_rt_base(struct omap_hwmod *oh, void *data)
2386 2386
2387 np = of_dev_hwmod_lookup(of_find_node_by_name(NULL, "ocp"), oh); 2387 np = of_dev_hwmod_lookup(of_find_node_by_name(NULL, "ocp"), oh);
2388 if (np) 2388 if (np)
2389 va_start = of_iomap(np, 0); 2389 va_start = of_iomap(np, oh->mpu_rt_idx);
2390 } else { 2390 } else {
2391 va_start = ioremap(mem->pa_start, mem->pa_end - mem->pa_start); 2391 va_start = ioremap(mem->pa_start, mem->pa_end - mem->pa_start);
2392 } 2392 }
diff --git a/arch/arm/mach-omap2/omap_hwmod.h b/arch/arm/mach-omap2/omap_hwmod.h
index aab33fd814c0..e1482a9b3bc2 100644
--- a/arch/arm/mach-omap2/omap_hwmod.h
+++ b/arch/arm/mach-omap2/omap_hwmod.h
@@ -95,6 +95,54 @@ extern struct omap_hwmod_sysc_fields omap_hwmod_sysc_type3;
95#define MODULEMODE_HWCTRL 1 95#define MODULEMODE_HWCTRL 1
96#define MODULEMODE_SWCTRL 2 96#define MODULEMODE_SWCTRL 2
97 97
98#define DEBUG_OMAP2UART1_FLAGS 0
99#define DEBUG_OMAP2UART2_FLAGS 0
100#define DEBUG_OMAP2UART3_FLAGS 0
101#define DEBUG_OMAP3UART3_FLAGS 0
102#define DEBUG_OMAP3UART4_FLAGS 0
103#define DEBUG_OMAP4UART3_FLAGS 0
104#define DEBUG_OMAP4UART4_FLAGS 0
105#define DEBUG_TI81XXUART1_FLAGS 0
106#define DEBUG_TI81XXUART2_FLAGS 0
107#define DEBUG_TI81XXUART3_FLAGS 0
108#define DEBUG_AM33XXUART1_FLAGS 0
109
110#define DEBUG_OMAPUART_FLAGS (HWMOD_INIT_NO_IDLE | HWMOD_INIT_NO_RESET)
111
112#if defined(CONFIG_DEBUG_OMAP2UART1)
113#undef DEBUG_OMAP2UART1_FLAGS
114#define DEBUG_OMAP2UART1_FLAGS DEBUG_OMAPUART_FLAGS
115#elif defined(CONFIG_DEBUG_OMAP2UART2)
116#undef DEBUG_OMAP2UART2_FLAGS
117#define DEBUG_OMAP2UART2_FLAGS DEBUG_OMAPUART_FLAGS
118#elif defined(CONFIG_DEBUG_OMAP2UART3)
119#undef DEBUG_OMAP2UART3_FLAGS
120#define DEBUG_OMAP2UART3_FLAGS DEBUG_OMAPUART_FLAGS
121#elif defined(CONFIG_DEBUG_OMAP3UART3)
122#undef DEBUG_OMAP3UART3_FLAGS
123#define DEBUG_OMAP3UART3_FLAGS DEBUG_OMAPUART_FLAGS
124#elif defined(CONFIG_DEBUG_OMAP3UART4)
125#undef DEBUG_OMAP3UART4_FLAGS
126#define DEBUG_OMAP3UART4_FLAGS DEBUG_OMAPUART_FLAGS
127#elif defined(CONFIG_DEBUG_OMAP4UART3)
128#undef DEBUG_OMAP4UART3_FLAGS
129#define DEBUG_OMAP4UART3_FLAGS DEBUG_OMAPUART_FLAGS
130#elif defined(CONFIG_DEBUG_OMAP4UART4)
131#undef DEBUG_OMAP4UART4_FLAGS
132#define DEBUG_OMAP4UART4_FLAGS DEBUG_OMAPUART_FLAGS
133#elif defined(CONFIG_DEBUG_TI81XXUART1)
134#undef DEBUG_TI81XXUART1_FLAGS
135#define DEBUG_TI81XXUART1_FLAGS DEBUG_OMAPUART_FLAGS
136#elif defined(CONFIG_DEBUG_TI81XXUART2)
137#undef DEBUG_TI81XXUART2_FLAGS
138#define DEBUG_TI81XXUART2_FLAGS DEBUG_OMAPUART_FLAGS
139#elif defined(CONFIG_DEBUG_TI81XXUART3)
140#undef DEBUG_TI81XXUART3_FLAGS
141#define DEBUG_TI81XXUART3_FLAGS DEBUG_OMAPUART_FLAGS
142#elif defined(CONFIG_DEBUG_AM33XXUART1)
143#undef DEBUG_AM33XXUART1_FLAGS
144#define DEBUG_AM33XXUART1_FLAGS DEBUG_OMAPUART_FLAGS
145#endif
98 146
99/** 147/**
100 * struct omap_hwmod_mux_info - hwmod specific mux configuration 148 * struct omap_hwmod_mux_info - hwmod specific mux configuration
@@ -568,6 +616,7 @@ struct omap_hwmod_link {
568 * @voltdm: pointer to voltage domain (filled in at runtime) 616 * @voltdm: pointer to voltage domain (filled in at runtime)
569 * @dev_attr: arbitrary device attributes that can be passed to the driver 617 * @dev_attr: arbitrary device attributes that can be passed to the driver
570 * @_sysc_cache: internal-use hwmod flags 618 * @_sysc_cache: internal-use hwmod flags
619 * @mpu_rt_idx: index of device address space for register target (for DT boot)
571 * @_mpu_rt_va: cached register target start address (internal use) 620 * @_mpu_rt_va: cached register target start address (internal use)
572 * @_mpu_port: cached MPU register target slave (internal use) 621 * @_mpu_port: cached MPU register target slave (internal use)
573 * @opt_clks_cnt: number of @opt_clks 622 * @opt_clks_cnt: number of @opt_clks
@@ -617,6 +666,7 @@ struct omap_hwmod {
617 struct list_head node; 666 struct list_head node;
618 struct omap_hwmod_ocp_if *_mpu_port; 667 struct omap_hwmod_ocp_if *_mpu_port;
619 u16 flags; 668 u16 flags;
669 u8 mpu_rt_idx;
620 u8 response_lat; 670 u8 response_lat;
621 u8 rst_lines_cnt; 671 u8 rst_lines_cnt;
622 u8 opt_clks_cnt; 672 u8 opt_clks_cnt;
diff --git a/arch/arm/mach-omap2/omap_hwmod_2xxx_ipblock_data.c b/arch/arm/mach-omap2/omap_hwmod_2xxx_ipblock_data.c
index d05fc7b54567..56cebb05509e 100644
--- a/arch/arm/mach-omap2/omap_hwmod_2xxx_ipblock_data.c
+++ b/arch/arm/mach-omap2/omap_hwmod_2xxx_ipblock_data.c
@@ -512,7 +512,7 @@ struct omap_hwmod omap2xxx_uart1_hwmod = {
512 .mpu_irqs = omap2_uart1_mpu_irqs, 512 .mpu_irqs = omap2_uart1_mpu_irqs,
513 .sdma_reqs = omap2_uart1_sdma_reqs, 513 .sdma_reqs = omap2_uart1_sdma_reqs,
514 .main_clk = "uart1_fck", 514 .main_clk = "uart1_fck",
515 .flags = HWMOD_SWSUP_SIDLE_ACT, 515 .flags = DEBUG_OMAP2UART1_FLAGS | HWMOD_SWSUP_SIDLE_ACT,
516 .prcm = { 516 .prcm = {
517 .omap2 = { 517 .omap2 = {
518 .module_offs = CORE_MOD, 518 .module_offs = CORE_MOD,
@@ -532,7 +532,7 @@ struct omap_hwmod omap2xxx_uart2_hwmod = {
532 .mpu_irqs = omap2_uart2_mpu_irqs, 532 .mpu_irqs = omap2_uart2_mpu_irqs,
533 .sdma_reqs = omap2_uart2_sdma_reqs, 533 .sdma_reqs = omap2_uart2_sdma_reqs,
534 .main_clk = "uart2_fck", 534 .main_clk = "uart2_fck",
535 .flags = HWMOD_SWSUP_SIDLE_ACT, 535 .flags = DEBUG_OMAP2UART2_FLAGS | HWMOD_SWSUP_SIDLE_ACT,
536 .prcm = { 536 .prcm = {
537 .omap2 = { 537 .omap2 = {
538 .module_offs = CORE_MOD, 538 .module_offs = CORE_MOD,
@@ -552,7 +552,7 @@ struct omap_hwmod omap2xxx_uart3_hwmod = {
552 .mpu_irqs = omap2_uart3_mpu_irqs, 552 .mpu_irqs = omap2_uart3_mpu_irqs,
553 .sdma_reqs = omap2_uart3_sdma_reqs, 553 .sdma_reqs = omap2_uart3_sdma_reqs,
554 .main_clk = "uart3_fck", 554 .main_clk = "uart3_fck",
555 .flags = HWMOD_SWSUP_SIDLE_ACT, 555 .flags = DEBUG_OMAP2UART3_FLAGS | HWMOD_SWSUP_SIDLE_ACT,
556 .prcm = { 556 .prcm = {
557 .omap2 = { 557 .omap2 = {
558 .module_offs = CORE_MOD, 558 .module_offs = CORE_MOD,
diff --git a/arch/arm/mach-omap2/omap_hwmod_33xx_data.c b/arch/arm/mach-omap2/omap_hwmod_33xx_data.c
index 28bbd56346a9..eb2f3b93b51c 100644
--- a/arch/arm/mach-omap2/omap_hwmod_33xx_data.c
+++ b/arch/arm/mach-omap2/omap_hwmod_33xx_data.c
@@ -562,6 +562,7 @@ static struct omap_hwmod am33xx_cpgmac0_hwmod = {
562 .clkdm_name = "cpsw_125mhz_clkdm", 562 .clkdm_name = "cpsw_125mhz_clkdm",
563 .flags = (HWMOD_SWSUP_SIDLE | HWMOD_SWSUP_MSTANDBY), 563 .flags = (HWMOD_SWSUP_SIDLE | HWMOD_SWSUP_MSTANDBY),
564 .main_clk = "cpsw_125mhz_gclk", 564 .main_clk = "cpsw_125mhz_gclk",
565 .mpu_rt_idx = 1,
565 .prcm = { 566 .prcm = {
566 .omap4 = { 567 .omap4 = {
567 .clkctrl_offs = AM33XX_CM_PER_CPGMAC0_CLKCTRL_OFFSET, 568 .clkctrl_offs = AM33XX_CM_PER_CPGMAC0_CLKCTRL_OFFSET,
@@ -1512,7 +1513,7 @@ static struct omap_hwmod am33xx_uart1_hwmod = {
1512 .name = "uart1", 1513 .name = "uart1",
1513 .class = &uart_class, 1514 .class = &uart_class,
1514 .clkdm_name = "l4_wkup_clkdm", 1515 .clkdm_name = "l4_wkup_clkdm",
1515 .flags = HWMOD_SWSUP_SIDLE_ACT, 1516 .flags = DEBUG_AM33XXUART1_FLAGS | HWMOD_SWSUP_SIDLE_ACT,
1516 .main_clk = "dpll_per_m2_div4_wkupdm_ck", 1517 .main_clk = "dpll_per_m2_div4_wkupdm_ck",
1517 .prcm = { 1518 .prcm = {
1518 .omap4 = { 1519 .omap4 = {
diff --git a/arch/arm/mach-omap2/omap_hwmod_3xxx_data.c b/arch/arm/mach-omap2/omap_hwmod_3xxx_data.c
index f7a3df2fb579..0c3a427da544 100644
--- a/arch/arm/mach-omap2/omap_hwmod_3xxx_data.c
+++ b/arch/arm/mach-omap2/omap_hwmod_3xxx_data.c
@@ -490,7 +490,7 @@ static struct omap_hwmod omap3xxx_uart1_hwmod = {
490 .mpu_irqs = omap2_uart1_mpu_irqs, 490 .mpu_irqs = omap2_uart1_mpu_irqs,
491 .sdma_reqs = omap2_uart1_sdma_reqs, 491 .sdma_reqs = omap2_uart1_sdma_reqs,
492 .main_clk = "uart1_fck", 492 .main_clk = "uart1_fck",
493 .flags = HWMOD_SWSUP_SIDLE_ACT, 493 .flags = DEBUG_TI81XXUART1_FLAGS | HWMOD_SWSUP_SIDLE_ACT,
494 .prcm = { 494 .prcm = {
495 .omap2 = { 495 .omap2 = {
496 .module_offs = CORE_MOD, 496 .module_offs = CORE_MOD,
@@ -509,7 +509,7 @@ static struct omap_hwmod omap3xxx_uart2_hwmod = {
509 .mpu_irqs = omap2_uart2_mpu_irqs, 509 .mpu_irqs = omap2_uart2_mpu_irqs,
510 .sdma_reqs = omap2_uart2_sdma_reqs, 510 .sdma_reqs = omap2_uart2_sdma_reqs,
511 .main_clk = "uart2_fck", 511 .main_clk = "uart2_fck",
512 .flags = HWMOD_SWSUP_SIDLE_ACT, 512 .flags = DEBUG_TI81XXUART2_FLAGS | HWMOD_SWSUP_SIDLE_ACT,
513 .prcm = { 513 .prcm = {
514 .omap2 = { 514 .omap2 = {
515 .module_offs = CORE_MOD, 515 .module_offs = CORE_MOD,
@@ -528,7 +528,8 @@ static struct omap_hwmod omap3xxx_uart3_hwmod = {
528 .mpu_irqs = omap2_uart3_mpu_irqs, 528 .mpu_irqs = omap2_uart3_mpu_irqs,
529 .sdma_reqs = omap2_uart3_sdma_reqs, 529 .sdma_reqs = omap2_uart3_sdma_reqs,
530 .main_clk = "uart3_fck", 530 .main_clk = "uart3_fck",
531 .flags = HWMOD_SWSUP_SIDLE_ACT, 531 .flags = DEBUG_OMAP3UART3_FLAGS | DEBUG_TI81XXUART3_FLAGS |
532 HWMOD_SWSUP_SIDLE_ACT,
532 .prcm = { 533 .prcm = {
533 .omap2 = { 534 .omap2 = {
534 .module_offs = OMAP3430_PER_MOD, 535 .module_offs = OMAP3430_PER_MOD,
@@ -558,7 +559,7 @@ static struct omap_hwmod omap36xx_uart4_hwmod = {
558 .mpu_irqs = uart4_mpu_irqs, 559 .mpu_irqs = uart4_mpu_irqs,
559 .sdma_reqs = uart4_sdma_reqs, 560 .sdma_reqs = uart4_sdma_reqs,
560 .main_clk = "uart4_fck", 561 .main_clk = "uart4_fck",
561 .flags = HWMOD_SWSUP_SIDLE_ACT, 562 .flags = DEBUG_OMAP3UART4_FLAGS | HWMOD_SWSUP_SIDLE_ACT,
562 .prcm = { 563 .prcm = {
563 .omap2 = { 564 .omap2 = {
564 .module_offs = OMAP3430_PER_MOD, 565 .module_offs = OMAP3430_PER_MOD,
diff --git a/arch/arm/mach-omap2/omap_hwmod_44xx_data.c b/arch/arm/mach-omap2/omap_hwmod_44xx_data.c
index d04b5e60fdbe..9c3b504477d7 100644
--- a/arch/arm/mach-omap2/omap_hwmod_44xx_data.c
+++ b/arch/arm/mach-omap2/omap_hwmod_44xx_data.c
@@ -2858,8 +2858,7 @@ static struct omap_hwmod omap44xx_uart3_hwmod = {
2858 .name = "uart3", 2858 .name = "uart3",
2859 .class = &omap44xx_uart_hwmod_class, 2859 .class = &omap44xx_uart_hwmod_class,
2860 .clkdm_name = "l4_per_clkdm", 2860 .clkdm_name = "l4_per_clkdm",
2861 .flags = HWMOD_INIT_NO_IDLE | HWMOD_INIT_NO_RESET | 2861 .flags = DEBUG_OMAP4UART3_FLAGS | HWMOD_SWSUP_SIDLE_ACT,
2862 HWMOD_SWSUP_SIDLE_ACT,
2863 .main_clk = "func_48m_fclk", 2862 .main_clk = "func_48m_fclk",
2864 .prcm = { 2863 .prcm = {
2865 .omap4 = { 2864 .omap4 = {
@@ -2875,7 +2874,7 @@ static struct omap_hwmod omap44xx_uart4_hwmod = {
2875 .name = "uart4", 2874 .name = "uart4",
2876 .class = &omap44xx_uart_hwmod_class, 2875 .class = &omap44xx_uart_hwmod_class,
2877 .clkdm_name = "l4_per_clkdm", 2876 .clkdm_name = "l4_per_clkdm",
2878 .flags = HWMOD_SWSUP_SIDLE_ACT, 2877 .flags = DEBUG_OMAP4UART4_FLAGS | HWMOD_SWSUP_SIDLE_ACT,
2879 .main_clk = "func_48m_fclk", 2878 .main_clk = "func_48m_fclk",
2880 .prcm = { 2879 .prcm = {
2881 .omap4 = { 2880 .omap4 = {
diff --git a/arch/arm/mach-omap2/omap_hwmod_54xx_data.c b/arch/arm/mach-omap2/omap_hwmod_54xx_data.c
index f37ae96b70a1..3c70f5c1860f 100644
--- a/arch/arm/mach-omap2/omap_hwmod_54xx_data.c
+++ b/arch/arm/mach-omap2/omap_hwmod_54xx_data.c
@@ -1375,7 +1375,7 @@ static struct omap_hwmod omap54xx_uart3_hwmod = {
1375 .name = "uart3", 1375 .name = "uart3",
1376 .class = &omap54xx_uart_hwmod_class, 1376 .class = &omap54xx_uart_hwmod_class,
1377 .clkdm_name = "l4per_clkdm", 1377 .clkdm_name = "l4per_clkdm",
1378 .flags = HWMOD_INIT_NO_IDLE | HWMOD_INIT_NO_RESET, 1378 .flags = DEBUG_OMAP4UART3_FLAGS,
1379 .main_clk = "func_48m_fclk", 1379 .main_clk = "func_48m_fclk",
1380 .prcm = { 1380 .prcm = {
1381 .omap4 = { 1381 .omap4 = {
@@ -1391,6 +1391,7 @@ static struct omap_hwmod omap54xx_uart4_hwmod = {
1391 .name = "uart4", 1391 .name = "uart4",
1392 .class = &omap54xx_uart_hwmod_class, 1392 .class = &omap54xx_uart_hwmod_class,
1393 .clkdm_name = "l4per_clkdm", 1393 .clkdm_name = "l4per_clkdm",
1394 .flags = DEBUG_OMAP4UART4_FLAGS,
1394 .main_clk = "func_48m_fclk", 1395 .main_clk = "func_48m_fclk",
1395 .prcm = { 1396 .prcm = {
1396 .omap4 = { 1397 .omap4 = {
diff --git a/arch/arm/mach-omap2/serial.c b/arch/arm/mach-omap2/serial.c
index 3a674de6cb63..a388f8c1bcb3 100644
--- a/arch/arm/mach-omap2/serial.c
+++ b/arch/arm/mach-omap2/serial.c
@@ -208,17 +208,6 @@ static int __init omap_serial_early_init(void)
208 pr_info("%s used as console in debug mode: uart%d clocks will not be gated", 208 pr_info("%s used as console in debug mode: uart%d clocks will not be gated",
209 uart_name, uart->num); 209 uart_name, uart->num);
210 } 210 }
211
212 /*
213 * omap-uart can be used for earlyprintk logs
214 * So if omap-uart is used as console then prevent
215 * uart reset and idle to get logs from omap-uart
216 * until uart console driver is available to take
217 * care for console messages.
218 * Idling or resetting omap-uart while printing logs
219 * early boot logs can stall the boot-up.
220 */
221 oh->flags |= HWMOD_INIT_NO_IDLE | HWMOD_INIT_NO_RESET;
222 } 211 }
223 } while (1); 212 } while (1);
224 213
diff --git a/arch/arm/mach-prima2/headsmp.S b/arch/arm/mach-prima2/headsmp.S
index 5b8a408d8921..d86fe33c5f53 100644
--- a/arch/arm/mach-prima2/headsmp.S
+++ b/arch/arm/mach-prima2/headsmp.S
@@ -9,8 +9,6 @@
9#include <linux/linkage.h> 9#include <linux/linkage.h>
10#include <linux/init.h> 10#include <linux/init.h>
11 11
12 __CPUINIT
13
14/* 12/*
15 * SIRFSOC specific entry point for secondary CPUs. This provides 13 * SIRFSOC specific entry point for secondary CPUs. This provides
16 * a "holding pen" into which all secondary cores are held until we're 14 * a "holding pen" into which all secondary cores are held until we're
diff --git a/arch/arm/mach-prima2/platsmp.c b/arch/arm/mach-prima2/platsmp.c
index 1c3de7bed841..3dbcb1ab6e37 100644
--- a/arch/arm/mach-prima2/platsmp.c
+++ b/arch/arm/mach-prima2/platsmp.c
@@ -44,7 +44,7 @@ void __init sirfsoc_map_scu(void)
44 scu_base = (void __iomem *)SIRFSOC_VA(base); 44 scu_base = (void __iomem *)SIRFSOC_VA(base);
45} 45}
46 46
47static void __cpuinit sirfsoc_secondary_init(unsigned int cpu) 47static void sirfsoc_secondary_init(unsigned int cpu)
48{ 48{
49 /* 49 /*
50 * let the primary processor know we're out of the 50 * let the primary processor know we're out of the
@@ -65,7 +65,7 @@ static struct of_device_id rsc_ids[] = {
65 {}, 65 {},
66}; 66};
67 67
68static int __cpuinit sirfsoc_boot_secondary(unsigned int cpu, struct task_struct *idle) 68static int sirfsoc_boot_secondary(unsigned int cpu, struct task_struct *idle)
69{ 69{
70 unsigned long timeout; 70 unsigned long timeout;
71 struct device_node *np; 71 struct device_node *np;
diff --git a/arch/arm/mach-pxa/em-x270.c b/arch/arm/mach-pxa/em-x270.c
index f6726bb4eb95..3a3362fa793e 100644
--- a/arch/arm/mach-pxa/em-x270.c
+++ b/arch/arm/mach-pxa/em-x270.c
@@ -477,16 +477,24 @@ static int em_x270_usb_hub_init(void)
477 /* USB Hub power-on and reset */ 477 /* USB Hub power-on and reset */
478 gpio_direction_output(usb_hub_reset, 1); 478 gpio_direction_output(usb_hub_reset, 1);
479 gpio_direction_output(GPIO9_USB_VBUS_EN, 0); 479 gpio_direction_output(GPIO9_USB_VBUS_EN, 0);
480 regulator_enable(em_x270_usb_ldo); 480 err = regulator_enable(em_x270_usb_ldo);
481 if (err)
482 goto err_free_rst_gpio;
483
481 gpio_set_value(usb_hub_reset, 0); 484 gpio_set_value(usb_hub_reset, 0);
482 gpio_set_value(usb_hub_reset, 1); 485 gpio_set_value(usb_hub_reset, 1);
483 regulator_disable(em_x270_usb_ldo); 486 regulator_disable(em_x270_usb_ldo);
484 regulator_enable(em_x270_usb_ldo); 487 err = regulator_enable(em_x270_usb_ldo);
488 if (err)
489 goto err_free_rst_gpio;
490
485 gpio_set_value(usb_hub_reset, 0); 491 gpio_set_value(usb_hub_reset, 0);
486 gpio_set_value(GPIO9_USB_VBUS_EN, 1); 492 gpio_set_value(GPIO9_USB_VBUS_EN, 1);
487 493
488 return 0; 494 return 0;
489 495
496err_free_rst_gpio:
497 gpio_free(usb_hub_reset);
490err_free_vbus_gpio: 498err_free_vbus_gpio:
491 gpio_free(GPIO9_USB_VBUS_EN); 499 gpio_free(GPIO9_USB_VBUS_EN);
492err_free_usb_ldo: 500err_free_usb_ldo:
@@ -592,7 +600,7 @@ err_irq:
592 return err; 600 return err;
593} 601}
594 602
595static void em_x270_mci_setpower(struct device *dev, unsigned int vdd) 603static int em_x270_mci_setpower(struct device *dev, unsigned int vdd)
596{ 604{
597 struct pxamci_platform_data* p_d = dev->platform_data; 605 struct pxamci_platform_data* p_d = dev->platform_data;
598 606
@@ -600,10 +608,11 @@ static void em_x270_mci_setpower(struct device *dev, unsigned int vdd)
600 int vdd_uV = (2000 + (vdd - __ffs(MMC_VDD_20_21)) * 100) * 1000; 608 int vdd_uV = (2000 + (vdd - __ffs(MMC_VDD_20_21)) * 100) * 1000;
601 609
602 regulator_set_voltage(em_x270_sdio_ldo, vdd_uV, vdd_uV); 610 regulator_set_voltage(em_x270_sdio_ldo, vdd_uV, vdd_uV);
603 regulator_enable(em_x270_sdio_ldo); 611 return regulator_enable(em_x270_sdio_ldo);
604 } else { 612 } else {
605 regulator_disable(em_x270_sdio_ldo); 613 regulator_disable(em_x270_sdio_ldo);
606 } 614 }
615 return 0;
607} 616}
608 617
609static void em_x270_mci_exit(struct device *dev, void *data) 618static void em_x270_mci_exit(struct device *dev, void *data)
diff --git a/arch/arm/mach-pxa/mainstone.c b/arch/arm/mach-pxa/mainstone.c
index d2c652318376..dd70343c8708 100644
--- a/arch/arm/mach-pxa/mainstone.c
+++ b/arch/arm/mach-pxa/mainstone.c
@@ -408,7 +408,7 @@ static int mainstone_mci_init(struct device *dev, irq_handler_t mstone_detect_in
408 return err; 408 return err;
409} 409}
410 410
411static void mainstone_mci_setpower(struct device *dev, unsigned int vdd) 411static int mainstone_mci_setpower(struct device *dev, unsigned int vdd)
412{ 412{
413 struct pxamci_platform_data* p_d = dev->platform_data; 413 struct pxamci_platform_data* p_d = dev->platform_data;
414 414
@@ -420,6 +420,7 @@ static void mainstone_mci_setpower(struct device *dev, unsigned int vdd)
420 printk(KERN_DEBUG "%s: off\n", __func__); 420 printk(KERN_DEBUG "%s: off\n", __func__);
421 MST_MSCWR1 &= ~MST_MSCWR1_MMC_ON; 421 MST_MSCWR1 &= ~MST_MSCWR1_MMC_ON;
422 } 422 }
423 return 0;
423} 424}
424 425
425static void mainstone_mci_exit(struct device *dev, void *data) 426static void mainstone_mci_exit(struct device *dev, void *data)
diff --git a/arch/arm/mach-pxa/pcm990-baseboard.c b/arch/arm/mach-pxa/pcm990-baseboard.c
index fb7f1d1627dc..13e5b00eae90 100644
--- a/arch/arm/mach-pxa/pcm990-baseboard.c
+++ b/arch/arm/mach-pxa/pcm990-baseboard.c
@@ -335,7 +335,7 @@ static int pcm990_mci_init(struct device *dev, irq_handler_t mci_detect_int,
335 return err; 335 return err;
336} 336}
337 337
338static void pcm990_mci_setpower(struct device *dev, unsigned int vdd) 338static int pcm990_mci_setpower(struct device *dev, unsigned int vdd)
339{ 339{
340 struct pxamci_platform_data *p_d = dev->platform_data; 340 struct pxamci_platform_data *p_d = dev->platform_data;
341 u8 val; 341 u8 val;
@@ -348,6 +348,7 @@ static void pcm990_mci_setpower(struct device *dev, unsigned int vdd)
348 val &= ~PCM990_CTRL_MMC2PWR; 348 val &= ~PCM990_CTRL_MMC2PWR;
349 349
350 pcm990_cpld_writeb(PCM990_CTRL_MMC2PWR, PCM990_CTRL_REG5); 350 pcm990_cpld_writeb(PCM990_CTRL_MMC2PWR, PCM990_CTRL_REG5);
351 return 0;
351} 352}
352 353
353static void pcm990_mci_exit(struct device *dev, void *data) 354static void pcm990_mci_exit(struct device *dev, void *data)
diff --git a/arch/arm/mach-pxa/poodle.c b/arch/arm/mach-pxa/poodle.c
index 711d37e26bd8..aedf053a1de5 100644
--- a/arch/arm/mach-pxa/poodle.c
+++ b/arch/arm/mach-pxa/poodle.c
@@ -258,7 +258,7 @@ err_free_2:
258 return err; 258 return err;
259} 259}
260 260
261static void poodle_mci_setpower(struct device *dev, unsigned int vdd) 261static int poodle_mci_setpower(struct device *dev, unsigned int vdd)
262{ 262{
263 struct pxamci_platform_data* p_d = dev->platform_data; 263 struct pxamci_platform_data* p_d = dev->platform_data;
264 264
@@ -270,6 +270,8 @@ static void poodle_mci_setpower(struct device *dev, unsigned int vdd)
270 gpio_set_value(POODLE_GPIO_SD_PWR1, 0); 270 gpio_set_value(POODLE_GPIO_SD_PWR1, 0);
271 gpio_set_value(POODLE_GPIO_SD_PWR, 0); 271 gpio_set_value(POODLE_GPIO_SD_PWR, 0);
272 } 272 }
273
274 return 0;
273} 275}
274 276
275static void poodle_mci_exit(struct device *dev, void *data) 277static void poodle_mci_exit(struct device *dev, void *data)
diff --git a/arch/arm/mach-pxa/spitz.c b/arch/arm/mach-pxa/spitz.c
index 2125df0444e7..4c29173026e8 100644
--- a/arch/arm/mach-pxa/spitz.c
+++ b/arch/arm/mach-pxa/spitz.c
@@ -598,7 +598,7 @@ static inline void spitz_spi_init(void) {}
598 * NOTE: The card detect interrupt isn't debounced so we delay it by 250ms to 598 * NOTE: The card detect interrupt isn't debounced so we delay it by 250ms to
599 * give the card a chance to fully insert/eject. 599 * give the card a chance to fully insert/eject.
600 */ 600 */
601static void spitz_mci_setpower(struct device *dev, unsigned int vdd) 601static int spitz_mci_setpower(struct device *dev, unsigned int vdd)
602{ 602{
603 struct pxamci_platform_data* p_d = dev->platform_data; 603 struct pxamci_platform_data* p_d = dev->platform_data;
604 604
@@ -606,6 +606,8 @@ static void spitz_mci_setpower(struct device *dev, unsigned int vdd)
606 spitz_card_pwr_ctrl(SCOOP_CPR_SD_3V, SCOOP_CPR_SD_3V); 606 spitz_card_pwr_ctrl(SCOOP_CPR_SD_3V, SCOOP_CPR_SD_3V);
607 else 607 else
608 spitz_card_pwr_ctrl(SCOOP_CPR_SD_3V, 0x0); 608 spitz_card_pwr_ctrl(SCOOP_CPR_SD_3V, 0x0);
609
610 return 0;
609} 611}
610 612
611static struct pxamci_platform_data spitz_mci_platform_data = { 613static struct pxamci_platform_data spitz_mci_platform_data = {
diff --git a/arch/arm/mach-pxa/stargate2.c b/arch/arm/mach-pxa/stargate2.c
index 88fde43c948c..62aea3e835f3 100644
--- a/arch/arm/mach-pxa/stargate2.c
+++ b/arch/arm/mach-pxa/stargate2.c
@@ -734,9 +734,10 @@ static int stargate2_mci_init(struct device *dev,
734 * 734 *
735 * Very simple control. Either it is on or off and is controlled by 735 * Very simple control. Either it is on or off and is controlled by
736 * a gpio pin */ 736 * a gpio pin */
737static void stargate2_mci_setpower(struct device *dev, unsigned int vdd) 737static int stargate2_mci_setpower(struct device *dev, unsigned int vdd)
738{ 738{
739 gpio_set_value(SG2_SD_POWER_ENABLE, !!vdd); 739 gpio_set_value(SG2_SD_POWER_ENABLE, !!vdd);
740 return 0;
740} 741}
741 742
742static void stargate2_mci_exit(struct device *dev, void *data) 743static void stargate2_mci_exit(struct device *dev, void *data)
diff --git a/arch/arm/mach-s3c24xx/Kconfig b/arch/arm/mach-s3c24xx/Kconfig
index 6d9252e081ce..7791ac76f945 100644
--- a/arch/arm/mach-s3c24xx/Kconfig
+++ b/arch/arm/mach-s3c24xx/Kconfig
@@ -208,7 +208,7 @@ config S3C24XX_GPIO_EXTRA128
208 208
209config S3C24XX_PLL 209config S3C24XX_PLL
210 bool "Support CPUfreq changing of PLL frequency (EXPERIMENTAL)" 210 bool "Support CPUfreq changing of PLL frequency (EXPERIMENTAL)"
211 depends on ARM_S3C24XX 211 depends on ARM_S3C24XX_CPUFREQ
212 help 212 help
213 Compile in support for changing the PLL frequency from the 213 Compile in support for changing the PLL frequency from the
214 S3C24XX series CPUfreq driver. The PLL takes time to settle 214 S3C24XX series CPUfreq driver. The PLL takes time to settle
diff --git a/arch/arm/mach-s3c24xx/clock-s3c2410.c b/arch/arm/mach-s3c24xx/clock-s3c2410.c
index 34fffdf6fc1d..564553694b54 100644
--- a/arch/arm/mach-s3c24xx/clock-s3c2410.c
+++ b/arch/arm/mach-s3c24xx/clock-s3c2410.c
@@ -119,66 +119,101 @@ static struct clk init_clocks_off[] = {
119 } 119 }
120}; 120};
121 121
122static struct clk init_clocks[] = { 122static struct clk clk_lcd = {
123 { 123 .name = "lcd",
124 .name = "lcd", 124 .parent = &clk_h,
125 .parent = &clk_h, 125 .enable = s3c2410_clkcon_enable,
126 .enable = s3c2410_clkcon_enable, 126 .ctrlbit = S3C2410_CLKCON_LCDC,
127 .ctrlbit = S3C2410_CLKCON_LCDC, 127};
128 }, { 128
129 .name = "gpio", 129static struct clk clk_gpio = {
130 .parent = &clk_p, 130 .name = "gpio",
131 .enable = s3c2410_clkcon_enable, 131 .parent = &clk_p,
132 .ctrlbit = S3C2410_CLKCON_GPIO, 132 .enable = s3c2410_clkcon_enable,
133 }, { 133 .ctrlbit = S3C2410_CLKCON_GPIO,
134 .name = "usb-host", 134};
135 .parent = &clk_h, 135
136 .enable = s3c2410_clkcon_enable, 136static struct clk clk_usb_host = {
137 .ctrlbit = S3C2410_CLKCON_USBH, 137 .name = "usb-host",
138 }, { 138 .parent = &clk_h,
139 .name = "usb-device", 139 .enable = s3c2410_clkcon_enable,
140 .parent = &clk_h, 140 .ctrlbit = S3C2410_CLKCON_USBH,
141 .enable = s3c2410_clkcon_enable, 141};
142 .ctrlbit = S3C2410_CLKCON_USBD, 142
143 }, { 143static struct clk clk_usb_device = {
144 .name = "timers", 144 .name = "usb-device",
145 .parent = &clk_p, 145 .parent = &clk_h,
146 .enable = s3c2410_clkcon_enable, 146 .enable = s3c2410_clkcon_enable,
147 .ctrlbit = S3C2410_CLKCON_PWMT, 147 .ctrlbit = S3C2410_CLKCON_USBD,
148 }, { 148};
149 .name = "uart", 149
150 .devname = "s3c2410-uart.0", 150static struct clk clk_timers = {
151 .parent = &clk_p, 151 .name = "timers",
152 .enable = s3c2410_clkcon_enable, 152 .parent = &clk_p,
153 .ctrlbit = S3C2410_CLKCON_UART0, 153 .enable = s3c2410_clkcon_enable,
154 }, { 154 .ctrlbit = S3C2410_CLKCON_PWMT,
155 .name = "uart", 155};
156 .devname = "s3c2410-uart.1", 156
157 .parent = &clk_p, 157struct clk s3c24xx_clk_uart0 = {
158 .enable = s3c2410_clkcon_enable, 158 .name = "uart",
159 .ctrlbit = S3C2410_CLKCON_UART1, 159 .devname = "s3c2410-uart.0",
160 }, { 160 .parent = &clk_p,
161 .name = "uart", 161 .enable = s3c2410_clkcon_enable,
162 .devname = "s3c2410-uart.2", 162 .ctrlbit = S3C2410_CLKCON_UART0,
163 .parent = &clk_p, 163};
164 .enable = s3c2410_clkcon_enable, 164
165 .ctrlbit = S3C2410_CLKCON_UART2, 165struct clk s3c24xx_clk_uart1 = {
166 }, { 166 .name = "uart",
167 .name = "rtc", 167 .devname = "s3c2410-uart.1",
168 .parent = &clk_p, 168 .parent = &clk_p,
169 .enable = s3c2410_clkcon_enable, 169 .enable = s3c2410_clkcon_enable,
170 .ctrlbit = S3C2410_CLKCON_RTC, 170 .ctrlbit = S3C2410_CLKCON_UART1,
171 }, { 171};
172 .name = "watchdog", 172
173 .parent = &clk_p, 173struct clk s3c24xx_clk_uart2 = {
174 .ctrlbit = 0, 174 .name = "uart",
175 }, { 175 .devname = "s3c2410-uart.2",
176 .name = "usb-bus-host", 176 .parent = &clk_p,
177 .parent = &clk_usb_bus, 177 .enable = s3c2410_clkcon_enable,
178 }, { 178 .ctrlbit = S3C2410_CLKCON_UART2,
179 .name = "usb-bus-gadget", 179};
180 .parent = &clk_usb_bus, 180
181 }, 181static struct clk clk_rtc = {
182 .name = "rtc",
183 .parent = &clk_p,
184 .enable = s3c2410_clkcon_enable,
185 .ctrlbit = S3C2410_CLKCON_RTC,
186};
187
188static struct clk clk_watchdog = {
189 .name = "watchdog",
190 .parent = &clk_p,
191 .ctrlbit = 0,
192};
193
194static struct clk clk_usb_bus_host = {
195 .name = "usb-bus-host",
196 .parent = &clk_usb_bus,
197};
198
199static struct clk clk_usb_bus_gadget = {
200 .name = "usb-bus-gadget",
201 .parent = &clk_usb_bus,
202};
203
204static struct clk *init_clocks[] = {
205 &clk_lcd,
206 &clk_gpio,
207 &clk_usb_host,
208 &clk_usb_device,
209 &clk_timers,
210 &s3c24xx_clk_uart0,
211 &s3c24xx_clk_uart1,
212 &s3c24xx_clk_uart2,
213 &clk_rtc,
214 &clk_watchdog,
215 &clk_usb_bus_host,
216 &clk_usb_bus_gadget,
182}; 217};
183 218
184/* s3c2410_baseclk_add() 219/* s3c2410_baseclk_add()
@@ -195,7 +230,6 @@ int __init s3c2410_baseclk_add(void)
195{ 230{
196 unsigned long clkslow = __raw_readl(S3C2410_CLKSLOW); 231 unsigned long clkslow = __raw_readl(S3C2410_CLKSLOW);
197 unsigned long clkcon = __raw_readl(S3C2410_CLKCON); 232 unsigned long clkcon = __raw_readl(S3C2410_CLKCON);
198 struct clk *clkp;
199 struct clk *xtal; 233 struct clk *xtal;
200 int ret; 234 int ret;
201 int ptr; 235 int ptr;
@@ -207,8 +241,9 @@ int __init s3c2410_baseclk_add(void)
207 241
208 /* register clocks from clock array */ 242 /* register clocks from clock array */
209 243
210 clkp = init_clocks; 244 for (ptr = 0; ptr < ARRAY_SIZE(init_clocks); ptr++) {
211 for (ptr = 0; ptr < ARRAY_SIZE(init_clocks); ptr++, clkp++) { 245 struct clk *clkp = init_clocks[ptr];
246
212 /* ensure that we note the clock state */ 247 /* ensure that we note the clock state */
213 248
214 clkp->usage = clkcon & clkp->ctrlbit ? 1 : 0; 249 clkp->usage = clkcon & clkp->ctrlbit ? 1 : 0;
diff --git a/arch/arm/mach-s3c24xx/clock-s3c2440.c b/arch/arm/mach-s3c24xx/clock-s3c2440.c
index 1069b5680826..aaf006d1d6dc 100644
--- a/arch/arm/mach-s3c24xx/clock-s3c2440.c
+++ b/arch/arm/mach-s3c24xx/clock-s3c2440.c
@@ -166,6 +166,9 @@ static struct clk_lookup s3c2440_clk_lookup[] = {
166 CLKDEV_INIT(NULL, "clk_uart_baud1", &s3c24xx_uclk), 166 CLKDEV_INIT(NULL, "clk_uart_baud1", &s3c24xx_uclk),
167 CLKDEV_INIT(NULL, "clk_uart_baud2", &clk_p), 167 CLKDEV_INIT(NULL, "clk_uart_baud2", &clk_p),
168 CLKDEV_INIT(NULL, "clk_uart_baud3", &s3c2440_clk_fclk_n), 168 CLKDEV_INIT(NULL, "clk_uart_baud3", &s3c2440_clk_fclk_n),
169 CLKDEV_INIT("s3c2440-uart.0", "uart", &s3c24xx_clk_uart0),
170 CLKDEV_INIT("s3c2440-uart.1", "uart", &s3c24xx_clk_uart1),
171 CLKDEV_INIT("s3c2440-uart.2", "uart", &s3c24xx_clk_uart2),
169 CLKDEV_INIT("s3c2440-camif", "camera", &s3c2440_clk_cam_upll), 172 CLKDEV_INIT("s3c2440-camif", "camera", &s3c2440_clk_cam_upll),
170}; 173};
171 174
diff --git a/arch/arm/mach-shmobile/board-armadillo800eva.c b/arch/arm/mach-shmobile/board-armadillo800eva.c
index e115f6742107..c5be60d85e4b 100644
--- a/arch/arm/mach-shmobile/board-armadillo800eva.c
+++ b/arch/arm/mach-shmobile/board-armadillo800eva.c
@@ -1162,9 +1162,6 @@ static void __init eva_init(void)
1162 gpio_request_one(61, GPIOF_OUT_INIT_HIGH, NULL); /* LCDDON */ 1162 gpio_request_one(61, GPIOF_OUT_INIT_HIGH, NULL); /* LCDDON */
1163 gpio_request_one(202, GPIOF_OUT_INIT_LOW, NULL); /* LCD0_LED_CONT */ 1163 gpio_request_one(202, GPIOF_OUT_INIT_LOW, NULL); /* LCD0_LED_CONT */
1164 1164
1165 /* Touchscreen */
1166 gpio_request_one(166, GPIOF_OUT_INIT_HIGH, NULL); /* TP_RST_B */
1167
1168 /* GETHER */ 1165 /* GETHER */
1169 gpio_request_one(18, GPIOF_OUT_INIT_HIGH, NULL); /* PHY_RST */ 1166 gpio_request_one(18, GPIOF_OUT_INIT_HIGH, NULL); /* PHY_RST */
1170 1167
diff --git a/arch/arm/mach-shmobile/board-bockw.c b/arch/arm/mach-shmobile/board-bockw.c
index d5554646916c..3354a85c90f7 100644
--- a/arch/arm/mach-shmobile/board-bockw.c
+++ b/arch/arm/mach-shmobile/board-bockw.c
@@ -167,7 +167,13 @@ static const struct pinctrl_map bockw_pinctrl_map[] = {
167 "usb1", "usb1"), 167 "usb1", "usb1"),
168 /* SDHI0 */ 168 /* SDHI0 */
169 PIN_MAP_MUX_GROUP_DEFAULT("sh_mobile_sdhi.0", "pfc-r8a7778", 169 PIN_MAP_MUX_GROUP_DEFAULT("sh_mobile_sdhi.0", "pfc-r8a7778",
170 "sdhi0", "sdhi0"), 170 "sdhi0_data4", "sdhi0"),
171 PIN_MAP_MUX_GROUP_DEFAULT("sh_mobile_sdhi.0", "pfc-r8a7778",
172 "sdhi0_ctrl", "sdhi0"),
173 PIN_MAP_MUX_GROUP_DEFAULT("sh_mobile_sdhi.0", "pfc-r8a7778",
174 "sdhi0_cd", "sdhi0"),
175 PIN_MAP_MUX_GROUP_DEFAULT("sh_mobile_sdhi.0", "pfc-r8a7778",
176 "sdhi0_wp", "sdhi0"),
171}; 177};
172 178
173#define FPGA 0x18200000 179#define FPGA 0x18200000
diff --git a/arch/arm/mach-shmobile/board-lager.c b/arch/arm/mach-shmobile/board-lager.c
index d73e21d3ea8a..8d6bd5c5efb9 100644
--- a/arch/arm/mach-shmobile/board-lager.c
+++ b/arch/arm/mach-shmobile/board-lager.c
@@ -59,7 +59,7 @@ static __initdata struct gpio_led_platform_data lager_leds_pdata = {
59#define GPIO_KEY(c, g, d, ...) \ 59#define GPIO_KEY(c, g, d, ...) \
60 { .code = c, .gpio = g, .desc = d, .active_low = 1 } 60 { .code = c, .gpio = g, .desc = d, .active_low = 1 }
61 61
62static __initdata struct gpio_keys_button gpio_buttons[] = { 62static struct gpio_keys_button gpio_buttons[] = {
63 GPIO_KEY(KEY_4, RCAR_GP_PIN(1, 28), "SW2-pin4"), 63 GPIO_KEY(KEY_4, RCAR_GP_PIN(1, 28), "SW2-pin4"),
64 GPIO_KEY(KEY_3, RCAR_GP_PIN(1, 26), "SW2-pin3"), 64 GPIO_KEY(KEY_3, RCAR_GP_PIN(1, 26), "SW2-pin3"),
65 GPIO_KEY(KEY_2, RCAR_GP_PIN(1, 24), "SW2-pin2"), 65 GPIO_KEY(KEY_2, RCAR_GP_PIN(1, 24), "SW2-pin2"),
diff --git a/arch/arm/mach-shmobile/headsmp-scu.S b/arch/arm/mach-shmobile/headsmp-scu.S
index 6f9865467258..bfd920083a3b 100644
--- a/arch/arm/mach-shmobile/headsmp-scu.S
+++ b/arch/arm/mach-shmobile/headsmp-scu.S
@@ -23,7 +23,6 @@
23#include <linux/init.h> 23#include <linux/init.h>
24#include <asm/memory.h> 24#include <asm/memory.h>
25 25
26 __CPUINIT
27/* 26/*
28 * Boot code for secondary CPUs. 27 * Boot code for secondary CPUs.
29 * 28 *
diff --git a/arch/arm/mach-shmobile/headsmp.S b/arch/arm/mach-shmobile/headsmp.S
index 559d1ce5f57e..a9d212498987 100644
--- a/arch/arm/mach-shmobile/headsmp.S
+++ b/arch/arm/mach-shmobile/headsmp.S
@@ -14,8 +14,6 @@
14#include <linux/init.h> 14#include <linux/init.h>
15#include <asm/memory.h> 15#include <asm/memory.h>
16 16
17 __CPUINIT
18
19ENTRY(shmobile_invalidate_start) 17ENTRY(shmobile_invalidate_start)
20 bl v7_invalidate_l1 18 bl v7_invalidate_l1
21 b secondary_startup 19 b secondary_startup
diff --git a/arch/arm/mach-shmobile/smp-emev2.c b/arch/arm/mach-shmobile/smp-emev2.c
index 80991b35f4ac..22a05a869d25 100644
--- a/arch/arm/mach-shmobile/smp-emev2.c
+++ b/arch/arm/mach-shmobile/smp-emev2.c
@@ -30,7 +30,7 @@
30 30
31#define EMEV2_SCU_BASE 0x1e000000 31#define EMEV2_SCU_BASE 0x1e000000
32 32
33static int __cpuinit emev2_boot_secondary(unsigned int cpu, struct task_struct *idle) 33static int emev2_boot_secondary(unsigned int cpu, struct task_struct *idle)
34{ 34{
35 arch_send_wakeup_ipi_mask(cpumask_of(cpu_logical_map(cpu))); 35 arch_send_wakeup_ipi_mask(cpumask_of(cpu_logical_map(cpu)));
36 return 0; 36 return 0;
diff --git a/arch/arm/mach-shmobile/smp-r8a7779.c b/arch/arm/mach-shmobile/smp-r8a7779.c
index 526cfaae81c1..9bdf810f2a87 100644
--- a/arch/arm/mach-shmobile/smp-r8a7779.c
+++ b/arch/arm/mach-shmobile/smp-r8a7779.c
@@ -81,7 +81,7 @@ static int r8a7779_platform_cpu_kill(unsigned int cpu)
81 return ret ? ret : 1; 81 return ret ? ret : 1;
82} 82}
83 83
84static int __cpuinit r8a7779_boot_secondary(unsigned int cpu, struct task_struct *idle) 84static int r8a7779_boot_secondary(unsigned int cpu, struct task_struct *idle)
85{ 85{
86 struct r8a7779_pm_ch *ch = NULL; 86 struct r8a7779_pm_ch *ch = NULL;
87 int ret = -EIO; 87 int ret = -EIO;
diff --git a/arch/arm/mach-shmobile/smp-sh73a0.c b/arch/arm/mach-shmobile/smp-sh73a0.c
index d613113a04bd..d5fc3ed4e315 100644
--- a/arch/arm/mach-shmobile/smp-sh73a0.c
+++ b/arch/arm/mach-shmobile/smp-sh73a0.c
@@ -48,7 +48,7 @@ void __init sh73a0_register_twd(void)
48} 48}
49#endif 49#endif
50 50
51static int __cpuinit sh73a0_boot_secondary(unsigned int cpu, struct task_struct *idle) 51static int sh73a0_boot_secondary(unsigned int cpu, struct task_struct *idle)
52{ 52{
53 cpu = cpu_logical_map(cpu); 53 cpu = cpu_logical_map(cpu);
54 54
diff --git a/arch/arm/mach-socfpga/headsmp.S b/arch/arm/mach-socfpga/headsmp.S
index 9004bfb1756e..95c115d8b5ee 100644
--- a/arch/arm/mach-socfpga/headsmp.S
+++ b/arch/arm/mach-socfpga/headsmp.S
@@ -10,7 +10,6 @@
10#include <linux/linkage.h> 10#include <linux/linkage.h>
11#include <linux/init.h> 11#include <linux/init.h>
12 12
13 __CPUINIT
14 .arch armv7-a 13 .arch armv7-a
15 14
16ENTRY(secondary_trampoline) 15ENTRY(secondary_trampoline)
diff --git a/arch/arm/mach-socfpga/platsmp.c b/arch/arm/mach-socfpga/platsmp.c
index b51ce8c7929d..5356a72bc8ce 100644
--- a/arch/arm/mach-socfpga/platsmp.c
+++ b/arch/arm/mach-socfpga/platsmp.c
@@ -29,7 +29,7 @@
29 29
30#include "core.h" 30#include "core.h"
31 31
32static int __cpuinit socfpga_boot_secondary(unsigned int cpu, struct task_struct *idle) 32static int socfpga_boot_secondary(unsigned int cpu, struct task_struct *idle)
33{ 33{
34 int trampoline_size = &secondary_trampoline_end - &secondary_trampoline; 34 int trampoline_size = &secondary_trampoline_end - &secondary_trampoline;
35 35
diff --git a/arch/arm/mach-spear/generic.h b/arch/arm/mach-spear/generic.h
index 904f2c907b46..a99d90a4d09c 100644
--- a/arch/arm/mach-spear/generic.h
+++ b/arch/arm/mach-spear/generic.h
@@ -37,7 +37,7 @@ void __init spear13xx_l2x0_init(void);
37void spear_restart(enum reboot_mode, const char *); 37void spear_restart(enum reboot_mode, const char *);
38 38
39void spear13xx_secondary_startup(void); 39void spear13xx_secondary_startup(void);
40void __cpuinit spear13xx_cpu_die(unsigned int cpu); 40void spear13xx_cpu_die(unsigned int cpu);
41 41
42extern struct smp_operations spear13xx_smp_ops; 42extern struct smp_operations spear13xx_smp_ops;
43 43
diff --git a/arch/arm/mach-spear/platsmp.c b/arch/arm/mach-spear/platsmp.c
index 9c4c722c954e..5c4a19887b2b 100644
--- a/arch/arm/mach-spear/platsmp.c
+++ b/arch/arm/mach-spear/platsmp.c
@@ -24,7 +24,7 @@ static DEFINE_SPINLOCK(boot_lock);
24 24
25static void __iomem *scu_base = IOMEM(VA_SCU_BASE); 25static void __iomem *scu_base = IOMEM(VA_SCU_BASE);
26 26
27static void __cpuinit spear13xx_secondary_init(unsigned int cpu) 27static void spear13xx_secondary_init(unsigned int cpu)
28{ 28{
29 /* 29 /*
30 * let the primary processor know we're out of the 30 * let the primary processor know we're out of the
@@ -40,7 +40,7 @@ static void __cpuinit spear13xx_secondary_init(unsigned int cpu)
40 spin_unlock(&boot_lock); 40 spin_unlock(&boot_lock);
41} 41}
42 42
43static int __cpuinit spear13xx_boot_secondary(unsigned int cpu, struct task_struct *idle) 43static int spear13xx_boot_secondary(unsigned int cpu, struct task_struct *idle)
44{ 44{
45 unsigned long timeout; 45 unsigned long timeout;
46 46
diff --git a/arch/arm/mach-sti/Kconfig b/arch/arm/mach-sti/Kconfig
index d04e3bfe1918..835833e3c4f8 100644
--- a/arch/arm/mach-sti/Kconfig
+++ b/arch/arm/mach-sti/Kconfig
@@ -11,8 +11,9 @@ menuconfig ARCH_STI
11 select HAVE_SMP 11 select HAVE_SMP
12 select HAVE_ARM_SCU if SMP 12 select HAVE_ARM_SCU if SMP
13 select ARCH_REQUIRE_GPIOLIB 13 select ARCH_REQUIRE_GPIOLIB
14 select ARM_ERRATA_720789
15 select ARM_ERRATA_754322 14 select ARM_ERRATA_754322
15 select ARM_ERRATA_764369
16 select ARM_ERRATA_775420
16 select PL310_ERRATA_753970 if CACHE_PL310 17 select PL310_ERRATA_753970 if CACHE_PL310
17 select PL310_ERRATA_769419 if CACHE_PL310 18 select PL310_ERRATA_769419 if CACHE_PL310
18 help 19 help
diff --git a/arch/arm/mach-sti/headsmp.S b/arch/arm/mach-sti/headsmp.S
index 78ebc7559f53..4c09bae86edf 100644
--- a/arch/arm/mach-sti/headsmp.S
+++ b/arch/arm/mach-sti/headsmp.S
@@ -16,8 +16,6 @@
16#include <linux/linkage.h> 16#include <linux/linkage.h>
17#include <linux/init.h> 17#include <linux/init.h>
18 18
19 __INIT
20
21/* 19/*
22 * ST specific entry point for secondary CPUs. This provides 20 * ST specific entry point for secondary CPUs. This provides
23 * a "holding pen" into which all secondary cores are held until we're 21 * a "holding pen" into which all secondary cores are held until we're
diff --git a/arch/arm/mach-sti/platsmp.c b/arch/arm/mach-sti/platsmp.c
index 977a863468fc..dce50d983a8e 100644
--- a/arch/arm/mach-sti/platsmp.c
+++ b/arch/arm/mach-sti/platsmp.c
@@ -27,7 +27,7 @@
27 27
28#include "smp.h" 28#include "smp.h"
29 29
30static void __cpuinit write_pen_release(int val) 30static void write_pen_release(int val)
31{ 31{
32 pen_release = val; 32 pen_release = val;
33 smp_wmb(); 33 smp_wmb();
@@ -37,7 +37,7 @@ static void __cpuinit write_pen_release(int val)
37 37
38static DEFINE_SPINLOCK(boot_lock); 38static DEFINE_SPINLOCK(boot_lock);
39 39
40void __cpuinit sti_secondary_init(unsigned int cpu) 40void sti_secondary_init(unsigned int cpu)
41{ 41{
42 trace_hardirqs_off(); 42 trace_hardirqs_off();
43 43
@@ -54,7 +54,7 @@ void __cpuinit sti_secondary_init(unsigned int cpu)
54 spin_unlock(&boot_lock); 54 spin_unlock(&boot_lock);
55} 55}
56 56
57int __cpuinit sti_boot_secondary(unsigned int cpu, struct task_struct *idle) 57int sti_boot_secondary(unsigned int cpu, struct task_struct *idle)
58{ 58{
59 unsigned long timeout; 59 unsigned long timeout;
60 60
diff --git a/arch/arm/mach-tegra/platsmp.c b/arch/arm/mach-tegra/platsmp.c
index 24db4ac428ae..97b33a2a2d75 100644
--- a/arch/arm/mach-tegra/platsmp.c
+++ b/arch/arm/mach-tegra/platsmp.c
@@ -35,7 +35,7 @@
35 35
36static cpumask_t tegra_cpu_init_mask; 36static cpumask_t tegra_cpu_init_mask;
37 37
38static void __cpuinit tegra_secondary_init(unsigned int cpu) 38static void tegra_secondary_init(unsigned int cpu)
39{ 39{
40 cpumask_set_cpu(cpu, &tegra_cpu_init_mask); 40 cpumask_set_cpu(cpu, &tegra_cpu_init_mask);
41} 41}
@@ -167,7 +167,7 @@ static int tegra114_boot_secondary(unsigned int cpu, struct task_struct *idle)
167 return ret; 167 return ret;
168} 168}
169 169
170static int __cpuinit tegra_boot_secondary(unsigned int cpu, 170static int tegra_boot_secondary(unsigned int cpu,
171 struct task_struct *idle) 171 struct task_struct *idle)
172{ 172{
173 if (IS_ENABLED(CONFIG_ARCH_TEGRA_2x_SOC) && tegra_chip_id == TEGRA20) 173 if (IS_ENABLED(CONFIG_ARCH_TEGRA_2x_SOC) && tegra_chip_id == TEGRA20)
diff --git a/arch/arm/mach-tegra/pm.c b/arch/arm/mach-tegra/pm.c
index 94e69bee3da5..261fec140c06 100644
--- a/arch/arm/mach-tegra/pm.c
+++ b/arch/arm/mach-tegra/pm.c
@@ -191,7 +191,7 @@ static const char *lp_state[TEGRA_MAX_SUSPEND_MODE] = {
191 [TEGRA_SUSPEND_LP0] = "LP0", 191 [TEGRA_SUSPEND_LP0] = "LP0",
192}; 192};
193 193
194static int __cpuinit tegra_suspend_enter(suspend_state_t state) 194static int tegra_suspend_enter(suspend_state_t state)
195{ 195{
196 enum tegra_suspend_mode mode = tegra_pmc_get_suspend_mode(); 196 enum tegra_suspend_mode mode = tegra_pmc_get_suspend_mode();
197 197
diff --git a/arch/arm/mach-ux500/platsmp.c b/arch/arm/mach-ux500/platsmp.c
index 14d90469392f..1f296e796a4f 100644
--- a/arch/arm/mach-ux500/platsmp.c
+++ b/arch/arm/mach-ux500/platsmp.c
@@ -54,7 +54,7 @@ static void __iomem *scu_base_addr(void)
54 54
55static DEFINE_SPINLOCK(boot_lock); 55static DEFINE_SPINLOCK(boot_lock);
56 56
57static void __cpuinit ux500_secondary_init(unsigned int cpu) 57static void ux500_secondary_init(unsigned int cpu)
58{ 58{
59 /* 59 /*
60 * let the primary processor know we're out of the 60 * let the primary processor know we're out of the
@@ -69,7 +69,7 @@ static void __cpuinit ux500_secondary_init(unsigned int cpu)
69 spin_unlock(&boot_lock); 69 spin_unlock(&boot_lock);
70} 70}
71 71
72static int __cpuinit ux500_boot_secondary(unsigned int cpu, struct task_struct *idle) 72static int ux500_boot_secondary(unsigned int cpu, struct task_struct *idle)
73{ 73{
74 unsigned long timeout; 74 unsigned long timeout;
75 75
diff --git a/arch/arm/mach-zynq/common.c b/arch/arm/mach-zynq/common.c
index 5b799c29886e..5f252569c689 100644
--- a/arch/arm/mach-zynq/common.c
+++ b/arch/arm/mach-zynq/common.c
@@ -91,7 +91,7 @@ static void __init zynq_map_io(void)
91 zynq_scu_map_io(); 91 zynq_scu_map_io();
92} 92}
93 93
94static void zynq_system_reset(char mode, const char *cmd) 94static void zynq_system_reset(enum reboot_mode mode, const char *cmd)
95{ 95{
96 zynq_slcr_system_reset(); 96 zynq_slcr_system_reset();
97} 97}
diff --git a/arch/arm/mach-zynq/common.h b/arch/arm/mach-zynq/common.h
index fbbd0e21c404..3040d219570f 100644
--- a/arch/arm/mach-zynq/common.h
+++ b/arch/arm/mach-zynq/common.h
@@ -27,7 +27,7 @@ extern void secondary_startup(void);
27extern char zynq_secondary_trampoline; 27extern char zynq_secondary_trampoline;
28extern char zynq_secondary_trampoline_jump; 28extern char zynq_secondary_trampoline_jump;
29extern char zynq_secondary_trampoline_end; 29extern char zynq_secondary_trampoline_end;
30extern int __cpuinit zynq_cpun_start(u32 address, int cpu); 30extern int zynq_cpun_start(u32 address, int cpu);
31extern struct smp_operations zynq_smp_ops __initdata; 31extern struct smp_operations zynq_smp_ops __initdata;
32#endif 32#endif
33 33
diff --git a/arch/arm/mach-zynq/headsmp.S b/arch/arm/mach-zynq/headsmp.S
index d183cd234a9b..d4cd5f34fe5c 100644
--- a/arch/arm/mach-zynq/headsmp.S
+++ b/arch/arm/mach-zynq/headsmp.S
@@ -9,8 +9,6 @@
9#include <linux/linkage.h> 9#include <linux/linkage.h>
10#include <linux/init.h> 10#include <linux/init.h>
11 11
12 __CPUINIT
13
14ENTRY(zynq_secondary_trampoline) 12ENTRY(zynq_secondary_trampoline)
15 ldr r0, [pc] 13 ldr r0, [pc]
16 bx r0 14 bx r0
diff --git a/arch/arm/mach-zynq/platsmp.c b/arch/arm/mach-zynq/platsmp.c
index 023f225493f2..689fbbc3d9c8 100644
--- a/arch/arm/mach-zynq/platsmp.c
+++ b/arch/arm/mach-zynq/platsmp.c
@@ -30,11 +30,11 @@
30/* 30/*
31 * Store number of cores in the system 31 * Store number of cores in the system
32 * Because of scu_get_core_count() must be in __init section and can't 32 * Because of scu_get_core_count() must be in __init section and can't
33 * be called from zynq_cpun_start() because it is in __cpuinit section. 33 * be called from zynq_cpun_start() because it is not in __init section.
34 */ 34 */
35static int ncores; 35static int ncores;
36 36
37int __cpuinit zynq_cpun_start(u32 address, int cpu) 37int zynq_cpun_start(u32 address, int cpu)
38{ 38{
39 u32 trampoline_code_size = &zynq_secondary_trampoline_end - 39 u32 trampoline_code_size = &zynq_secondary_trampoline_end -
40 &zynq_secondary_trampoline; 40 &zynq_secondary_trampoline;
@@ -92,7 +92,7 @@ int __cpuinit zynq_cpun_start(u32 address, int cpu)
92} 92}
93EXPORT_SYMBOL(zynq_cpun_start); 93EXPORT_SYMBOL(zynq_cpun_start);
94 94
95static int __cpuinit zynq_boot_secondary(unsigned int cpu, 95static int zynq_boot_secondary(unsigned int cpu,
96 struct task_struct *idle) 96 struct task_struct *idle)
97{ 97{
98 return zynq_cpun_start(virt_to_phys(secondary_startup), cpu); 98 return zynq_cpun_start(virt_to_phys(secondary_startup), cpu);
diff --git a/arch/arm/mm/Kconfig b/arch/arm/mm/Kconfig
index 6cacdc8dd654..db5c2cab8fda 100644
--- a/arch/arm/mm/Kconfig
+++ b/arch/arm/mm/Kconfig
@@ -421,24 +421,28 @@ config CPU_32v3
421 select CPU_USE_DOMAINS if MMU 421 select CPU_USE_DOMAINS if MMU
422 select NEEDS_SYSCALL_FOR_CMPXCHG if SMP 422 select NEEDS_SYSCALL_FOR_CMPXCHG if SMP
423 select TLS_REG_EMUL if SMP || !MMU 423 select TLS_REG_EMUL if SMP || !MMU
424 select NEED_KUSER_HELPERS
424 425
425config CPU_32v4 426config CPU_32v4
426 bool 427 bool
427 select CPU_USE_DOMAINS if MMU 428 select CPU_USE_DOMAINS if MMU
428 select NEEDS_SYSCALL_FOR_CMPXCHG if SMP 429 select NEEDS_SYSCALL_FOR_CMPXCHG if SMP
429 select TLS_REG_EMUL if SMP || !MMU 430 select TLS_REG_EMUL if SMP || !MMU
431 select NEED_KUSER_HELPERS
430 432
431config CPU_32v4T 433config CPU_32v4T
432 bool 434 bool
433 select CPU_USE_DOMAINS if MMU 435 select CPU_USE_DOMAINS if MMU
434 select NEEDS_SYSCALL_FOR_CMPXCHG if SMP 436 select NEEDS_SYSCALL_FOR_CMPXCHG if SMP
435 select TLS_REG_EMUL if SMP || !MMU 437 select TLS_REG_EMUL if SMP || !MMU
438 select NEED_KUSER_HELPERS
436 439
437config CPU_32v5 440config CPU_32v5
438 bool 441 bool
439 select CPU_USE_DOMAINS if MMU 442 select CPU_USE_DOMAINS if MMU
440 select NEEDS_SYSCALL_FOR_CMPXCHG if SMP 443 select NEEDS_SYSCALL_FOR_CMPXCHG if SMP
441 select TLS_REG_EMUL if SMP || !MMU 444 select TLS_REG_EMUL if SMP || !MMU
445 select NEED_KUSER_HELPERS
442 446
443config CPU_32v6 447config CPU_32v6
444 bool 448 bool
@@ -776,6 +780,7 @@ config CPU_BPREDICT_DISABLE
776 780
777config TLS_REG_EMUL 781config TLS_REG_EMUL
778 bool 782 bool
783 select NEED_KUSER_HELPERS
779 help 784 help
780 An SMP system using a pre-ARMv6 processor (there are apparently 785 An SMP system using a pre-ARMv6 processor (there are apparently
781 a few prototypes like that in existence) and therefore access to 786 a few prototypes like that in existence) and therefore access to
@@ -783,11 +788,40 @@ config TLS_REG_EMUL
783 788
784config NEEDS_SYSCALL_FOR_CMPXCHG 789config NEEDS_SYSCALL_FOR_CMPXCHG
785 bool 790 bool
791 select NEED_KUSER_HELPERS
786 help 792 help
787 SMP on a pre-ARMv6 processor? Well OK then. 793 SMP on a pre-ARMv6 processor? Well OK then.
788 Forget about fast user space cmpxchg support. 794 Forget about fast user space cmpxchg support.
789 It is just not possible. 795 It is just not possible.
790 796
797config NEED_KUSER_HELPERS
798 bool
799
800config KUSER_HELPERS
801 bool "Enable kuser helpers in vector page" if !NEED_KUSER_HELPERS
802 default y
803 help
804 Warning: disabling this option may break user programs.
805
806 Provide kuser helpers in the vector page. The kernel provides
807 helper code to userspace in read only form at a fixed location
808 in the high vector page to allow userspace to be independent of
809 the CPU type fitted to the system. This permits binaries to be
810 run on ARMv4 through to ARMv7 without modification.
811
812 However, the fixed address nature of these helpers can be used
813 by ROP (return orientated programming) authors when creating
814 exploits.
815
816 If all of the binaries and libraries which run on your platform
817 are built specifically for your platform, and make no use of
818 these helpers, then you can turn this option off. However,
819 when such an binary or library is run, it will receive a SIGILL
820 signal, which will terminate the program.
821
822 Say N here only if you are absolutely certain that you do not
823 need these helpers; otherwise, the safe option is to say Y.
824
791config DMA_CACHE_RWFO 825config DMA_CACHE_RWFO
792 bool "Enable read/write for ownership DMA cache maintenance" 826 bool "Enable read/write for ownership DMA cache maintenance"
793 depends on CPU_V6K && SMP 827 depends on CPU_V6K && SMP
diff --git a/arch/arm/mm/context.c b/arch/arm/mm/context.c
index b55b1015724b..4a0544492f10 100644
--- a/arch/arm/mm/context.c
+++ b/arch/arm/mm/context.c
@@ -245,7 +245,8 @@ void check_and_switch_context(struct mm_struct *mm, struct task_struct *tsk)
245 if (cpumask_test_and_clear_cpu(cpu, &tlb_flush_pending)) { 245 if (cpumask_test_and_clear_cpu(cpu, &tlb_flush_pending)) {
246 local_flush_bp_all(); 246 local_flush_bp_all();
247 local_flush_tlb_all(); 247 local_flush_tlb_all();
248 dummy_flush_tlb_a15_erratum(); 248 if (erratum_a15_798181())
249 dummy_flush_tlb_a15_erratum();
249 } 250 }
250 251
251 atomic64_set(&per_cpu(active_asids, cpu), asid); 252 atomic64_set(&per_cpu(active_asids, cpu), asid);
diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c
index 4f56617a2392..53cdbd39ec8e 100644
--- a/arch/arm/mm/mmu.c
+++ b/arch/arm/mm/mmu.c
@@ -989,6 +989,7 @@ phys_addr_t arm_lowmem_limit __initdata = 0;
989 989
990void __init sanity_check_meminfo(void) 990void __init sanity_check_meminfo(void)
991{ 991{
992 phys_addr_t memblock_limit = 0;
992 int i, j, highmem = 0; 993 int i, j, highmem = 0;
993 phys_addr_t vmalloc_limit = __pa(vmalloc_min - 1) + 1; 994 phys_addr_t vmalloc_limit = __pa(vmalloc_min - 1) + 1;
994 995
@@ -1052,9 +1053,32 @@ void __init sanity_check_meminfo(void)
1052 bank->size = size_limit; 1053 bank->size = size_limit;
1053 } 1054 }
1054#endif 1055#endif
1055 if (!bank->highmem && bank->start + bank->size > arm_lowmem_limit) 1056 if (!bank->highmem) {
1056 arm_lowmem_limit = bank->start + bank->size; 1057 phys_addr_t bank_end = bank->start + bank->size;
1057 1058
1059 if (bank_end > arm_lowmem_limit)
1060 arm_lowmem_limit = bank_end;
1061
1062 /*
1063 * Find the first non-section-aligned page, and point
1064 * memblock_limit at it. This relies on rounding the
1065 * limit down to be section-aligned, which happens at
1066 * the end of this function.
1067 *
1068 * With this algorithm, the start or end of almost any
1069 * bank can be non-section-aligned. The only exception
1070 * is that the start of the bank 0 must be section-
1071 * aligned, since otherwise memory would need to be
1072 * allocated when mapping the start of bank 0, which
1073 * occurs before any free memory is mapped.
1074 */
1075 if (!memblock_limit) {
1076 if (!IS_ALIGNED(bank->start, SECTION_SIZE))
1077 memblock_limit = bank->start;
1078 else if (!IS_ALIGNED(bank_end, SECTION_SIZE))
1079 memblock_limit = bank_end;
1080 }
1081 }
1058 j++; 1082 j++;
1059 } 1083 }
1060#ifdef CONFIG_HIGHMEM 1084#ifdef CONFIG_HIGHMEM
@@ -1079,7 +1103,18 @@ void __init sanity_check_meminfo(void)
1079#endif 1103#endif
1080 meminfo.nr_banks = j; 1104 meminfo.nr_banks = j;
1081 high_memory = __va(arm_lowmem_limit - 1) + 1; 1105 high_memory = __va(arm_lowmem_limit - 1) + 1;
1082 memblock_set_current_limit(arm_lowmem_limit); 1106
1107 /*
1108 * Round the memblock limit down to a section size. This
1109 * helps to ensure that we will allocate memory from the
1110 * last full section, which should be mapped.
1111 */
1112 if (memblock_limit)
1113 memblock_limit = round_down(memblock_limit, SECTION_SIZE);
1114 if (!memblock_limit)
1115 memblock_limit = arm_lowmem_limit;
1116
1117 memblock_set_current_limit(memblock_limit);
1083} 1118}
1084 1119
1085static inline void prepare_page_table(void) 1120static inline void prepare_page_table(void)
@@ -1160,7 +1195,7 @@ static void __init devicemaps_init(struct machine_desc *mdesc)
1160 /* 1195 /*
1161 * Allocate the vector page early. 1196 * Allocate the vector page early.
1162 */ 1197 */
1163 vectors = early_alloc(PAGE_SIZE); 1198 vectors = early_alloc(PAGE_SIZE * 2);
1164 1199
1165 early_trap_init(vectors); 1200 early_trap_init(vectors);
1166 1201
@@ -1205,15 +1240,27 @@ static void __init devicemaps_init(struct machine_desc *mdesc)
1205 map.pfn = __phys_to_pfn(virt_to_phys(vectors)); 1240 map.pfn = __phys_to_pfn(virt_to_phys(vectors));
1206 map.virtual = 0xffff0000; 1241 map.virtual = 0xffff0000;
1207 map.length = PAGE_SIZE; 1242 map.length = PAGE_SIZE;
1243#ifdef CONFIG_KUSER_HELPERS
1208 map.type = MT_HIGH_VECTORS; 1244 map.type = MT_HIGH_VECTORS;
1245#else
1246 map.type = MT_LOW_VECTORS;
1247#endif
1209 create_mapping(&map); 1248 create_mapping(&map);
1210 1249
1211 if (!vectors_high()) { 1250 if (!vectors_high()) {
1212 map.virtual = 0; 1251 map.virtual = 0;
1252 map.length = PAGE_SIZE * 2;
1213 map.type = MT_LOW_VECTORS; 1253 map.type = MT_LOW_VECTORS;
1214 create_mapping(&map); 1254 create_mapping(&map);
1215 } 1255 }
1216 1256
1257 /* Now create a kernel read-only mapping */
1258 map.pfn += 1;
1259 map.virtual = 0xffff0000 + PAGE_SIZE;
1260 map.length = PAGE_SIZE;
1261 map.type = MT_LOW_VECTORS;
1262 create_mapping(&map);
1263
1217 /* 1264 /*
1218 * Ask the machine support to map in the statically mapped devices. 1265 * Ask the machine support to map in the statically mapped devices.
1219 */ 1266 */
@@ -1276,8 +1323,6 @@ void __init paging_init(struct machine_desc *mdesc)
1276{ 1323{
1277 void *zero_page; 1324 void *zero_page;
1278 1325
1279 memblock_set_current_limit(arm_lowmem_limit);
1280
1281 build_mem_type_table(); 1326 build_mem_type_table();
1282 prepare_page_table(); 1327 prepare_page_table();
1283 map_lowmem(); 1328 map_lowmem();
diff --git a/arch/arm/mm/proc-arm1020.S b/arch/arm/mm/proc-arm1020.S
index 2bb61e703d6c..d1a2d05971e0 100644
--- a/arch/arm/mm/proc-arm1020.S
+++ b/arch/arm/mm/proc-arm1020.S
@@ -443,8 +443,6 @@ ENTRY(cpu_arm1020_set_pte_ext)
443#endif /* CONFIG_MMU */ 443#endif /* CONFIG_MMU */
444 mov pc, lr 444 mov pc, lr
445 445
446 __CPUINIT
447
448 .type __arm1020_setup, #function 446 .type __arm1020_setup, #function
449__arm1020_setup: 447__arm1020_setup:
450 mov r0, #0 448 mov r0, #0
diff --git a/arch/arm/mm/proc-arm1020e.S b/arch/arm/mm/proc-arm1020e.S
index 8f96aa40f510..9d89405c3d03 100644
--- a/arch/arm/mm/proc-arm1020e.S
+++ b/arch/arm/mm/proc-arm1020e.S
@@ -425,8 +425,6 @@ ENTRY(cpu_arm1020e_set_pte_ext)
425#endif /* CONFIG_MMU */ 425#endif /* CONFIG_MMU */
426 mov pc, lr 426 mov pc, lr
427 427
428 __CPUINIT
429
430 .type __arm1020e_setup, #function 428 .type __arm1020e_setup, #function
431__arm1020e_setup: 429__arm1020e_setup:
432 mov r0, #0 430 mov r0, #0
diff --git a/arch/arm/mm/proc-arm1022.S b/arch/arm/mm/proc-arm1022.S
index 8ebe4a469a22..6f01a0ae3b30 100644
--- a/arch/arm/mm/proc-arm1022.S
+++ b/arch/arm/mm/proc-arm1022.S
@@ -407,8 +407,6 @@ ENTRY(cpu_arm1022_set_pte_ext)
407#endif /* CONFIG_MMU */ 407#endif /* CONFIG_MMU */
408 mov pc, lr 408 mov pc, lr
409 409
410 __CPUINIT
411
412 .type __arm1022_setup, #function 410 .type __arm1022_setup, #function
413__arm1022_setup: 411__arm1022_setup:
414 mov r0, #0 412 mov r0, #0
diff --git a/arch/arm/mm/proc-arm1026.S b/arch/arm/mm/proc-arm1026.S
index 093fc7e520c3..4799a24b43e6 100644
--- a/arch/arm/mm/proc-arm1026.S
+++ b/arch/arm/mm/proc-arm1026.S
@@ -396,9 +396,6 @@ ENTRY(cpu_arm1026_set_pte_ext)
396#endif /* CONFIG_MMU */ 396#endif /* CONFIG_MMU */
397 mov pc, lr 397 mov pc, lr
398 398
399
400 __CPUINIT
401
402 .type __arm1026_setup, #function 399 .type __arm1026_setup, #function
403__arm1026_setup: 400__arm1026_setup:
404 mov r0, #0 401 mov r0, #0
diff --git a/arch/arm/mm/proc-arm720.S b/arch/arm/mm/proc-arm720.S
index 0ac908c7ade1..d42c37f9f5bc 100644
--- a/arch/arm/mm/proc-arm720.S
+++ b/arch/arm/mm/proc-arm720.S
@@ -116,8 +116,6 @@ ENTRY(cpu_arm720_reset)
116ENDPROC(cpu_arm720_reset) 116ENDPROC(cpu_arm720_reset)
117 .popsection 117 .popsection
118 118
119 __CPUINIT
120
121 .type __arm710_setup, #function 119 .type __arm710_setup, #function
122__arm710_setup: 120__arm710_setup:
123 mov r0, #0 121 mov r0, #0
diff --git a/arch/arm/mm/proc-arm740.S b/arch/arm/mm/proc-arm740.S
index fde2d2a794cf..9b0ae90cbf17 100644
--- a/arch/arm/mm/proc-arm740.S
+++ b/arch/arm/mm/proc-arm740.S
@@ -60,8 +60,6 @@ ENTRY(cpu_arm740_reset)
60ENDPROC(cpu_arm740_reset) 60ENDPROC(cpu_arm740_reset)
61 .popsection 61 .popsection
62 62
63 __CPUINIT
64
65 .type __arm740_setup, #function 63 .type __arm740_setup, #function
66__arm740_setup: 64__arm740_setup:
67 mov r0, #0 65 mov r0, #0
diff --git a/arch/arm/mm/proc-arm7tdmi.S b/arch/arm/mm/proc-arm7tdmi.S
index 6ddea3e464bd..f6cc3f63ce39 100644
--- a/arch/arm/mm/proc-arm7tdmi.S
+++ b/arch/arm/mm/proc-arm7tdmi.S
@@ -51,8 +51,6 @@ ENTRY(cpu_arm7tdmi_reset)
51ENDPROC(cpu_arm7tdmi_reset) 51ENDPROC(cpu_arm7tdmi_reset)
52 .popsection 52 .popsection
53 53
54 __CPUINIT
55
56 .type __arm7tdmi_setup, #function 54 .type __arm7tdmi_setup, #function
57__arm7tdmi_setup: 55__arm7tdmi_setup:
58 mov pc, lr 56 mov pc, lr
diff --git a/arch/arm/mm/proc-arm920.S b/arch/arm/mm/proc-arm920.S
index 2556cf1c2da1..549557df6d57 100644
--- a/arch/arm/mm/proc-arm920.S
+++ b/arch/arm/mm/proc-arm920.S
@@ -410,8 +410,6 @@ ENTRY(cpu_arm920_do_resume)
410ENDPROC(cpu_arm920_do_resume) 410ENDPROC(cpu_arm920_do_resume)
411#endif 411#endif
412 412
413 __CPUINIT
414
415 .type __arm920_setup, #function 413 .type __arm920_setup, #function
416__arm920_setup: 414__arm920_setup:
417 mov r0, #0 415 mov r0, #0
diff --git a/arch/arm/mm/proc-arm922.S b/arch/arm/mm/proc-arm922.S
index 4464c49d7449..2a758b06c6f6 100644
--- a/arch/arm/mm/proc-arm922.S
+++ b/arch/arm/mm/proc-arm922.S
@@ -388,8 +388,6 @@ ENTRY(cpu_arm922_set_pte_ext)
388#endif /* CONFIG_MMU */ 388#endif /* CONFIG_MMU */
389 mov pc, lr 389 mov pc, lr
390 390
391 __CPUINIT
392
393 .type __arm922_setup, #function 391 .type __arm922_setup, #function
394__arm922_setup: 392__arm922_setup:
395 mov r0, #0 393 mov r0, #0
diff --git a/arch/arm/mm/proc-arm925.S b/arch/arm/mm/proc-arm925.S
index 281eb9b9c1d6..97448c3acf38 100644
--- a/arch/arm/mm/proc-arm925.S
+++ b/arch/arm/mm/proc-arm925.S
@@ -438,8 +438,6 @@ ENTRY(cpu_arm925_set_pte_ext)
438#endif /* CONFIG_MMU */ 438#endif /* CONFIG_MMU */
439 mov pc, lr 439 mov pc, lr
440 440
441 __CPUINIT
442
443 .type __arm925_setup, #function 441 .type __arm925_setup, #function
444__arm925_setup: 442__arm925_setup:
445 mov r0, #0 443 mov r0, #0
diff --git a/arch/arm/mm/proc-arm926.S b/arch/arm/mm/proc-arm926.S
index 344c8a548cc0..0f098f407c9f 100644
--- a/arch/arm/mm/proc-arm926.S
+++ b/arch/arm/mm/proc-arm926.S
@@ -425,8 +425,6 @@ ENTRY(cpu_arm926_do_resume)
425ENDPROC(cpu_arm926_do_resume) 425ENDPROC(cpu_arm926_do_resume)
426#endif 426#endif
427 427
428 __CPUINIT
429
430 .type __arm926_setup, #function 428 .type __arm926_setup, #function
431__arm926_setup: 429__arm926_setup:
432 mov r0, #0 430 mov r0, #0
diff --git a/arch/arm/mm/proc-arm940.S b/arch/arm/mm/proc-arm940.S
index 8da189d4a402..1c39a704ff6e 100644
--- a/arch/arm/mm/proc-arm940.S
+++ b/arch/arm/mm/proc-arm940.S
@@ -273,8 +273,6 @@ ENDPROC(arm940_dma_unmap_area)
273 @ define struct cpu_cache_fns (see <asm/cacheflush.h> and proc-macros.S) 273 @ define struct cpu_cache_fns (see <asm/cacheflush.h> and proc-macros.S)
274 define_cache_functions arm940 274 define_cache_functions arm940
275 275
276 __CPUINIT
277
278 .type __arm940_setup, #function 276 .type __arm940_setup, #function
279__arm940_setup: 277__arm940_setup:
280 mov r0, #0 278 mov r0, #0
diff --git a/arch/arm/mm/proc-arm946.S b/arch/arm/mm/proc-arm946.S
index f666cf34075a..0289cd905e73 100644
--- a/arch/arm/mm/proc-arm946.S
+++ b/arch/arm/mm/proc-arm946.S
@@ -326,8 +326,6 @@ ENTRY(cpu_arm946_dcache_clean_area)
326 mcr p15, 0, r0, c7, c10, 4 @ drain WB 326 mcr p15, 0, r0, c7, c10, 4 @ drain WB
327 mov pc, lr 327 mov pc, lr
328 328
329 __CPUINIT
330
331 .type __arm946_setup, #function 329 .type __arm946_setup, #function
332__arm946_setup: 330__arm946_setup:
333 mov r0, #0 331 mov r0, #0
diff --git a/arch/arm/mm/proc-arm9tdmi.S b/arch/arm/mm/proc-arm9tdmi.S
index 8881391dfb9e..f51197ba754a 100644
--- a/arch/arm/mm/proc-arm9tdmi.S
+++ b/arch/arm/mm/proc-arm9tdmi.S
@@ -51,8 +51,6 @@ ENTRY(cpu_arm9tdmi_reset)
51ENDPROC(cpu_arm9tdmi_reset) 51ENDPROC(cpu_arm9tdmi_reset)
52 .popsection 52 .popsection
53 53
54 __CPUINIT
55
56 .type __arm9tdmi_setup, #function 54 .type __arm9tdmi_setup, #function
57__arm9tdmi_setup: 55__arm9tdmi_setup:
58 mov pc, lr 56 mov pc, lr
diff --git a/arch/arm/mm/proc-fa526.S b/arch/arm/mm/proc-fa526.S
index aaeb6c127c7a..2dfc0f1d3bfd 100644
--- a/arch/arm/mm/proc-fa526.S
+++ b/arch/arm/mm/proc-fa526.S
@@ -135,8 +135,6 @@ ENTRY(cpu_fa526_set_pte_ext)
135#endif 135#endif
136 mov pc, lr 136 mov pc, lr
137 137
138 __CPUINIT
139
140 .type __fa526_setup, #function 138 .type __fa526_setup, #function
141__fa526_setup: 139__fa526_setup:
142 /* On return of this routine, r0 must carry correct flags for CFG register */ 140 /* On return of this routine, r0 must carry correct flags for CFG register */
diff --git a/arch/arm/mm/proc-feroceon.S b/arch/arm/mm/proc-feroceon.S
index 4106b09e0c29..d5146b98c8d1 100644
--- a/arch/arm/mm/proc-feroceon.S
+++ b/arch/arm/mm/proc-feroceon.S
@@ -514,8 +514,6 @@ ENTRY(cpu_feroceon_set_pte_ext)
514#endif 514#endif
515 mov pc, lr 515 mov pc, lr
516 516
517 __CPUINIT
518
519 .type __feroceon_setup, #function 517 .type __feroceon_setup, #function
520__feroceon_setup: 518__feroceon_setup:
521 mov r0, #0 519 mov r0, #0
diff --git a/arch/arm/mm/proc-mohawk.S b/arch/arm/mm/proc-mohawk.S
index 0b60dd3d742a..40acba595731 100644
--- a/arch/arm/mm/proc-mohawk.S
+++ b/arch/arm/mm/proc-mohawk.S
@@ -383,8 +383,6 @@ ENTRY(cpu_mohawk_do_resume)
383ENDPROC(cpu_mohawk_do_resume) 383ENDPROC(cpu_mohawk_do_resume)
384#endif 384#endif
385 385
386 __CPUINIT
387
388 .type __mohawk_setup, #function 386 .type __mohawk_setup, #function
389__mohawk_setup: 387__mohawk_setup:
390 mov r0, #0 388 mov r0, #0
diff --git a/arch/arm/mm/proc-sa110.S b/arch/arm/mm/proc-sa110.S
index 775d70fba937..c45319c8f1d9 100644
--- a/arch/arm/mm/proc-sa110.S
+++ b/arch/arm/mm/proc-sa110.S
@@ -159,8 +159,6 @@ ENTRY(cpu_sa110_set_pte_ext)
159#endif 159#endif
160 mov pc, lr 160 mov pc, lr
161 161
162 __CPUINIT
163
164 .type __sa110_setup, #function 162 .type __sa110_setup, #function
165__sa110_setup: 163__sa110_setup:
166 mov r10, #0 164 mov r10, #0
diff --git a/arch/arm/mm/proc-sa1100.S b/arch/arm/mm/proc-sa1100.S
index d92dfd081429..09d241ae2dbe 100644
--- a/arch/arm/mm/proc-sa1100.S
+++ b/arch/arm/mm/proc-sa1100.S
@@ -198,8 +198,6 @@ ENTRY(cpu_sa1100_do_resume)
198ENDPROC(cpu_sa1100_do_resume) 198ENDPROC(cpu_sa1100_do_resume)
199#endif 199#endif
200 200
201 __CPUINIT
202
203 .type __sa1100_setup, #function 201 .type __sa1100_setup, #function
204__sa1100_setup: 202__sa1100_setup:
205 mov r0, #0 203 mov r0, #0
diff --git a/arch/arm/mm/proc-v6.S b/arch/arm/mm/proc-v6.S
index 2d1ef87328a1..1128064fddcb 100644
--- a/arch/arm/mm/proc-v6.S
+++ b/arch/arm/mm/proc-v6.S
@@ -180,8 +180,6 @@ ENDPROC(cpu_v6_do_resume)
180 180
181 .align 181 .align
182 182
183 __CPUINIT
184
185/* 183/*
186 * __v6_setup 184 * __v6_setup
187 * 185 *
diff --git a/arch/arm/mm/proc-v7-2level.S b/arch/arm/mm/proc-v7-2level.S
index 9704097c450e..bdd3be4be77a 100644
--- a/arch/arm/mm/proc-v7-2level.S
+++ b/arch/arm/mm/proc-v7-2level.S
@@ -110,7 +110,7 @@ ENTRY(cpu_v7_set_pte_ext)
110 ARM( str r3, [r0, #2048]! ) 110 ARM( str r3, [r0, #2048]! )
111 THUMB( add r0, r0, #2048 ) 111 THUMB( add r0, r0, #2048 )
112 THUMB( str r3, [r0] ) 112 THUMB( str r3, [r0] )
113 ALT_SMP(mov pc,lr) 113 ALT_SMP(W(nop))
114 ALT_UP (mcr p15, 0, r0, c7, c10, 1) @ flush_pte 114 ALT_UP (mcr p15, 0, r0, c7, c10, 1) @ flush_pte
115#endif 115#endif
116 mov pc, lr 116 mov pc, lr
@@ -160,8 +160,6 @@ ENDPROC(cpu_v7_set_pte_ext)
160 mcr p15, 0, \ttbr1, c2, c0, 1 @ load TTB1 160 mcr p15, 0, \ttbr1, c2, c0, 1 @ load TTB1
161 .endm 161 .endm
162 162
163 __CPUINIT
164
165 /* AT 163 /* AT
166 * TFR EV X F I D LR S 164 * TFR EV X F I D LR S
167 * .EEE ..EE PUI. .T.T 4RVI ZWRS BLDP WCAM 165 * .EEE ..EE PUI. .T.T 4RVI ZWRS BLDP WCAM
@@ -172,5 +170,3 @@ ENDPROC(cpu_v7_set_pte_ext)
172 .type v7_crval, #object 170 .type v7_crval, #object
173v7_crval: 171v7_crval:
174 crval clear=0x2120c302, mmuset=0x10c03c7d, ucset=0x00c01c7c 172 crval clear=0x2120c302, mmuset=0x10c03c7d, ucset=0x00c01c7c
175
176 .previous
diff --git a/arch/arm/mm/proc-v7-3level.S b/arch/arm/mm/proc-v7-3level.S
index 5ffe1956c6d9..01a719e18bb0 100644
--- a/arch/arm/mm/proc-v7-3level.S
+++ b/arch/arm/mm/proc-v7-3level.S
@@ -81,7 +81,7 @@ ENTRY(cpu_v7_set_pte_ext)
81 tst r3, #1 << (55 - 32) @ L_PTE_DIRTY 81 tst r3, #1 << (55 - 32) @ L_PTE_DIRTY
82 orreq r2, #L_PTE_RDONLY 82 orreq r2, #L_PTE_RDONLY
831: strd r2, r3, [r0] 831: strd r2, r3, [r0]
84 ALT_SMP(mov pc, lr) 84 ALT_SMP(W(nop))
85 ALT_UP (mcr p15, 0, r0, c7, c10, 1) @ flush_pte 85 ALT_UP (mcr p15, 0, r0, c7, c10, 1) @ flush_pte
86#endif 86#endif
87 mov pc, lr 87 mov pc, lr
@@ -140,8 +140,6 @@ ENDPROC(cpu_v7_set_pte_ext)
140 mcrr p15, 0, \ttbr0, \zero, c2 @ load TTBR0 140 mcrr p15, 0, \ttbr0, \zero, c2 @ load TTBR0
141 .endm 141 .endm
142 142
143 __CPUINIT
144
145 /* 143 /*
146 * AT 144 * AT
147 * TFR EV X F IHD LR S 145 * TFR EV X F IHD LR S
@@ -153,5 +151,3 @@ ENDPROC(cpu_v7_set_pte_ext)
153 .type v7_crval, #object 151 .type v7_crval, #object
154v7_crval: 152v7_crval:
155 crval clear=0x0120c302, mmuset=0x30c23c7d, ucset=0x00c01c7c 153 crval clear=0x0120c302, mmuset=0x30c23c7d, ucset=0x00c01c7c
156
157 .previous
diff --git a/arch/arm/mm/proc-v7.S b/arch/arm/mm/proc-v7.S
index 7ef3ad05df39..73398bcf9bd8 100644
--- a/arch/arm/mm/proc-v7.S
+++ b/arch/arm/mm/proc-v7.S
@@ -75,13 +75,14 @@ ENTRY(cpu_v7_do_idle)
75ENDPROC(cpu_v7_do_idle) 75ENDPROC(cpu_v7_do_idle)
76 76
77ENTRY(cpu_v7_dcache_clean_area) 77ENTRY(cpu_v7_dcache_clean_area)
78 ALT_SMP(mov pc, lr) @ MP extensions imply L1 PTW 78 ALT_SMP(W(nop)) @ MP extensions imply L1 PTW
79 ALT_UP(W(nop)) 79 ALT_UP_B(1f)
80 dcache_line_size r2, r3 80 mov pc, lr
811: mcr p15, 0, r0, c7, c10, 1 @ clean D entry 811: dcache_line_size r2, r3
822: mcr p15, 0, r0, c7, c10, 1 @ clean D entry
82 add r0, r0, r2 83 add r0, r0, r2
83 subs r1, r1, r2 84 subs r1, r1, r2
84 bhi 1b 85 bhi 2b
85 dsb 86 dsb
86 mov pc, lr 87 mov pc, lr
87ENDPROC(cpu_v7_dcache_clean_area) 88ENDPROC(cpu_v7_dcache_clean_area)
@@ -167,8 +168,6 @@ ENDPROC(cpu_pj4b_do_idle)
167 168
168#endif 169#endif
169 170
170 __CPUINIT
171
172/* 171/*
173 * __v7_setup 172 * __v7_setup
174 * 173 *
diff --git a/arch/arm/mm/proc-xsc3.S b/arch/arm/mm/proc-xsc3.S
index e8efd83b6f25..dc1645890042 100644
--- a/arch/arm/mm/proc-xsc3.S
+++ b/arch/arm/mm/proc-xsc3.S
@@ -446,8 +446,6 @@ ENTRY(cpu_xsc3_do_resume)
446ENDPROC(cpu_xsc3_do_resume) 446ENDPROC(cpu_xsc3_do_resume)
447#endif 447#endif
448 448
449 __CPUINIT
450
451 .type __xsc3_setup, #function 449 .type __xsc3_setup, #function
452__xsc3_setup: 450__xsc3_setup:
453 mov r0, #PSR_F_BIT|PSR_I_BIT|SVC_MODE 451 mov r0, #PSR_F_BIT|PSR_I_BIT|SVC_MODE
diff --git a/arch/arm/mm/proc-xscale.S b/arch/arm/mm/proc-xscale.S
index e766f889bfd6..d19b1cfcad91 100644
--- a/arch/arm/mm/proc-xscale.S
+++ b/arch/arm/mm/proc-xscale.S
@@ -558,8 +558,6 @@ ENTRY(cpu_xscale_do_resume)
558ENDPROC(cpu_xscale_do_resume) 558ENDPROC(cpu_xscale_do_resume)
559#endif 559#endif
560 560
561 __CPUINIT
562
563 .type __xscale_setup, #function 561 .type __xscale_setup, #function
564__xscale_setup: 562__xscale_setup:
565 mcr p15, 0, ip, c7, c7, 0 @ invalidate I, D caches & BTB 563 mcr p15, 0, ip, c7, c7, 0 @ invalidate I, D caches & BTB
diff --git a/arch/arm/plat-samsung/Kconfig b/arch/arm/plat-samsung/Kconfig
index 3dc5cbea86cc..a5b5ff6e68d2 100644
--- a/arch/arm/plat-samsung/Kconfig
+++ b/arch/arm/plat-samsung/Kconfig
@@ -29,6 +29,13 @@ config PLAT_S5P
29 help 29 help
30 Base platform code for Samsung's S5P series SoC. 30 Base platform code for Samsung's S5P series SoC.
31 31
32config SAMSUNG_PM
33 bool
34 depends on PM && (PLAT_S3C24XX || ARCH_S3C64XX || ARCH_S5P64X0 || S5P_PM)
35 default y
36 help
37 Base platform power management code for samsung code
38
32if PLAT_SAMSUNG 39if PLAT_SAMSUNG
33 40
34# boot configurations 41# boot configurations
diff --git a/arch/arm/plat-samsung/Makefile b/arch/arm/plat-samsung/Makefile
index 98d07d8fc7a7..199bbe304d02 100644
--- a/arch/arm/plat-samsung/Makefile
+++ b/arch/arm/plat-samsung/Makefile
@@ -51,7 +51,7 @@ obj-$(CONFIG_SAMSUNG_DMADEV) += dma-ops.o
51 51
52# PM support 52# PM support
53 53
54obj-$(CONFIG_PM) += pm.o 54obj-$(CONFIG_SAMSUNG_PM) += pm.o
55obj-$(CONFIG_SAMSUNG_PM_GPIO) += pm-gpio.o 55obj-$(CONFIG_SAMSUNG_PM_GPIO) += pm-gpio.o
56obj-$(CONFIG_SAMSUNG_PM_CHECK) += pm-check.o 56obj-$(CONFIG_SAMSUNG_PM_CHECK) += pm-check.o
57 57
diff --git a/arch/arm/plat-samsung/include/plat/clock.h b/arch/arm/plat-samsung/include/plat/clock.h
index a62753dc15ba..df45d6edc98d 100644
--- a/arch/arm/plat-samsung/include/plat/clock.h
+++ b/arch/arm/plat-samsung/include/plat/clock.h
@@ -83,6 +83,11 @@ extern struct clk clk_ext;
83extern struct clksrc_clk clk_epllref; 83extern struct clksrc_clk clk_epllref;
84extern struct clksrc_clk clk_esysclk; 84extern struct clksrc_clk clk_esysclk;
85 85
86/* S3C24XX UART clocks */
87extern struct clk s3c24xx_clk_uart0;
88extern struct clk s3c24xx_clk_uart1;
89extern struct clk s3c24xx_clk_uart2;
90
86/* S3C64XX specific clocks */ 91/* S3C64XX specific clocks */
87extern struct clk clk_h2; 92extern struct clk clk_h2;
88extern struct clk clk_27m; 93extern struct clk clk_27m;
diff --git a/arch/arm/plat-samsung/include/plat/pm.h b/arch/arm/plat-samsung/include/plat/pm.h
index 5d47ca35cabd..6bc1a8f471e3 100644
--- a/arch/arm/plat-samsung/include/plat/pm.h
+++ b/arch/arm/plat-samsung/include/plat/pm.h
@@ -19,7 +19,7 @@
19 19
20struct device; 20struct device;
21 21
22#ifdef CONFIG_PM 22#ifdef CONFIG_SAMSUNG_PM
23 23
24extern __init int s3c_pm_init(void); 24extern __init int s3c_pm_init(void);
25extern __init int s3c64xx_pm_init(void); 25extern __init int s3c64xx_pm_init(void);
@@ -58,8 +58,6 @@ extern unsigned char pm_uart_udivslot; /* true to save UART UDIVSLOT */
58 58
59/* from sleep.S */ 59/* from sleep.S */
60 60
61extern void s3c_cpu_resume(void);
62
63extern int s3c2410_cpu_suspend(unsigned long); 61extern int s3c2410_cpu_suspend(unsigned long);
64 62
65/* sleep save info */ 63/* sleep save info */
@@ -106,12 +104,14 @@ extern void s3c_pm_do_save(struct sleep_save *ptr, int count);
106extern void s3c_pm_do_restore(struct sleep_save *ptr, int count); 104extern void s3c_pm_do_restore(struct sleep_save *ptr, int count);
107extern void s3c_pm_do_restore_core(struct sleep_save *ptr, int count); 105extern void s3c_pm_do_restore_core(struct sleep_save *ptr, int count);
108 106
109#ifdef CONFIG_PM 107#ifdef CONFIG_SAMSUNG_PM
110extern int s3c_irq_wake(struct irq_data *data, unsigned int state); 108extern int s3c_irq_wake(struct irq_data *data, unsigned int state);
111extern int s3c_irqext_wake(struct irq_data *data, unsigned int state); 109extern int s3c_irqext_wake(struct irq_data *data, unsigned int state);
110extern void s3c_cpu_resume(void);
112#else 111#else
113#define s3c_irq_wake NULL 112#define s3c_irq_wake NULL
114#define s3c_irqext_wake NULL 113#define s3c_irqext_wake NULL
114#define s3c_cpu_resume NULL
115#endif 115#endif
116 116
117/* PM debug functions */ 117/* PM debug functions */
diff --git a/arch/arm/plat-samsung/pm.c b/arch/arm/plat-samsung/pm.c
index ea3613642451..d0c23010b693 100644
--- a/arch/arm/plat-samsung/pm.c
+++ b/arch/arm/plat-samsung/pm.c
@@ -80,7 +80,7 @@ unsigned char pm_uart_udivslot;
80 80
81#ifdef CONFIG_SAMSUNG_PM_DEBUG 81#ifdef CONFIG_SAMSUNG_PM_DEBUG
82 82
83static struct pm_uart_save uart_save[CONFIG_SERIAL_SAMSUNG_UARTS]; 83static struct pm_uart_save uart_save;
84 84
85static void s3c_pm_save_uart(unsigned int uart, struct pm_uart_save *save) 85static void s3c_pm_save_uart(unsigned int uart, struct pm_uart_save *save)
86{ 86{
@@ -101,11 +101,7 @@ static void s3c_pm_save_uart(unsigned int uart, struct pm_uart_save *save)
101 101
102static void s3c_pm_save_uarts(void) 102static void s3c_pm_save_uarts(void)
103{ 103{
104 struct pm_uart_save *save = uart_save; 104 s3c_pm_save_uart(CONFIG_DEBUG_S3C_UART, &uart_save);
105 unsigned int uart;
106
107 for (uart = 0; uart < CONFIG_SERIAL_SAMSUNG_UARTS; uart++, save++)
108 s3c_pm_save_uart(uart, save);
109} 105}
110 106
111static void s3c_pm_restore_uart(unsigned int uart, struct pm_uart_save *save) 107static void s3c_pm_restore_uart(unsigned int uart, struct pm_uart_save *save)
@@ -126,11 +122,7 @@ static void s3c_pm_restore_uart(unsigned int uart, struct pm_uart_save *save)
126 122
127static void s3c_pm_restore_uarts(void) 123static void s3c_pm_restore_uarts(void)
128{ 124{
129 struct pm_uart_save *save = uart_save; 125 s3c_pm_restore_uart(CONFIG_DEBUG_S3C_UART, &uart_save);
130 unsigned int uart;
131
132 for (uart = 0; uart < CONFIG_SERIAL_SAMSUNG_UARTS; uart++, save++)
133 s3c_pm_restore_uart(uart, save);
134} 126}
135#else 127#else
136static void s3c_pm_save_uarts(void) { } 128static void s3c_pm_save_uarts(void) { }
diff --git a/arch/arm/plat-versatile/platsmp.c b/arch/arm/plat-versatile/platsmp.c
index 1e1b2d769748..39895d892c3b 100644
--- a/arch/arm/plat-versatile/platsmp.c
+++ b/arch/arm/plat-versatile/platsmp.c
@@ -23,7 +23,7 @@
23 * observers, irrespective of whether they're taking part in coherency 23 * observers, irrespective of whether they're taking part in coherency
24 * or not. This is necessary for the hotplug code to work reliably. 24 * or not. This is necessary for the hotplug code to work reliably.
25 */ 25 */
26static void __cpuinit write_pen_release(int val) 26static void write_pen_release(int val)
27{ 27{
28 pen_release = val; 28 pen_release = val;
29 smp_wmb(); 29 smp_wmb();
@@ -33,7 +33,7 @@ static void __cpuinit write_pen_release(int val)
33 33
34static DEFINE_SPINLOCK(boot_lock); 34static DEFINE_SPINLOCK(boot_lock);
35 35
36void __cpuinit versatile_secondary_init(unsigned int cpu) 36void versatile_secondary_init(unsigned int cpu)
37{ 37{
38 /* 38 /*
39 * let the primary processor know we're out of the 39 * let the primary processor know we're out of the
@@ -48,7 +48,7 @@ void __cpuinit versatile_secondary_init(unsigned int cpu)
48 spin_unlock(&boot_lock); 48 spin_unlock(&boot_lock);
49} 49}
50 50
51int __cpuinit versatile_boot_secondary(unsigned int cpu, struct task_struct *idle) 51int versatile_boot_secondary(unsigned int cpu, struct task_struct *idle)
52{ 52{
53 unsigned long timeout; 53 unsigned long timeout;
54 54
diff --git a/arch/arm/xen/enlighten.c b/arch/arm/xen/enlighten.c
index f71c37edca26..c9770ba5c7df 100644
--- a/arch/arm/xen/enlighten.c
+++ b/arch/arm/xen/enlighten.c
@@ -172,7 +172,7 @@ static void __init xen_percpu_init(void *unused)
172 enable_percpu_irq(xen_events_irq, 0); 172 enable_percpu_irq(xen_events_irq, 0);
173} 173}
174 174
175static void xen_restart(char str, const char *cmd) 175static void xen_restart(enum reboot_mode reboot_mode, const char *cmd)
176{ 176{
177 struct sched_shutdown r = { .reason = SHUTDOWN_reboot }; 177 struct sched_shutdown r = { .reason = SHUTDOWN_reboot };
178 int rc; 178 int rc;
diff --git a/arch/arm64/include/asm/arch_timer.h b/arch/arm64/include/asm/arch_timer.h
index d56ed11ba9a3..98abd476992d 100644
--- a/arch/arm64/include/asm/arch_timer.h
+++ b/arch/arm64/include/asm/arch_timer.h
@@ -97,7 +97,7 @@ static inline u32 arch_timer_get_cntfrq(void)
97 return val; 97 return val;
98} 98}
99 99
100static inline void __cpuinit arch_counter_set_user_access(void) 100static inline void arch_counter_set_user_access(void)
101{ 101{
102 u32 cntkctl; 102 u32 cntkctl;
103 103
diff --git a/arch/arm64/include/asm/debug-monitors.h b/arch/arm64/include/asm/debug-monitors.h
index ef8235c68c09..a2232d07be9d 100644
--- a/arch/arm64/include/asm/debug-monitors.h
+++ b/arch/arm64/include/asm/debug-monitors.h
@@ -83,14 +83,7 @@ static inline int reinstall_suspended_bps(struct pt_regs *regs)
83} 83}
84#endif 84#endif
85 85
86#ifdef CONFIG_COMPAT
87int aarch32_break_handler(struct pt_regs *regs); 86int aarch32_break_handler(struct pt_regs *regs);
88#else
89static int aarch32_break_handler(struct pt_regs *regs)
90{
91 return -EFAULT;
92}
93#endif
94 87
95#endif /* __ASSEMBLY */ 88#endif /* __ASSEMBLY */
96#endif /* __KERNEL__ */ 89#endif /* __KERNEL__ */
diff --git a/arch/arm64/include/asm/system_misc.h b/arch/arm64/include/asm/system_misc.h
index a6e1750369ef..7a18fabbe0f6 100644
--- a/arch/arm64/include/asm/system_misc.h
+++ b/arch/arm64/include/asm/system_misc.h
@@ -23,6 +23,7 @@
23#include <linux/compiler.h> 23#include <linux/compiler.h>
24#include <linux/linkage.h> 24#include <linux/linkage.h>
25#include <linux/irqflags.h> 25#include <linux/irqflags.h>
26#include <linux/reboot.h>
26 27
27struct pt_regs; 28struct pt_regs;
28 29
@@ -41,7 +42,7 @@ extern void show_pte(struct mm_struct *mm, unsigned long addr);
41extern void __show_regs(struct pt_regs *); 42extern void __show_regs(struct pt_regs *);
42 43
43void soft_restart(unsigned long); 44void soft_restart(unsigned long);
44extern void (*arm_pm_restart)(char str, const char *cmd); 45extern void (*arm_pm_restart)(enum reboot_mode reboot_mode, const char *cmd);
45 46
46#define UDBG_UNDEFINED (1 << 0) 47#define UDBG_UNDEFINED (1 << 0)
47#define UDBG_SYSCALL (1 << 1) 48#define UDBG_SYSCALL (1 << 1)
diff --git a/arch/arm64/include/asm/thread_info.h b/arch/arm64/include/asm/thread_info.h
index 3659e460071d..23a3c4791d86 100644
--- a/arch/arm64/include/asm/thread_info.h
+++ b/arch/arm64/include/asm/thread_info.h
@@ -24,10 +24,10 @@
24#include <linux/compiler.h> 24#include <linux/compiler.h>
25 25
26#ifndef CONFIG_ARM64_64K_PAGES 26#ifndef CONFIG_ARM64_64K_PAGES
27#define THREAD_SIZE_ORDER 1 27#define THREAD_SIZE_ORDER 2
28#endif 28#endif
29 29
30#define THREAD_SIZE 8192 30#define THREAD_SIZE 16384
31#define THREAD_START_SP (THREAD_SIZE - 16) 31#define THREAD_START_SP (THREAD_SIZE - 16)
32 32
33#ifndef __ASSEMBLY__ 33#ifndef __ASSEMBLY__
diff --git a/arch/arm64/include/asm/virt.h b/arch/arm64/include/asm/virt.h
index 439827271e3d..26e310c54344 100644
--- a/arch/arm64/include/asm/virt.h
+++ b/arch/arm64/include/asm/virt.h
@@ -21,6 +21,7 @@
21#define BOOT_CPU_MODE_EL2 (0x0e12b007) 21#define BOOT_CPU_MODE_EL2 (0x0e12b007)
22 22
23#ifndef __ASSEMBLY__ 23#ifndef __ASSEMBLY__
24#include <asm/cacheflush.h>
24 25
25/* 26/*
26 * __boot_cpu_mode records what mode CPUs were booted in. 27 * __boot_cpu_mode records what mode CPUs were booted in.
@@ -36,9 +37,20 @@ extern u32 __boot_cpu_mode[2];
36void __hyp_set_vectors(phys_addr_t phys_vector_base); 37void __hyp_set_vectors(phys_addr_t phys_vector_base);
37phys_addr_t __hyp_get_vectors(void); 38phys_addr_t __hyp_get_vectors(void);
38 39
40static inline void sync_boot_mode(void)
41{
42 /*
43 * As secondaries write to __boot_cpu_mode with caches disabled, we
44 * must flush the corresponding cache entries to ensure the visibility
45 * of their writes.
46 */
47 __flush_dcache_area(__boot_cpu_mode, sizeof(__boot_cpu_mode));
48}
49
39/* Reports the availability of HYP mode */ 50/* Reports the availability of HYP mode */
40static inline bool is_hyp_mode_available(void) 51static inline bool is_hyp_mode_available(void)
41{ 52{
53 sync_boot_mode();
42 return (__boot_cpu_mode[0] == BOOT_CPU_MODE_EL2 && 54 return (__boot_cpu_mode[0] == BOOT_CPU_MODE_EL2 &&
43 __boot_cpu_mode[1] == BOOT_CPU_MODE_EL2); 55 __boot_cpu_mode[1] == BOOT_CPU_MODE_EL2);
44} 56}
@@ -46,6 +58,7 @@ static inline bool is_hyp_mode_available(void)
46/* Check if the bootloader has booted CPUs in different modes */ 58/* Check if the bootloader has booted CPUs in different modes */
47static inline bool is_hyp_mode_mismatched(void) 59static inline bool is_hyp_mode_mismatched(void)
48{ 60{
61 sync_boot_mode();
49 return __boot_cpu_mode[0] != __boot_cpu_mode[1]; 62 return __boot_cpu_mode[0] != __boot_cpu_mode[1];
50} 63}
51 64
diff --git a/arch/arm64/kernel/debug-monitors.c b/arch/arm64/kernel/debug-monitors.c
index 08018e3df580..cbfacf7fb438 100644
--- a/arch/arm64/kernel/debug-monitors.c
+++ b/arch/arm64/kernel/debug-monitors.c
@@ -141,7 +141,7 @@ static void clear_os_lock(void *unused)
141 isb(); 141 isb();
142} 142}
143 143
144static int __cpuinit os_lock_notify(struct notifier_block *self, 144static int os_lock_notify(struct notifier_block *self,
145 unsigned long action, void *data) 145 unsigned long action, void *data)
146{ 146{
147 int cpu = (unsigned long)data; 147 int cpu = (unsigned long)data;
@@ -150,11 +150,11 @@ static int __cpuinit os_lock_notify(struct notifier_block *self,
150 return NOTIFY_OK; 150 return NOTIFY_OK;
151} 151}
152 152
153static struct notifier_block __cpuinitdata os_lock_nb = { 153static struct notifier_block os_lock_nb = {
154 .notifier_call = os_lock_notify, 154 .notifier_call = os_lock_notify,
155}; 155};
156 156
157static int __cpuinit debug_monitors_init(void) 157static int debug_monitors_init(void)
158{ 158{
159 /* Clear the OS lock. */ 159 /* Clear the OS lock. */
160 smp_call_function(clear_os_lock, NULL, 1); 160 smp_call_function(clear_os_lock, NULL, 1);
diff --git a/arch/arm64/kernel/entry.S b/arch/arm64/kernel/entry.S
index 1d1314280a03..6ad781b21c08 100644
--- a/arch/arm64/kernel/entry.S
+++ b/arch/arm64/kernel/entry.S
@@ -121,7 +121,7 @@
121 121
122 .macro get_thread_info, rd 122 .macro get_thread_info, rd
123 mov \rd, sp 123 mov \rd, sp
124 and \rd, \rd, #~((1 << 13) - 1) // top of 8K stack 124 and \rd, \rd, #~(THREAD_SIZE - 1) // top of stack
125 .endm 125 .endm
126 126
127/* 127/*
diff --git a/arch/arm64/kernel/hw_breakpoint.c b/arch/arm64/kernel/hw_breakpoint.c
index 5ab825c59db9..329218ca9ffb 100644
--- a/arch/arm64/kernel/hw_breakpoint.c
+++ b/arch/arm64/kernel/hw_breakpoint.c
@@ -821,7 +821,7 @@ static void reset_ctrl_regs(void *unused)
821 } 821 }
822} 822}
823 823
824static int __cpuinit hw_breakpoint_reset_notify(struct notifier_block *self, 824static int hw_breakpoint_reset_notify(struct notifier_block *self,
825 unsigned long action, 825 unsigned long action,
826 void *hcpu) 826 void *hcpu)
827{ 827{
@@ -831,7 +831,7 @@ static int __cpuinit hw_breakpoint_reset_notify(struct notifier_block *self,
831 return NOTIFY_OK; 831 return NOTIFY_OK;
832} 832}
833 833
834static struct notifier_block __cpuinitdata hw_breakpoint_reset_nb = { 834static struct notifier_block hw_breakpoint_reset_nb = {
835 .notifier_call = hw_breakpoint_reset_notify, 835 .notifier_call = hw_breakpoint_reset_notify,
836}; 836};
837 837
diff --git a/arch/arm64/kernel/process.c b/arch/arm64/kernel/process.c
index 46f02c3b5015..57fb55c44c90 100644
--- a/arch/arm64/kernel/process.c
+++ b/arch/arm64/kernel/process.c
@@ -81,7 +81,7 @@ void soft_restart(unsigned long addr)
81void (*pm_power_off)(void); 81void (*pm_power_off)(void);
82EXPORT_SYMBOL_GPL(pm_power_off); 82EXPORT_SYMBOL_GPL(pm_power_off);
83 83
84void (*arm_pm_restart)(char str, const char *cmd); 84void (*arm_pm_restart)(enum reboot_mode reboot_mode, const char *cmd);
85EXPORT_SYMBOL_GPL(arm_pm_restart); 85EXPORT_SYMBOL_GPL(arm_pm_restart);
86 86
87void arch_cpu_idle_prepare(void) 87void arch_cpu_idle_prepare(void)
@@ -132,7 +132,7 @@ void machine_restart(char *cmd)
132 132
133 /* Now call the architecture specific reboot code. */ 133 /* Now call the architecture specific reboot code. */
134 if (arm_pm_restart) 134 if (arm_pm_restart)
135 arm_pm_restart('h', cmd); 135 arm_pm_restart(reboot_mode, cmd);
136 136
137 /* 137 /*
138 * Whoops - the architecture was unable to reboot. 138 * Whoops - the architecture was unable to reboot.
diff --git a/arch/arm64/kernel/smp.c b/arch/arm64/kernel/smp.c
index 5d54e3717bf8..fee5cce83450 100644
--- a/arch/arm64/kernel/smp.c
+++ b/arch/arm64/kernel/smp.c
@@ -71,7 +71,7 @@ static DEFINE_RAW_SPINLOCK(boot_lock);
71 * in coherency or not. This is necessary for the hotplug code to work 71 * in coherency or not. This is necessary for the hotplug code to work
72 * reliably. 72 * reliably.
73 */ 73 */
74static void __cpuinit write_pen_release(u64 val) 74static void write_pen_release(u64 val)
75{ 75{
76 void *start = (void *)&secondary_holding_pen_release; 76 void *start = (void *)&secondary_holding_pen_release;
77 unsigned long size = sizeof(secondary_holding_pen_release); 77 unsigned long size = sizeof(secondary_holding_pen_release);
@@ -84,7 +84,7 @@ static void __cpuinit write_pen_release(u64 val)
84 * Boot a secondary CPU, and assign it the specified idle task. 84 * Boot a secondary CPU, and assign it the specified idle task.
85 * This also gives us the initial stack to use for this CPU. 85 * This also gives us the initial stack to use for this CPU.
86 */ 86 */
87static int __cpuinit boot_secondary(unsigned int cpu, struct task_struct *idle) 87static int boot_secondary(unsigned int cpu, struct task_struct *idle)
88{ 88{
89 unsigned long timeout; 89 unsigned long timeout;
90 90
@@ -122,7 +122,7 @@ static int __cpuinit boot_secondary(unsigned int cpu, struct task_struct *idle)
122 122
123static DECLARE_COMPLETION(cpu_running); 123static DECLARE_COMPLETION(cpu_running);
124 124
125int __cpuinit __cpu_up(unsigned int cpu, struct task_struct *idle) 125int __cpu_up(unsigned int cpu, struct task_struct *idle)
126{ 126{
127 int ret; 127 int ret;
128 128
@@ -162,7 +162,7 @@ int __cpuinit __cpu_up(unsigned int cpu, struct task_struct *idle)
162 * This is the secondary CPU boot entry. We're using this CPUs 162 * This is the secondary CPU boot entry. We're using this CPUs
163 * idle thread stack, but a set of temporary page tables. 163 * idle thread stack, but a set of temporary page tables.
164 */ 164 */
165asmlinkage void __cpuinit secondary_start_kernel(void) 165asmlinkage void secondary_start_kernel(void)
166{ 166{
167 struct mm_struct *mm = &init_mm; 167 struct mm_struct *mm = &init_mm;
168 unsigned int cpu = smp_processor_id(); 168 unsigned int cpu = smp_processor_id();
@@ -200,13 +200,6 @@ asmlinkage void __cpuinit secondary_start_kernel(void)
200 raw_spin_unlock(&boot_lock); 200 raw_spin_unlock(&boot_lock);
201 201
202 /* 202 /*
203 * Enable local interrupts.
204 */
205 notify_cpu_starting(cpu);
206 local_irq_enable();
207 local_fiq_enable();
208
209 /*
210 * OK, now it's safe to let the boot CPU continue. Wait for 203 * OK, now it's safe to let the boot CPU continue. Wait for
211 * the CPU migration code to notice that the CPU is online 204 * the CPU migration code to notice that the CPU is online
212 * before we continue. 205 * before we continue.
@@ -215,6 +208,14 @@ asmlinkage void __cpuinit secondary_start_kernel(void)
215 complete(&cpu_running); 208 complete(&cpu_running);
216 209
217 /* 210 /*
211 * Enable GIC and timers.
212 */
213 notify_cpu_starting(cpu);
214
215 local_irq_enable();
216 local_fiq_enable();
217
218 /*
218 * OK, it's off to the idle thread for us 219 * OK, it's off to the idle thread for us
219 */ 220 */
220 cpu_startup_entry(CPUHP_ONLINE); 221 cpu_startup_entry(CPUHP_ONLINE);
diff --git a/arch/arm64/mm/fault.c b/arch/arm64/mm/fault.c
index 0ecac8980aae..6c8ba25bf6bb 100644
--- a/arch/arm64/mm/fault.c
+++ b/arch/arm64/mm/fault.c
@@ -152,25 +152,8 @@ void do_bad_area(unsigned long addr, unsigned int esr, struct pt_regs *regs)
152#define ESR_CM (1 << 8) 152#define ESR_CM (1 << 8)
153#define ESR_LNX_EXEC (1 << 24) 153#define ESR_LNX_EXEC (1 << 24)
154 154
155/*
156 * Check that the permissions on the VMA allow for the fault which occurred.
157 * If we encountered a write fault, we must have write permission, otherwise
158 * we allow any permission.
159 */
160static inline bool access_error(unsigned int esr, struct vm_area_struct *vma)
161{
162 unsigned int mask = VM_READ | VM_WRITE | VM_EXEC;
163
164 if (esr & ESR_WRITE)
165 mask = VM_WRITE;
166 if (esr & ESR_LNX_EXEC)
167 mask = VM_EXEC;
168
169 return vma->vm_flags & mask ? false : true;
170}
171
172static int __do_page_fault(struct mm_struct *mm, unsigned long addr, 155static int __do_page_fault(struct mm_struct *mm, unsigned long addr,
173 unsigned int esr, unsigned int flags, 156 unsigned int mm_flags, unsigned long vm_flags,
174 struct task_struct *tsk) 157 struct task_struct *tsk)
175{ 158{
176 struct vm_area_struct *vma; 159 struct vm_area_struct *vma;
@@ -188,12 +171,17 @@ static int __do_page_fault(struct mm_struct *mm, unsigned long addr,
188 * it. 171 * it.
189 */ 172 */
190good_area: 173good_area:
191 if (access_error(esr, vma)) { 174 /*
175 * Check that the permissions on the VMA allow for the fault which
176 * occurred. If we encountered a write or exec fault, we must have
177 * appropriate permissions, otherwise we allow any permission.
178 */
179 if (!(vma->vm_flags & vm_flags)) {
192 fault = VM_FAULT_BADACCESS; 180 fault = VM_FAULT_BADACCESS;
193 goto out; 181 goto out;
194 } 182 }
195 183
196 return handle_mm_fault(mm, vma, addr & PAGE_MASK, flags); 184 return handle_mm_fault(mm, vma, addr & PAGE_MASK, mm_flags);
197 185
198check_stack: 186check_stack:
199 if (vma->vm_flags & VM_GROWSDOWN && !expand_stack(vma, addr)) 187 if (vma->vm_flags & VM_GROWSDOWN && !expand_stack(vma, addr))
@@ -208,9 +196,15 @@ static int __kprobes do_page_fault(unsigned long addr, unsigned int esr,
208 struct task_struct *tsk; 196 struct task_struct *tsk;
209 struct mm_struct *mm; 197 struct mm_struct *mm;
210 int fault, sig, code; 198 int fault, sig, code;
211 bool write = (esr & ESR_WRITE) && !(esr & ESR_CM); 199 unsigned long vm_flags = VM_READ | VM_WRITE | VM_EXEC;
212 unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE | 200 unsigned int mm_flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE;
213 (write ? FAULT_FLAG_WRITE : 0); 201
202 if (esr & ESR_LNX_EXEC) {
203 vm_flags = VM_EXEC;
204 } else if ((esr & ESR_WRITE) && !(esr & ESR_CM)) {
205 vm_flags = VM_WRITE;
206 mm_flags |= FAULT_FLAG_WRITE;
207 }
214 208
215 tsk = current; 209 tsk = current;
216 mm = tsk->mm; 210 mm = tsk->mm;
@@ -248,7 +242,7 @@ retry:
248#endif 242#endif
249 } 243 }
250 244
251 fault = __do_page_fault(mm, addr, esr, flags, tsk); 245 fault = __do_page_fault(mm, addr, mm_flags, vm_flags, tsk);
252 246
253 /* 247 /*
254 * If we need to retry but a fatal signal is pending, handle the 248 * If we need to retry but a fatal signal is pending, handle the
@@ -265,7 +259,7 @@ retry:
265 */ 259 */
266 260
267 perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, addr); 261 perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, addr);
268 if (flags & FAULT_FLAG_ALLOW_RETRY) { 262 if (mm_flags & FAULT_FLAG_ALLOW_RETRY) {
269 if (fault & VM_FAULT_MAJOR) { 263 if (fault & VM_FAULT_MAJOR) {
270 tsk->maj_flt++; 264 tsk->maj_flt++;
271 perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1, regs, 265 perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1, regs,
@@ -280,7 +274,7 @@ retry:
280 * Clear FAULT_FLAG_ALLOW_RETRY to avoid any risk of 274 * Clear FAULT_FLAG_ALLOW_RETRY to avoid any risk of
281 * starvation. 275 * starvation.
282 */ 276 */
283 flags &= ~FAULT_FLAG_ALLOW_RETRY; 277 mm_flags &= ~FAULT_FLAG_ALLOW_RETRY;
284 goto retry; 278 goto retry;
285 } 279 }
286 } 280 }
diff --git a/arch/avr32/boards/atngw100/mrmt.c b/arch/avr32/boards/atngw100/mrmt.c
index f91431963452..7de083d19b7e 100644
--- a/arch/avr32/boards/atngw100/mrmt.c
+++ b/arch/avr32/boards/atngw100/mrmt.c
@@ -150,7 +150,6 @@ static struct ac97c_platform_data __initdata ac97c0_data = {
150static struct platform_device rmt_ts_device = { 150static struct platform_device rmt_ts_device = {
151 .name = "ucb1400_ts", 151 .name = "ucb1400_ts",
152 .id = -1, 152 .id = -1,
153 }
154}; 153};
155#endif 154#endif
156 155
diff --git a/arch/blackfin/kernel/perf_event.c b/arch/blackfin/kernel/perf_event.c
index e47d19ae3e06..974e55496db3 100644
--- a/arch/blackfin/kernel/perf_event.c
+++ b/arch/blackfin/kernel/perf_event.c
@@ -468,7 +468,7 @@ static void bfin_pmu_setup(int cpu)
468 memset(cpuhw, 0, sizeof(struct cpu_hw_events)); 468 memset(cpuhw, 0, sizeof(struct cpu_hw_events));
469} 469}
470 470
471static int __cpuinit 471static int
472bfin_pmu_notifier(struct notifier_block *self, unsigned long action, void *hcpu) 472bfin_pmu_notifier(struct notifier_block *self, unsigned long action, void *hcpu)
473{ 473{
474 unsigned int cpu = (long)hcpu; 474 unsigned int cpu = (long)hcpu;
diff --git a/arch/blackfin/kernel/setup.c b/arch/blackfin/kernel/setup.c
index 107b306b06f1..19ad0637e8ff 100644
--- a/arch/blackfin/kernel/setup.c
+++ b/arch/blackfin/kernel/setup.c
@@ -99,7 +99,7 @@ void __init generate_cplb_tables(void)
99} 99}
100#endif 100#endif
101 101
102void __cpuinit bfin_setup_caches(unsigned int cpu) 102void bfin_setup_caches(unsigned int cpu)
103{ 103{
104#ifdef CONFIG_BFIN_ICACHE 104#ifdef CONFIG_BFIN_ICACHE
105 bfin_icache_init(icplb_tbl[cpu]); 105 bfin_icache_init(icplb_tbl[cpu]);
@@ -165,7 +165,7 @@ void __cpuinit bfin_setup_caches(unsigned int cpu)
165#endif 165#endif
166} 166}
167 167
168void __cpuinit bfin_setup_cpudata(unsigned int cpu) 168void bfin_setup_cpudata(unsigned int cpu)
169{ 169{
170 struct blackfin_cpudata *cpudata = &per_cpu(cpu_data, cpu); 170 struct blackfin_cpudata *cpudata = &per_cpu(cpu_data, cpu);
171 171
diff --git a/arch/blackfin/mach-bf561/smp.c b/arch/blackfin/mach-bf561/smp.c
index c77a23bc9de3..11789beca75a 100644
--- a/arch/blackfin/mach-bf561/smp.c
+++ b/arch/blackfin/mach-bf561/smp.c
@@ -48,7 +48,7 @@ int __init setup_profiling_timer(unsigned int multiplier) /* not supported */
48 return -EINVAL; 48 return -EINVAL;
49} 49}
50 50
51void __cpuinit platform_secondary_init(unsigned int cpu) 51void platform_secondary_init(unsigned int cpu)
52{ 52{
53 /* Clone setup for peripheral interrupt sources from CoreA. */ 53 /* Clone setup for peripheral interrupt sources from CoreA. */
54 bfin_write_SICB_IMASK0(bfin_read_SIC_IMASK0()); 54 bfin_write_SICB_IMASK0(bfin_read_SIC_IMASK0());
@@ -73,7 +73,7 @@ void __cpuinit platform_secondary_init(unsigned int cpu)
73 spin_unlock(&boot_lock); 73 spin_unlock(&boot_lock);
74} 74}
75 75
76int __cpuinit platform_boot_secondary(unsigned int cpu, struct task_struct *idle) 76int platform_boot_secondary(unsigned int cpu, struct task_struct *idle)
77{ 77{
78 unsigned long timeout; 78 unsigned long timeout;
79 79
@@ -154,7 +154,7 @@ void platform_clear_ipi(unsigned int cpu, int irq)
154 * Setup core B's local core timer. 154 * Setup core B's local core timer.
155 * In SMP, core timer is used for clock event device. 155 * In SMP, core timer is used for clock event device.
156 */ 156 */
157void __cpuinit bfin_local_timer_setup(void) 157void bfin_local_timer_setup(void)
158{ 158{
159#if defined(CONFIG_TICKSOURCE_CORETMR) 159#if defined(CONFIG_TICKSOURCE_CORETMR)
160 struct irq_data *data = irq_get_irq_data(IRQ_CORETMR); 160 struct irq_data *data = irq_get_irq_data(IRQ_CORETMR);
diff --git a/arch/blackfin/mach-common/cache-c.c b/arch/blackfin/mach-common/cache-c.c
index a60a24f5035d..0e1e451fd7d8 100644
--- a/arch/blackfin/mach-common/cache-c.c
+++ b/arch/blackfin/mach-common/cache-c.c
@@ -52,7 +52,7 @@ bfin_cache_init(struct cplb_entry *cplb_tbl, unsigned long cplb_addr,
52} 52}
53 53
54#ifdef CONFIG_BFIN_ICACHE 54#ifdef CONFIG_BFIN_ICACHE
55void __cpuinit bfin_icache_init(struct cplb_entry *icplb_tbl) 55void bfin_icache_init(struct cplb_entry *icplb_tbl)
56{ 56{
57 bfin_cache_init(icplb_tbl, ICPLB_ADDR0, ICPLB_DATA0, IMEM_CONTROL, 57 bfin_cache_init(icplb_tbl, ICPLB_ADDR0, ICPLB_DATA0, IMEM_CONTROL,
58 (IMC | ENICPLB)); 58 (IMC | ENICPLB));
@@ -60,7 +60,7 @@ void __cpuinit bfin_icache_init(struct cplb_entry *icplb_tbl)
60#endif 60#endif
61 61
62#ifdef CONFIG_BFIN_DCACHE 62#ifdef CONFIG_BFIN_DCACHE
63void __cpuinit bfin_dcache_init(struct cplb_entry *dcplb_tbl) 63void bfin_dcache_init(struct cplb_entry *dcplb_tbl)
64{ 64{
65 /* 65 /*
66 * Anomaly notes: 66 * Anomaly notes:
diff --git a/arch/blackfin/mach-common/ints-priority.c b/arch/blackfin/mach-common/ints-priority.c
index 6c0c6816a51a..d143fd8d2bc5 100644
--- a/arch/blackfin/mach-common/ints-priority.c
+++ b/arch/blackfin/mach-common/ints-priority.c
@@ -1281,7 +1281,7 @@ static struct irq_chip bfin_gpio_irqchip = {
1281 .irq_set_wake = bfin_gpio_set_wake, 1281 .irq_set_wake = bfin_gpio_set_wake,
1282}; 1282};
1283 1283
1284void __cpuinit init_exception_vectors(void) 1284void init_exception_vectors(void)
1285{ 1285{
1286 /* cannot program in software: 1286 /* cannot program in software:
1287 * evt0 - emulation (jtag) 1287 * evt0 - emulation (jtag)
diff --git a/arch/blackfin/mach-common/smp.c b/arch/blackfin/mach-common/smp.c
index 961d8392e5e3..82f301c117a5 100644
--- a/arch/blackfin/mach-common/smp.c
+++ b/arch/blackfin/mach-common/smp.c
@@ -46,7 +46,7 @@ struct corelock_slot corelock __attribute__ ((__section__(".l2.bss")));
46unsigned long blackfin_iflush_l1_entry[NR_CPUS]; 46unsigned long blackfin_iflush_l1_entry[NR_CPUS];
47#endif 47#endif
48 48
49struct blackfin_initial_pda __cpuinitdata initial_pda_coreb; 49struct blackfin_initial_pda initial_pda_coreb;
50 50
51enum ipi_message_type { 51enum ipi_message_type {
52 BFIN_IPI_NONE, 52 BFIN_IPI_NONE,
@@ -147,7 +147,7 @@ static irqreturn_t ipi_handler_int1(int irq, void *dev_instance)
147 platform_clear_ipi(cpu, IRQ_SUPPLE_1); 147 platform_clear_ipi(cpu, IRQ_SUPPLE_1);
148 148
149 bfin_ipi_data = &__get_cpu_var(bfin_ipi); 149 bfin_ipi_data = &__get_cpu_var(bfin_ipi);
150 while ((pending = xchg(&bfin_ipi_data->bits, 0)) != 0) { 150 while ((pending = atomic_xchg(&bfin_ipi_data->bits, 0)) != 0) {
151 msg = 0; 151 msg = 0;
152 do { 152 do {
153 msg = find_next_bit(&pending, BITS_PER_LONG, msg + 1); 153 msg = find_next_bit(&pending, BITS_PER_LONG, msg + 1);
@@ -182,8 +182,8 @@ static void bfin_ipi_init(void)
182 struct ipi_data *bfin_ipi_data; 182 struct ipi_data *bfin_ipi_data;
183 for_each_possible_cpu(cpu) { 183 for_each_possible_cpu(cpu) {
184 bfin_ipi_data = &per_cpu(bfin_ipi, cpu); 184 bfin_ipi_data = &per_cpu(bfin_ipi, cpu);
185 bfin_ipi_data->bits = 0; 185 atomic_set(&bfin_ipi_data->bits, 0);
186 bfin_ipi_data->count = 0; 186 atomic_set(&bfin_ipi_data->count, 0);
187 } 187 }
188} 188}
189 189
@@ -246,7 +246,7 @@ void smp_send_stop(void)
246 return; 246 return;
247} 247}
248 248
249int __cpuinit __cpu_up(unsigned int cpu, struct task_struct *idle) 249int __cpu_up(unsigned int cpu, struct task_struct *idle)
250{ 250{
251 int ret; 251 int ret;
252 252
@@ -259,7 +259,7 @@ int __cpuinit __cpu_up(unsigned int cpu, struct task_struct *idle)
259 return ret; 259 return ret;
260} 260}
261 261
262static void __cpuinit setup_secondary(unsigned int cpu) 262static void setup_secondary(unsigned int cpu)
263{ 263{
264 unsigned long ilat; 264 unsigned long ilat;
265 265
@@ -277,7 +277,7 @@ static void __cpuinit setup_secondary(unsigned int cpu)
277 IMASK_IVG10 | IMASK_IVG9 | IMASK_IVG8 | IMASK_IVG7 | IMASK_IVGHW; 277 IMASK_IVG10 | IMASK_IVG9 | IMASK_IVG8 | IMASK_IVG7 | IMASK_IVGHW;
278} 278}
279 279
280void __cpuinit secondary_start_kernel(void) 280void secondary_start_kernel(void)
281{ 281{
282 unsigned int cpu = smp_processor_id(); 282 unsigned int cpu = smp_processor_id();
283 struct mm_struct *mm = &init_mm; 283 struct mm_struct *mm = &init_mm;
@@ -402,7 +402,7 @@ EXPORT_SYMBOL(resync_core_dcache);
402#endif 402#endif
403 403
404#ifdef CONFIG_HOTPLUG_CPU 404#ifdef CONFIG_HOTPLUG_CPU
405int __cpuexit __cpu_disable(void) 405int __cpu_disable(void)
406{ 406{
407 unsigned int cpu = smp_processor_id(); 407 unsigned int cpu = smp_processor_id();
408 408
@@ -415,7 +415,7 @@ int __cpuexit __cpu_disable(void)
415 415
416static DECLARE_COMPLETION(cpu_killed); 416static DECLARE_COMPLETION(cpu_killed);
417 417
418int __cpuexit __cpu_die(unsigned int cpu) 418int __cpu_die(unsigned int cpu)
419{ 419{
420 return wait_for_completion_timeout(&cpu_killed, 5000); 420 return wait_for_completion_timeout(&cpu_killed, 5000);
421} 421}
diff --git a/arch/cris/arch-v32/kernel/smp.c b/arch/cris/arch-v32/kernel/smp.c
index cdd12028de0c..fe8e6039db2a 100644
--- a/arch/cris/arch-v32/kernel/smp.c
+++ b/arch/cris/arch-v32/kernel/smp.c
@@ -197,7 +197,7 @@ int setup_profiling_timer(unsigned int multiplier)
197 */ 197 */
198unsigned long cache_decay_ticks = 1; 198unsigned long cache_decay_ticks = 1;
199 199
200int __cpuinit __cpu_up(unsigned int cpu, struct task_struct *tidle) 200int __cpu_up(unsigned int cpu, struct task_struct *tidle)
201{ 201{
202 smp_boot_one_cpu(cpu, tidle); 202 smp_boot_one_cpu(cpu, tidle);
203 return cpu_online(cpu) ? 0 : -ENOSYS; 203 return cpu_online(cpu) ? 0 : -ENOSYS;
diff --git a/arch/frv/kernel/setup.c b/arch/frv/kernel/setup.c
index ae3a6706419b..9f3a7a62d787 100644
--- a/arch/frv/kernel/setup.c
+++ b/arch/frv/kernel/setup.c
@@ -709,7 +709,7 @@ static void __init reserve_dma_coherent(void)
709/* 709/*
710 * calibrate the delay loop 710 * calibrate the delay loop
711 */ 711 */
712void __cpuinit calibrate_delay(void) 712void calibrate_delay(void)
713{ 713{
714 loops_per_jiffy = __delay_loops_MHz * (1000000 / HZ); 714 loops_per_jiffy = __delay_loops_MHz * (1000000 / HZ);
715 715
diff --git a/arch/hexagon/kernel/setup.c b/arch/hexagon/kernel/setup.c
index bfe13311d70d..29d1f1b00016 100644
--- a/arch/hexagon/kernel/setup.c
+++ b/arch/hexagon/kernel/setup.c
@@ -41,7 +41,7 @@ static char default_command_line[COMMAND_LINE_SIZE] __initdata = CONFIG_CMDLINE;
41 41
42int on_simulator; 42int on_simulator;
43 43
44void __cpuinit calibrate_delay(void) 44void calibrate_delay(void)
45{ 45{
46 loops_per_jiffy = thread_freq_mhz * 1000000 / HZ; 46 loops_per_jiffy = thread_freq_mhz * 1000000 / HZ;
47} 47}
diff --git a/arch/hexagon/kernel/smp.c b/arch/hexagon/kernel/smp.c
index 0e364ca43198..9faaa940452b 100644
--- a/arch/hexagon/kernel/smp.c
+++ b/arch/hexagon/kernel/smp.c
@@ -146,7 +146,7 @@ void __init smp_prepare_boot_cpu(void)
146 * to point to current thread info 146 * to point to current thread info
147 */ 147 */
148 148
149void __cpuinit start_secondary(void) 149void start_secondary(void)
150{ 150{
151 unsigned int cpu; 151 unsigned int cpu;
152 unsigned long thread_ptr; 152 unsigned long thread_ptr;
@@ -194,7 +194,7 @@ void __cpuinit start_secondary(void)
194 * maintains control until "cpu_online(cpu)" is set. 194 * maintains control until "cpu_online(cpu)" is set.
195 */ 195 */
196 196
197int __cpuinit __cpu_up(unsigned int cpu, struct task_struct *idle) 197int __cpu_up(unsigned int cpu, struct task_struct *idle)
198{ 198{
199 struct thread_info *thread = (struct thread_info *)idle->stack; 199 struct thread_info *thread = (struct thread_info *)idle->stack;
200 void *stack_start; 200 void *stack_start;
diff --git a/arch/ia64/configs/generic_defconfig b/arch/ia64/configs/generic_defconfig
index 7913695b2fcb..efbd2929aeb7 100644
--- a/arch/ia64/configs/generic_defconfig
+++ b/arch/ia64/configs/generic_defconfig
@@ -31,7 +31,7 @@ CONFIG_ACPI_FAN=m
31CONFIG_ACPI_DOCK=y 31CONFIG_ACPI_DOCK=y
32CONFIG_ACPI_PROCESSOR=m 32CONFIG_ACPI_PROCESSOR=m
33CONFIG_ACPI_CONTAINER=m 33CONFIG_ACPI_CONTAINER=m
34CONFIG_HOTPLUG_PCI=m 34CONFIG_HOTPLUG_PCI=y
35CONFIG_HOTPLUG_PCI_ACPI=m 35CONFIG_HOTPLUG_PCI_ACPI=m
36CONFIG_PACKET=y 36CONFIG_PACKET=y
37CONFIG_UNIX=y 37CONFIG_UNIX=y
diff --git a/arch/ia64/configs/gensparse_defconfig b/arch/ia64/configs/gensparse_defconfig
index f8e913365423..f64980dd20c3 100644
--- a/arch/ia64/configs/gensparse_defconfig
+++ b/arch/ia64/configs/gensparse_defconfig
@@ -25,7 +25,7 @@ CONFIG_ACPI_BUTTON=m
25CONFIG_ACPI_FAN=m 25CONFIG_ACPI_FAN=m
26CONFIG_ACPI_PROCESSOR=m 26CONFIG_ACPI_PROCESSOR=m
27CONFIG_ACPI_CONTAINER=m 27CONFIG_ACPI_CONTAINER=m
28CONFIG_HOTPLUG_PCI=m 28CONFIG_HOTPLUG_PCI=y
29CONFIG_HOTPLUG_PCI_ACPI=m 29CONFIG_HOTPLUG_PCI_ACPI=m
30CONFIG_PACKET=y 30CONFIG_PACKET=y
31CONFIG_UNIX=y 31CONFIG_UNIX=y
diff --git a/arch/ia64/configs/tiger_defconfig b/arch/ia64/configs/tiger_defconfig
index a5a9e02e60a0..0f4e9e41f130 100644
--- a/arch/ia64/configs/tiger_defconfig
+++ b/arch/ia64/configs/tiger_defconfig
@@ -31,7 +31,7 @@ CONFIG_ACPI_BUTTON=m
31CONFIG_ACPI_FAN=m 31CONFIG_ACPI_FAN=m
32CONFIG_ACPI_PROCESSOR=m 32CONFIG_ACPI_PROCESSOR=m
33CONFIG_ACPI_CONTAINER=m 33CONFIG_ACPI_CONTAINER=m
34CONFIG_HOTPLUG_PCI=m 34CONFIG_HOTPLUG_PCI=y
35CONFIG_HOTPLUG_PCI_ACPI=m 35CONFIG_HOTPLUG_PCI_ACPI=m
36CONFIG_PACKET=y 36CONFIG_PACKET=y
37CONFIG_UNIX=y 37CONFIG_UNIX=y
diff --git a/arch/ia64/configs/xen_domu_defconfig b/arch/ia64/configs/xen_domu_defconfig
index 37b9b422caad..b025acfde5c1 100644
--- a/arch/ia64/configs/xen_domu_defconfig
+++ b/arch/ia64/configs/xen_domu_defconfig
@@ -32,7 +32,7 @@ CONFIG_ACPI_BUTTON=m
32CONFIG_ACPI_FAN=m 32CONFIG_ACPI_FAN=m
33CONFIG_ACPI_PROCESSOR=m 33CONFIG_ACPI_PROCESSOR=m
34CONFIG_ACPI_CONTAINER=m 34CONFIG_ACPI_CONTAINER=m
35CONFIG_HOTPLUG_PCI=m 35CONFIG_HOTPLUG_PCI=y
36CONFIG_HOTPLUG_PCI_ACPI=m 36CONFIG_HOTPLUG_PCI_ACPI=m
37CONFIG_PACKET=y 37CONFIG_PACKET=y
38CONFIG_UNIX=y 38CONFIG_UNIX=y
diff --git a/arch/m32r/kernel/smpboot.c b/arch/m32r/kernel/smpboot.c
index 0ac558adc605..bb21f4f63170 100644
--- a/arch/m32r/kernel/smpboot.c
+++ b/arch/m32r/kernel/smpboot.c
@@ -343,7 +343,7 @@ static void __init do_boot_cpu(int phys_id)
343 } 343 }
344} 344}
345 345
346int __cpuinit __cpu_up(unsigned int cpu_id, struct task_struct *tidle) 346int __cpu_up(unsigned int cpu_id, struct task_struct *tidle)
347{ 347{
348 int timeout; 348 int timeout;
349 349
diff --git a/arch/metag/kernel/perf/perf_event.c b/arch/metag/kernel/perf/perf_event.c
index 5b18888ee364..5cc4d4dcf3cf 100644
--- a/arch/metag/kernel/perf/perf_event.c
+++ b/arch/metag/kernel/perf/perf_event.c
@@ -813,8 +813,8 @@ static struct metag_pmu _metag_pmu = {
813}; 813};
814 814
815/* PMU CPU hotplug notifier */ 815/* PMU CPU hotplug notifier */
816static int __cpuinit metag_pmu_cpu_notify(struct notifier_block *b, 816static int metag_pmu_cpu_notify(struct notifier_block *b, unsigned long action,
817 unsigned long action, void *hcpu) 817 void *hcpu)
818{ 818{
819 unsigned int cpu = (unsigned int)hcpu; 819 unsigned int cpu = (unsigned int)hcpu;
820 struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu); 820 struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu);
@@ -828,7 +828,7 @@ static int __cpuinit metag_pmu_cpu_notify(struct notifier_block *b,
828 return NOTIFY_OK; 828 return NOTIFY_OK;
829} 829}
830 830
831static struct notifier_block __cpuinitdata metag_pmu_notifier = { 831static struct notifier_block metag_pmu_notifier = {
832 .notifier_call = metag_pmu_cpu_notify, 832 .notifier_call = metag_pmu_cpu_notify,
833}; 833};
834 834
diff --git a/arch/metag/kernel/smp.c b/arch/metag/kernel/smp.c
index e413875cf6d2..7c0113142981 100644
--- a/arch/metag/kernel/smp.c
+++ b/arch/metag/kernel/smp.c
@@ -68,7 +68,7 @@ static DECLARE_COMPLETION(cpu_running);
68/* 68/*
69 * "thread" is assumed to be a valid Meta hardware thread ID. 69 * "thread" is assumed to be a valid Meta hardware thread ID.
70 */ 70 */
71int __cpuinit boot_secondary(unsigned int thread, struct task_struct *idle) 71int boot_secondary(unsigned int thread, struct task_struct *idle)
72{ 72{
73 u32 val; 73 u32 val;
74 74
@@ -118,11 +118,9 @@ int __cpuinit boot_secondary(unsigned int thread, struct task_struct *idle)
118 * If the cache partition has changed, prints a message to the log describing 118 * If the cache partition has changed, prints a message to the log describing
119 * those changes. 119 * those changes.
120 */ 120 */
121static __cpuinit void describe_cachepart_change(unsigned int thread, 121static void describe_cachepart_change(unsigned int thread, const char *label,
122 const char *label, 122 unsigned int sz, unsigned int old,
123 unsigned int sz, 123 unsigned int new)
124 unsigned int old,
125 unsigned int new)
126{ 124{
127 unsigned int lor1, land1, gor1, gand1; 125 unsigned int lor1, land1, gor1, gand1;
128 unsigned int lor2, land2, gor2, gand2; 126 unsigned int lor2, land2, gor2, gand2;
@@ -170,7 +168,7 @@ static __cpuinit void describe_cachepart_change(unsigned int thread,
170 * Ensures that coherency is enabled and that the threads share the same cache 168 * Ensures that coherency is enabled and that the threads share the same cache
171 * partitions. 169 * partitions.
172 */ 170 */
173static __cpuinit void setup_smp_cache(unsigned int thread) 171static void setup_smp_cache(unsigned int thread)
174{ 172{
175 unsigned int this_thread, lflags; 173 unsigned int this_thread, lflags;
176 unsigned int dcsz, dcpart_this, dcpart_old, dcpart_new; 174 unsigned int dcsz, dcpart_this, dcpart_old, dcpart_new;
@@ -215,7 +213,7 @@ static __cpuinit void setup_smp_cache(unsigned int thread)
215 icpart_old, icpart_new); 213 icpart_old, icpart_new);
216} 214}
217 215
218int __cpuinit __cpu_up(unsigned int cpu, struct task_struct *idle) 216int __cpu_up(unsigned int cpu, struct task_struct *idle)
219{ 217{
220 unsigned int thread = cpu_2_hwthread_id[cpu]; 218 unsigned int thread = cpu_2_hwthread_id[cpu];
221 int ret; 219 int ret;
@@ -268,7 +266,7 @@ static DECLARE_COMPLETION(cpu_killed);
268/* 266/*
269 * __cpu_disable runs on the processor to be shutdown. 267 * __cpu_disable runs on the processor to be shutdown.
270 */ 268 */
271int __cpuexit __cpu_disable(void) 269int __cpu_disable(void)
272{ 270{
273 unsigned int cpu = smp_processor_id(); 271 unsigned int cpu = smp_processor_id();
274 272
@@ -299,7 +297,7 @@ int __cpuexit __cpu_disable(void)
299 * called on the thread which is asking for a CPU to be shutdown - 297 * called on the thread which is asking for a CPU to be shutdown -
300 * waits until shutdown has completed, or it is timed out. 298 * waits until shutdown has completed, or it is timed out.
301 */ 299 */
302void __cpuexit __cpu_die(unsigned int cpu) 300void __cpu_die(unsigned int cpu)
303{ 301{
304 if (!wait_for_completion_timeout(&cpu_killed, msecs_to_jiffies(1))) 302 if (!wait_for_completion_timeout(&cpu_killed, msecs_to_jiffies(1)))
305 pr_err("CPU%u: unable to kill\n", cpu); 303 pr_err("CPU%u: unable to kill\n", cpu);
@@ -311,7 +309,7 @@ void __cpuexit __cpu_die(unsigned int cpu)
311 * Note that we do not return from this function. If this cpu is 309 * Note that we do not return from this function. If this cpu is
312 * brought online again it will need to run secondary_startup(). 310 * brought online again it will need to run secondary_startup().
313 */ 311 */
314void __cpuexit cpu_die(void) 312void cpu_die(void)
315{ 313{
316 local_irq_disable(); 314 local_irq_disable();
317 idle_task_exit(); 315 idle_task_exit();
@@ -326,7 +324,7 @@ void __cpuexit cpu_die(void)
326 * Called by both boot and secondaries to move global data into 324 * Called by both boot and secondaries to move global data into
327 * per-processor storage. 325 * per-processor storage.
328 */ 326 */
329void __cpuinit smp_store_cpu_info(unsigned int cpuid) 327void smp_store_cpu_info(unsigned int cpuid)
330{ 328{
331 struct cpuinfo_metag *cpu_info = &per_cpu(cpu_data, cpuid); 329 struct cpuinfo_metag *cpu_info = &per_cpu(cpu_data, cpuid);
332 330
diff --git a/arch/metag/kernel/traps.c b/arch/metag/kernel/traps.c
index c00ade0228ef..25f9d1c2ffec 100644
--- a/arch/metag/kernel/traps.c
+++ b/arch/metag/kernel/traps.c
@@ -812,7 +812,7 @@ static void set_trigger_mask(unsigned int mask)
812} 812}
813#endif 813#endif
814 814
815void __cpuinit per_cpu_trap_init(unsigned long cpu) 815void per_cpu_trap_init(unsigned long cpu)
816{ 816{
817 TBIRES int_context; 817 TBIRES int_context;
818 unsigned int thread = cpu_2_hwthread_id[cpu]; 818 unsigned int thread = cpu_2_hwthread_id[cpu];
diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig
index 4758a8fd3e99..e12764c2a9d0 100644
--- a/arch/mips/Kconfig
+++ b/arch/mips/Kconfig
@@ -114,6 +114,7 @@ config BCM47XX
114 select FW_CFE 114 select FW_CFE
115 select HW_HAS_PCI 115 select HW_HAS_PCI
116 select IRQ_CPU 116 select IRQ_CPU
117 select SYS_HAS_CPU_MIPS32_R1
117 select NO_EXCEPT_FILL 118 select NO_EXCEPT_FILL
118 select SYS_SUPPORTS_32BIT_KERNEL 119 select SYS_SUPPORTS_32BIT_KERNEL
119 select SYS_SUPPORTS_LITTLE_ENDIAN 120 select SYS_SUPPORTS_LITTLE_ENDIAN
@@ -1702,6 +1703,7 @@ endchoice
1702 1703
1703config KVM_GUEST 1704config KVM_GUEST
1704 bool "KVM Guest Kernel" 1705 bool "KVM Guest Kernel"
1706 depends on BROKEN_ON_SMP
1705 help 1707 help
1706 Select this option if building a guest kernel for KVM (Trap & Emulate) mode 1708 Select this option if building a guest kernel for KVM (Trap & Emulate) mode
1707 1709
diff --git a/arch/mips/ath79/setup.c b/arch/mips/ath79/setup.c
index 8be4e856b8b8..80f4ecd42b0d 100644
--- a/arch/mips/ath79/setup.c
+++ b/arch/mips/ath79/setup.c
@@ -182,7 +182,7 @@ const char *get_system_type(void)
182 return ath79_sys_type; 182 return ath79_sys_type;
183} 183}
184 184
185unsigned int __cpuinit get_c0_compare_int(void) 185unsigned int get_c0_compare_int(void)
186{ 186{
187 return CP0_LEGACY_COMPARE_IRQ; 187 return CP0_LEGACY_COMPARE_IRQ;
188} 188}
diff --git a/arch/mips/bcm47xx/Kconfig b/arch/mips/bcm47xx/Kconfig
index ba611927749b..2b8b118398c4 100644
--- a/arch/mips/bcm47xx/Kconfig
+++ b/arch/mips/bcm47xx/Kconfig
@@ -2,7 +2,6 @@ if BCM47XX
2 2
3config BCM47XX_SSB 3config BCM47XX_SSB
4 bool "SSB Support for Broadcom BCM47XX" 4 bool "SSB Support for Broadcom BCM47XX"
5 select SYS_HAS_CPU_MIPS32_R1
6 select SSB 5 select SSB
7 select SSB_DRIVER_MIPS 6 select SSB_DRIVER_MIPS
8 select SSB_DRIVER_EXTIF 7 select SSB_DRIVER_EXTIF
diff --git a/arch/mips/cavium-octeon/octeon-irq.c b/arch/mips/cavium-octeon/octeon-irq.c
index 7181def6037a..9d36774bded1 100644
--- a/arch/mips/cavium-octeon/octeon-irq.c
+++ b/arch/mips/cavium-octeon/octeon-irq.c
@@ -1095,7 +1095,7 @@ static void octeon_irq_ip3_ciu(void)
1095 1095
1096static bool octeon_irq_use_ip4; 1096static bool octeon_irq_use_ip4;
1097 1097
1098static void __cpuinit octeon_irq_local_enable_ip4(void *arg) 1098static void octeon_irq_local_enable_ip4(void *arg)
1099{ 1099{
1100 set_c0_status(STATUSF_IP4); 1100 set_c0_status(STATUSF_IP4);
1101} 1101}
@@ -1110,21 +1110,21 @@ static void (*octeon_irq_ip2)(void);
1110static void (*octeon_irq_ip3)(void); 1110static void (*octeon_irq_ip3)(void);
1111static void (*octeon_irq_ip4)(void); 1111static void (*octeon_irq_ip4)(void);
1112 1112
1113void __cpuinitdata (*octeon_irq_setup_secondary)(void); 1113void (*octeon_irq_setup_secondary)(void);
1114 1114
1115void __cpuinit octeon_irq_set_ip4_handler(octeon_irq_ip4_handler_t h) 1115void octeon_irq_set_ip4_handler(octeon_irq_ip4_handler_t h)
1116{ 1116{
1117 octeon_irq_ip4 = h; 1117 octeon_irq_ip4 = h;
1118 octeon_irq_use_ip4 = true; 1118 octeon_irq_use_ip4 = true;
1119 on_each_cpu(octeon_irq_local_enable_ip4, NULL, 1); 1119 on_each_cpu(octeon_irq_local_enable_ip4, NULL, 1);
1120} 1120}
1121 1121
1122static void __cpuinit octeon_irq_percpu_enable(void) 1122static void octeon_irq_percpu_enable(void)
1123{ 1123{
1124 irq_cpu_online(); 1124 irq_cpu_online();
1125} 1125}
1126 1126
1127static void __cpuinit octeon_irq_init_ciu_percpu(void) 1127static void octeon_irq_init_ciu_percpu(void)
1128{ 1128{
1129 int coreid = cvmx_get_core_num(); 1129 int coreid = cvmx_get_core_num();
1130 1130
@@ -1167,7 +1167,7 @@ static void octeon_irq_init_ciu2_percpu(void)
1167 cvmx_read_csr(CVMX_CIU2_SUM_PPX_IP2(coreid)); 1167 cvmx_read_csr(CVMX_CIU2_SUM_PPX_IP2(coreid));
1168} 1168}
1169 1169
1170static void __cpuinit octeon_irq_setup_secondary_ciu(void) 1170static void octeon_irq_setup_secondary_ciu(void)
1171{ 1171{
1172 octeon_irq_init_ciu_percpu(); 1172 octeon_irq_init_ciu_percpu();
1173 octeon_irq_percpu_enable(); 1173 octeon_irq_percpu_enable();
diff --git a/arch/mips/cavium-octeon/octeon-platform.c b/arch/mips/cavium-octeon/octeon-platform.c
index 7b746e7bf7a1..1830874ff1e2 100644
--- a/arch/mips/cavium-octeon/octeon-platform.c
+++ b/arch/mips/cavium-octeon/octeon-platform.c
@@ -334,9 +334,10 @@ static void __init octeon_fdt_pip_iface(int pip, int idx, u64 *pmac)
334 char name_buffer[20]; 334 char name_buffer[20];
335 int iface; 335 int iface;
336 int p; 336 int p;
337 int count; 337 int count = 0;
338 338
339 count = cvmx_helper_interface_enumerate(idx); 339 if (cvmx_helper_interface_enumerate(idx) == 0)
340 count = cvmx_helper_ports_on_interface(idx);
340 341
341 snprintf(name_buffer, sizeof(name_buffer), "interface@%d", idx); 342 snprintf(name_buffer, sizeof(name_buffer), "interface@%d", idx);
342 iface = fdt_subnode_offset(initial_boot_params, pip, name_buffer); 343 iface = fdt_subnode_offset(initial_boot_params, pip, name_buffer);
diff --git a/arch/mips/cavium-octeon/smp.c b/arch/mips/cavium-octeon/smp.c
index 295137dfdc37..138cc80c5928 100644
--- a/arch/mips/cavium-octeon/smp.c
+++ b/arch/mips/cavium-octeon/smp.c
@@ -173,7 +173,7 @@ static void octeon_boot_secondary(int cpu, struct task_struct *idle)
173 * After we've done initial boot, this function is called to allow the 173 * After we've done initial boot, this function is called to allow the
174 * board code to clean up state, if needed 174 * board code to clean up state, if needed
175 */ 175 */
176static void __cpuinit octeon_init_secondary(void) 176static void octeon_init_secondary(void)
177{ 177{
178 unsigned int sr; 178 unsigned int sr;
179 179
@@ -375,7 +375,7 @@ static int octeon_update_boot_vector(unsigned int cpu)
375 return 0; 375 return 0;
376} 376}
377 377
378static int __cpuinit octeon_cpu_callback(struct notifier_block *nfb, 378static int octeon_cpu_callback(struct notifier_block *nfb,
379 unsigned long action, void *hcpu) 379 unsigned long action, void *hcpu)
380{ 380{
381 unsigned int cpu = (unsigned long)hcpu; 381 unsigned int cpu = (unsigned long)hcpu;
@@ -394,7 +394,7 @@ static int __cpuinit octeon_cpu_callback(struct notifier_block *nfb,
394 return NOTIFY_OK; 394 return NOTIFY_OK;
395} 395}
396 396
397static int __cpuinit register_cavium_notifier(void) 397static int register_cavium_notifier(void)
398{ 398{
399 hotcpu_notifier(octeon_cpu_callback, 0); 399 hotcpu_notifier(octeon_cpu_callback, 0);
400 return 0; 400 return 0;
diff --git a/arch/mips/include/asm/cpu-features.h b/arch/mips/include/asm/cpu-features.h
index 1dc086087a72..fa44f3ec5302 100644
--- a/arch/mips/include/asm/cpu-features.h
+++ b/arch/mips/include/asm/cpu-features.h
@@ -17,6 +17,8 @@
17#define current_cpu_type() current_cpu_data.cputype 17#define current_cpu_type() current_cpu_data.cputype
18#endif 18#endif
19 19
20#define boot_cpu_type() cpu_data[0].cputype
21
20/* 22/*
21 * SMP assumption: Options of CPU 0 are a superset of all processors. 23 * SMP assumption: Options of CPU 0 are a superset of all processors.
22 * This is true for all known MIPS systems. 24 * This is true for all known MIPS systems.
diff --git a/arch/mips/include/asm/mach-generic/spaces.h b/arch/mips/include/asm/mach-generic/spaces.h
index 5b2f2e68e57f..9488fa5f8866 100644
--- a/arch/mips/include/asm/mach-generic/spaces.h
+++ b/arch/mips/include/asm/mach-generic/spaces.h
@@ -25,8 +25,12 @@
25#else 25#else
26#define CAC_BASE _AC(0x80000000, UL) 26#define CAC_BASE _AC(0x80000000, UL)
27#endif 27#endif
28#ifndef IO_BASE
28#define IO_BASE _AC(0xa0000000, UL) 29#define IO_BASE _AC(0xa0000000, UL)
30#endif
31#ifndef UNCAC_BASE
29#define UNCAC_BASE _AC(0xa0000000, UL) 32#define UNCAC_BASE _AC(0xa0000000, UL)
33#endif
30 34
31#ifndef MAP_BASE 35#ifndef MAP_BASE
32#ifdef CONFIG_KVM_GUEST 36#ifdef CONFIG_KVM_GUEST
diff --git a/arch/mips/include/asm/uasm.h b/arch/mips/include/asm/uasm.h
index 370d967725c2..c33a9564fb41 100644
--- a/arch/mips/include/asm/uasm.h
+++ b/arch/mips/include/asm/uasm.h
@@ -13,12 +13,8 @@
13 13
14#ifdef CONFIG_EXPORT_UASM 14#ifdef CONFIG_EXPORT_UASM
15#include <linux/export.h> 15#include <linux/export.h>
16#define __uasminit
17#define __uasminitdata
18#define UASM_EXPORT_SYMBOL(sym) EXPORT_SYMBOL(sym) 16#define UASM_EXPORT_SYMBOL(sym) EXPORT_SYMBOL(sym)
19#else 17#else
20#define __uasminit __cpuinit
21#define __uasminitdata __cpuinitdata
22#define UASM_EXPORT_SYMBOL(sym) 18#define UASM_EXPORT_SYMBOL(sym)
23#endif 19#endif
24 20
@@ -54,43 +50,36 @@
54#endif 50#endif
55 51
56#define Ip_u1u2u3(op) \ 52#define Ip_u1u2u3(op) \
57void __uasminit \ 53void ISAOPC(op)(u32 **buf, unsigned int a, unsigned int b, unsigned int c)
58ISAOPC(op)(u32 **buf, unsigned int a, unsigned int b, unsigned int c)
59 54
60#define Ip_u2u1u3(op) \ 55#define Ip_u2u1u3(op) \
61void __uasminit \ 56void ISAOPC(op)(u32 **buf, unsigned int a, unsigned int b, unsigned int c)
62ISAOPC(op)(u32 **buf, unsigned int a, unsigned int b, unsigned int c)
63 57
64#define Ip_u3u1u2(op) \ 58#define Ip_u3u1u2(op) \
65void __uasminit \ 59void ISAOPC(op)(u32 **buf, unsigned int a, unsigned int b, unsigned int c)
66ISAOPC(op)(u32 **buf, unsigned int a, unsigned int b, unsigned int c)
67 60
68#define Ip_u1u2s3(op) \ 61#define Ip_u1u2s3(op) \
69void __uasminit \ 62void ISAOPC(op)(u32 **buf, unsigned int a, unsigned int b, signed int c)
70ISAOPC(op)(u32 **buf, unsigned int a, unsigned int b, signed int c)
71 63
72#define Ip_u2s3u1(op) \ 64#define Ip_u2s3u1(op) \
73void __uasminit \ 65void ISAOPC(op)(u32 **buf, unsigned int a, signed int b, unsigned int c)
74ISAOPC(op)(u32 **buf, unsigned int a, signed int b, unsigned int c)
75 66
76#define Ip_u2u1s3(op) \ 67#define Ip_u2u1s3(op) \
77void __uasminit \ 68void ISAOPC(op)(u32 **buf, unsigned int a, unsigned int b, signed int c)
78ISAOPC(op)(u32 **buf, unsigned int a, unsigned int b, signed int c)
79 69
80#define Ip_u2u1msbu3(op) \ 70#define Ip_u2u1msbu3(op) \
81void __uasminit \ 71void ISAOPC(op)(u32 **buf, unsigned int a, unsigned int b, unsigned int c, \
82ISAOPC(op)(u32 **buf, unsigned int a, unsigned int b, unsigned int c, \
83 unsigned int d) 72 unsigned int d)
84 73
85#define Ip_u1u2(op) \ 74#define Ip_u1u2(op) \
86void __uasminit ISAOPC(op)(u32 **buf, unsigned int a, unsigned int b) 75void ISAOPC(op)(u32 **buf, unsigned int a, unsigned int b)
87 76
88#define Ip_u1s2(op) \ 77#define Ip_u1s2(op) \
89void __uasminit ISAOPC(op)(u32 **buf, unsigned int a, signed int b) 78void ISAOPC(op)(u32 **buf, unsigned int a, signed int b)
90 79
91#define Ip_u1(op) void __uasminit ISAOPC(op)(u32 **buf, unsigned int a) 80#define Ip_u1(op) void ISAOPC(op)(u32 **buf, unsigned int a)
92 81
93#define Ip_0(op) void __uasminit ISAOPC(op)(u32 **buf) 82#define Ip_0(op) void ISAOPC(op)(u32 **buf)
94 83
95Ip_u2u1s3(_addiu); 84Ip_u2u1s3(_addiu);
96Ip_u3u1u2(_addu); 85Ip_u3u1u2(_addu);
@@ -163,7 +152,7 @@ struct uasm_label {
163 int lab; 152 int lab;
164}; 153};
165 154
166void __uasminit ISAFUNC(uasm_build_label)(struct uasm_label **lab, u32 *addr, 155void ISAFUNC(uasm_build_label)(struct uasm_label **lab, u32 *addr,
167 int lid); 156 int lid);
168#ifdef CONFIG_64BIT 157#ifdef CONFIG_64BIT
169int ISAFUNC(uasm_in_compat_space_p)(long addr); 158int ISAFUNC(uasm_in_compat_space_p)(long addr);
@@ -174,7 +163,7 @@ void ISAFUNC(UASM_i_LA_mostly)(u32 **buf, unsigned int rs, long addr);
174void ISAFUNC(UASM_i_LA)(u32 **buf, unsigned int rs, long addr); 163void ISAFUNC(UASM_i_LA)(u32 **buf, unsigned int rs, long addr);
175 164
176#define UASM_L_LA(lb) \ 165#define UASM_L_LA(lb) \
177static inline void __uasminit ISAFUNC(uasm_l##lb)(struct uasm_label **lab, u32 *addr) \ 166static inline void ISAFUNC(uasm_l##lb)(struct uasm_label **lab, u32 *addr) \
178{ \ 167{ \
179 ISAFUNC(uasm_build_label)(lab, addr, label##lb); \ 168 ISAFUNC(uasm_build_label)(lab, addr, label##lb); \
180} 169}
diff --git a/arch/mips/include/uapi/asm/siginfo.h b/arch/mips/include/uapi/asm/siginfo.h
index b7a23064841f..88e292b7719e 100644
--- a/arch/mips/include/uapi/asm/siginfo.h
+++ b/arch/mips/include/uapi/asm/siginfo.h
@@ -25,11 +25,12 @@ struct siginfo;
25/* 25/*
26 * Careful to keep union _sifields from shifting ... 26 * Careful to keep union _sifields from shifting ...
27 */ 27 */
28#if __SIZEOF_LONG__ == 4 28#if _MIPS_SZLONG == 32
29#define __ARCH_SI_PREAMBLE_SIZE (3 * sizeof(int)) 29#define __ARCH_SI_PREAMBLE_SIZE (3 * sizeof(int))
30#endif 30#elif _MIPS_SZLONG == 64
31#if __SIZEOF_LONG__ == 8
32#define __ARCH_SI_PREAMBLE_SIZE (4 * sizeof(int)) 31#define __ARCH_SI_PREAMBLE_SIZE (4 * sizeof(int))
32#else
33#error _MIPS_SZLONG neither 32 nor 64
33#endif 34#endif
34 35
35#include <asm-generic/siginfo.h> 36#include <asm-generic/siginfo.h>
diff --git a/arch/mips/kernel/bmips_vec.S b/arch/mips/kernel/bmips_vec.S
index 64c4fd62cf08..bd79c4f9bff4 100644
--- a/arch/mips/kernel/bmips_vec.S
+++ b/arch/mips/kernel/bmips_vec.S
@@ -28,8 +28,6 @@
28 .set mips0 28 .set mips0
29 .endm 29 .endm
30 30
31 __CPUINIT
32
33/*********************************************************************** 31/***********************************************************************
34 * Alternate CPU1 startup vector for BMIPS4350 32 * Alternate CPU1 startup vector for BMIPS4350
35 * 33 *
@@ -56,7 +54,11 @@ LEAF(bmips_smp_movevec)
56 /* set up CPU1 CBR; move BASE to 0xa000_0000 */ 54 /* set up CPU1 CBR; move BASE to 0xa000_0000 */
57 li k0, 0xff400000 55 li k0, 0xff400000
58 mtc0 k0, $22, 6 56 mtc0 k0, $22, 6
59 li k1, CKSEG1 | BMIPS_RELO_VECTOR_CONTROL_1 57 /* set up relocation vector address based on thread ID */
58 mfc0 k1, $22, 3
59 srl k1, 16
60 andi k1, 0x8000
61 or k1, CKSEG1 | BMIPS_RELO_VECTOR_CONTROL_0
60 or k0, k1 62 or k0, k1
61 li k1, 0xa0080000 63 li k1, 0xa0080000
62 sw k1, 0(k0) 64 sw k1, 0(k0)
@@ -216,8 +218,6 @@ END(bmips_smp_int_vec)
216 * Certain CPUs support extending kseg0 to 1024MB. 218 * Certain CPUs support extending kseg0 to 1024MB.
217 ***********************************************************************/ 219 ***********************************************************************/
218 220
219 __CPUINIT
220
221LEAF(bmips_enable_xks01) 221LEAF(bmips_enable_xks01)
222 222
223#if defined(CONFIG_XKS01) 223#if defined(CONFIG_XKS01)
diff --git a/arch/mips/kernel/cevt-bcm1480.c b/arch/mips/kernel/cevt-bcm1480.c
index 15f618b40cf6..7976457184b1 100644
--- a/arch/mips/kernel/cevt-bcm1480.c
+++ b/arch/mips/kernel/cevt-bcm1480.c
@@ -109,7 +109,7 @@ static DEFINE_PER_CPU(struct clock_event_device, sibyte_hpt_clockevent);
109static DEFINE_PER_CPU(struct irqaction, sibyte_hpt_irqaction); 109static DEFINE_PER_CPU(struct irqaction, sibyte_hpt_irqaction);
110static DEFINE_PER_CPU(char [18], sibyte_hpt_name); 110static DEFINE_PER_CPU(char [18], sibyte_hpt_name);
111 111
112void __cpuinit sb1480_clockevent_init(void) 112void sb1480_clockevent_init(void)
113{ 113{
114 unsigned int cpu = smp_processor_id(); 114 unsigned int cpu = smp_processor_id();
115 unsigned int irq = K_BCM1480_INT_TIMER_0 + cpu; 115 unsigned int irq = K_BCM1480_INT_TIMER_0 + cpu;
diff --git a/arch/mips/kernel/cevt-gic.c b/arch/mips/kernel/cevt-gic.c
index 730eaf92c018..594cbbf16d62 100644
--- a/arch/mips/kernel/cevt-gic.c
+++ b/arch/mips/kernel/cevt-gic.c
@@ -59,7 +59,7 @@ void gic_event_handler(struct clock_event_device *dev)
59{ 59{
60} 60}
61 61
62int __cpuinit gic_clockevent_init(void) 62int gic_clockevent_init(void)
63{ 63{
64 unsigned int cpu = smp_processor_id(); 64 unsigned int cpu = smp_processor_id();
65 struct clock_event_device *cd; 65 struct clock_event_device *cd;
diff --git a/arch/mips/kernel/cevt-r4k.c b/arch/mips/kernel/cevt-r4k.c
index 02033eaf8825..50d3f5a8d6bb 100644
--- a/arch/mips/kernel/cevt-r4k.c
+++ b/arch/mips/kernel/cevt-r4k.c
@@ -171,7 +171,7 @@ int c0_compare_int_usable(void)
171} 171}
172 172
173#ifndef CONFIG_MIPS_MT_SMTC 173#ifndef CONFIG_MIPS_MT_SMTC
174int __cpuinit r4k_clockevent_init(void) 174int r4k_clockevent_init(void)
175{ 175{
176 unsigned int cpu = smp_processor_id(); 176 unsigned int cpu = smp_processor_id();
177 struct clock_event_device *cd; 177 struct clock_event_device *cd;
diff --git a/arch/mips/kernel/cevt-sb1250.c b/arch/mips/kernel/cevt-sb1250.c
index 200f2778bf36..5ea6d6b1de15 100644
--- a/arch/mips/kernel/cevt-sb1250.c
+++ b/arch/mips/kernel/cevt-sb1250.c
@@ -107,7 +107,7 @@ static DEFINE_PER_CPU(struct clock_event_device, sibyte_hpt_clockevent);
107static DEFINE_PER_CPU(struct irqaction, sibyte_hpt_irqaction); 107static DEFINE_PER_CPU(struct irqaction, sibyte_hpt_irqaction);
108static DEFINE_PER_CPU(char [18], sibyte_hpt_name); 108static DEFINE_PER_CPU(char [18], sibyte_hpt_name);
109 109
110void __cpuinit sb1250_clockevent_init(void) 110void sb1250_clockevent_init(void)
111{ 111{
112 unsigned int cpu = smp_processor_id(); 112 unsigned int cpu = smp_processor_id();
113 unsigned int irq = K_INT_TIMER_0 + cpu; 113 unsigned int irq = K_INT_TIMER_0 + cpu;
diff --git a/arch/mips/kernel/cevt-smtc.c b/arch/mips/kernel/cevt-smtc.c
index 9de5ed7ef1a3..b6cf0a60d896 100644
--- a/arch/mips/kernel/cevt-smtc.c
+++ b/arch/mips/kernel/cevt-smtc.c
@@ -248,7 +248,7 @@ irqreturn_t c0_compare_interrupt(int irq, void *dev_id)
248} 248}
249 249
250 250
251int __cpuinit smtc_clockevent_init(void) 251int smtc_clockevent_init(void)
252{ 252{
253 uint64_t mips_freq = mips_hpt_frequency; 253 uint64_t mips_freq = mips_hpt_frequency;
254 unsigned int cpu = smp_processor_id(); 254 unsigned int cpu = smp_processor_id();
diff --git a/arch/mips/kernel/cpu-bugs64.c b/arch/mips/kernel/cpu-bugs64.c
index 0c61df281ce6..2d80b5f1aeae 100644
--- a/arch/mips/kernel/cpu-bugs64.c
+++ b/arch/mips/kernel/cpu-bugs64.c
@@ -168,7 +168,7 @@ static inline void check_mult_sh(void)
168 panic(bug64hit, !R4000_WAR ? r4kwar : nowar); 168 panic(bug64hit, !R4000_WAR ? r4kwar : nowar);
169} 169}
170 170
171static volatile int daddi_ov __cpuinitdata; 171static volatile int daddi_ov;
172 172
173asmlinkage void __init do_daddi_ov(struct pt_regs *regs) 173asmlinkage void __init do_daddi_ov(struct pt_regs *regs)
174{ 174{
diff --git a/arch/mips/kernel/cpu-probe.c b/arch/mips/kernel/cpu-probe.c
index c7b1b3c5a761..4c6167a17875 100644
--- a/arch/mips/kernel/cpu-probe.c
+++ b/arch/mips/kernel/cpu-probe.c
@@ -27,7 +27,7 @@
27#include <asm/spram.h> 27#include <asm/spram.h>
28#include <asm/uaccess.h> 28#include <asm/uaccess.h>
29 29
30static int __cpuinitdata mips_fpu_disabled; 30static int mips_fpu_disabled;
31 31
32static int __init fpu_disable(char *s) 32static int __init fpu_disable(char *s)
33{ 33{
@@ -39,7 +39,7 @@ static int __init fpu_disable(char *s)
39 39
40__setup("nofpu", fpu_disable); 40__setup("nofpu", fpu_disable);
41 41
42int __cpuinitdata mips_dsp_disabled; 42int mips_dsp_disabled;
43 43
44static int __init dsp_disable(char *s) 44static int __init dsp_disable(char *s)
45{ 45{
@@ -134,7 +134,7 @@ static inline void cpu_probe_vmbits(struct cpuinfo_mips *c)
134#endif 134#endif
135} 135}
136 136
137static void __cpuinit set_isa(struct cpuinfo_mips *c, unsigned int isa) 137static void set_isa(struct cpuinfo_mips *c, unsigned int isa)
138{ 138{
139 switch (isa) { 139 switch (isa) {
140 case MIPS_CPU_ISA_M64R2: 140 case MIPS_CPU_ISA_M64R2:
@@ -159,7 +159,7 @@ static void __cpuinit set_isa(struct cpuinfo_mips *c, unsigned int isa)
159 } 159 }
160} 160}
161 161
162static char unknown_isa[] __cpuinitdata = KERN_ERR \ 162static char unknown_isa[] = KERN_ERR \
163 "Unsupported ISA type, c0.config0: %d."; 163 "Unsupported ISA type, c0.config0: %d.";
164 164
165static inline unsigned int decode_config0(struct cpuinfo_mips *c) 165static inline unsigned int decode_config0(struct cpuinfo_mips *c)
@@ -290,7 +290,7 @@ static inline unsigned int decode_config4(struct cpuinfo_mips *c)
290 return config4 & MIPS_CONF_M; 290 return config4 & MIPS_CONF_M;
291} 291}
292 292
293static void __cpuinit decode_configs(struct cpuinfo_mips *c) 293static void decode_configs(struct cpuinfo_mips *c)
294{ 294{
295 int ok; 295 int ok;
296 296
@@ -962,7 +962,7 @@ EXPORT_SYMBOL(__ua_limit);
962const char *__cpu_name[NR_CPUS]; 962const char *__cpu_name[NR_CPUS];
963const char *__elf_platform; 963const char *__elf_platform;
964 964
965__cpuinit void cpu_probe(void) 965void cpu_probe(void)
966{ 966{
967 struct cpuinfo_mips *c = &current_cpu_data; 967 struct cpuinfo_mips *c = &current_cpu_data;
968 unsigned int cpu = smp_processor_id(); 968 unsigned int cpu = smp_processor_id();
@@ -1047,7 +1047,7 @@ __cpuinit void cpu_probe(void)
1047#endif 1047#endif
1048} 1048}
1049 1049
1050__cpuinit void cpu_report(void) 1050void cpu_report(void)
1051{ 1051{
1052 struct cpuinfo_mips *c = &current_cpu_data; 1052 struct cpuinfo_mips *c = &current_cpu_data;
1053 1053
diff --git a/arch/mips/kernel/head.S b/arch/mips/kernel/head.S
index 099912324423..7b6a5b3e3acf 100644
--- a/arch/mips/kernel/head.S
+++ b/arch/mips/kernel/head.S
@@ -158,8 +158,6 @@ NESTED(kernel_entry, 16, sp) # kernel entry point
158 j start_kernel 158 j start_kernel
159 END(kernel_entry) 159 END(kernel_entry)
160 160
161 __CPUINIT
162
163#ifdef CONFIG_SMP 161#ifdef CONFIG_SMP
164/* 162/*
165 * SMP slave cpus entry point. Board specific code for bootstrap calls this 163 * SMP slave cpus entry point. Board specific code for bootstrap calls this
@@ -188,5 +186,3 @@ NESTED(smp_bootstrap, 16, sp)
188 j start_secondary 186 j start_secondary
189 END(smp_bootstrap) 187 END(smp_bootstrap)
190#endif /* CONFIG_SMP */ 188#endif /* CONFIG_SMP */
191
192 __FINIT
diff --git a/arch/mips/kernel/smp-bmips.c b/arch/mips/kernel/smp-bmips.c
index aea6c0885838..126da74d4c55 100644
--- a/arch/mips/kernel/smp-bmips.c
+++ b/arch/mips/kernel/smp-bmips.c
@@ -66,6 +66,8 @@ static void __init bmips_smp_setup(void)
66 int i, cpu = 1, boot_cpu = 0; 66 int i, cpu = 1, boot_cpu = 0;
67 67
68#if defined(CONFIG_CPU_BMIPS4350) || defined(CONFIG_CPU_BMIPS4380) 68#if defined(CONFIG_CPU_BMIPS4350) || defined(CONFIG_CPU_BMIPS4380)
69 int cpu_hw_intr;
70
69 /* arbitration priority */ 71 /* arbitration priority */
70 clear_c0_brcm_cmt_ctrl(0x30); 72 clear_c0_brcm_cmt_ctrl(0x30);
71 73
@@ -79,15 +81,13 @@ static void __init bmips_smp_setup(void)
79 * MIPS interrupts 0,1 (SW INT 0,1) cross over to the other thread 81 * MIPS interrupts 0,1 (SW INT 0,1) cross over to the other thread
80 * MIPS interrupt 2 (HW INT 0) is the CPU0 L1 controller output 82 * MIPS interrupt 2 (HW INT 0) is the CPU0 L1 controller output
81 * MIPS interrupt 3 (HW INT 1) is the CPU1 L1 controller output 83 * MIPS interrupt 3 (HW INT 1) is the CPU1 L1 controller output
82 *
83 * If booting from TP1, leave the existing CMT interrupt routing
84 * such that TP0 responds to SW1 and TP1 responds to SW0.
85 */ 84 */
86 if (boot_cpu == 0) 85 if (boot_cpu == 0)
87 change_c0_brcm_cmt_intr(0xf8018000, 86 cpu_hw_intr = 0x02;
88 (0x02 << 27) | (0x03 << 15));
89 else 87 else
90 change_c0_brcm_cmt_intr(0xf8018000, (0x1d << 27)); 88 cpu_hw_intr = 0x1d;
89
90 change_c0_brcm_cmt_intr(0xf8018000, (cpu_hw_intr << 27) | (0x03 << 15));
91 91
92 /* single core, 2 threads (2 pipelines) */ 92 /* single core, 2 threads (2 pipelines) */
93 max_cpus = 2; 93 max_cpus = 2;
@@ -173,7 +173,7 @@ static void bmips_boot_secondary(int cpu, struct task_struct *idle)
173 else { 173 else {
174#if defined(CONFIG_CPU_BMIPS4350) || defined(CONFIG_CPU_BMIPS4380) 174#if defined(CONFIG_CPU_BMIPS4350) || defined(CONFIG_CPU_BMIPS4380)
175 /* Reset slave TP1 if booting from TP0 */ 175 /* Reset slave TP1 if booting from TP0 */
176 if (cpu_logical_map(cpu) == 0) 176 if (cpu_logical_map(cpu) == 1)
177 set_c0_brcm_cmt_ctrl(0x01); 177 set_c0_brcm_cmt_ctrl(0x01);
178#elif defined(CONFIG_CPU_BMIPS5000) 178#elif defined(CONFIG_CPU_BMIPS5000)
179 if (cpu & 0x01) 179 if (cpu & 0x01)
@@ -202,9 +202,15 @@ static void bmips_init_secondary(void)
202#if defined(CONFIG_CPU_BMIPS4350) || defined(CONFIG_CPU_BMIPS4380) 202#if defined(CONFIG_CPU_BMIPS4350) || defined(CONFIG_CPU_BMIPS4380)
203 void __iomem *cbr = BMIPS_GET_CBR(); 203 void __iomem *cbr = BMIPS_GET_CBR();
204 unsigned long old_vec; 204 unsigned long old_vec;
205 unsigned long relo_vector;
206 int boot_cpu;
207
208 boot_cpu = !!(read_c0_brcm_cmt_local() & (1 << 31));
209 relo_vector = boot_cpu ? BMIPS_RELO_VECTOR_CONTROL_0 :
210 BMIPS_RELO_VECTOR_CONTROL_1;
205 211
206 old_vec = __raw_readl(cbr + BMIPS_RELO_VECTOR_CONTROL_1); 212 old_vec = __raw_readl(cbr + relo_vector);
207 __raw_writel(old_vec & ~0x20000000, cbr + BMIPS_RELO_VECTOR_CONTROL_1); 213 __raw_writel(old_vec & ~0x20000000, cbr + relo_vector);
208 214
209 clear_c0_cause(smp_processor_id() ? C_SW1 : C_SW0); 215 clear_c0_cause(smp_processor_id() ? C_SW1 : C_SW0);
210#elif defined(CONFIG_CPU_BMIPS5000) 216#elif defined(CONFIG_CPU_BMIPS5000)
@@ -398,7 +404,7 @@ struct plat_smp_ops bmips_smp_ops = {
398 * UP BMIPS systems as well. 404 * UP BMIPS systems as well.
399 ***********************************************************************/ 405 ***********************************************************************/
400 406
401static void __cpuinit bmips_wr_vec(unsigned long dst, char *start, char *end) 407static void bmips_wr_vec(unsigned long dst, char *start, char *end)
402{ 408{
403 memcpy((void *)dst, start, end - start); 409 memcpy((void *)dst, start, end - start);
404 dma_cache_wback((unsigned long)start, end - start); 410 dma_cache_wback((unsigned long)start, end - start);
@@ -406,7 +412,7 @@ static void __cpuinit bmips_wr_vec(unsigned long dst, char *start, char *end)
406 instruction_hazard(); 412 instruction_hazard();
407} 413}
408 414
409static inline void __cpuinit bmips_nmi_handler_setup(void) 415static inline void bmips_nmi_handler_setup(void)
410{ 416{
411 bmips_wr_vec(BMIPS_NMI_RESET_VEC, &bmips_reset_nmi_vec, 417 bmips_wr_vec(BMIPS_NMI_RESET_VEC, &bmips_reset_nmi_vec,
412 &bmips_reset_nmi_vec_end); 418 &bmips_reset_nmi_vec_end);
@@ -414,7 +420,7 @@ static inline void __cpuinit bmips_nmi_handler_setup(void)
414 &bmips_smp_int_vec_end); 420 &bmips_smp_int_vec_end);
415} 421}
416 422
417void __cpuinit bmips_ebase_setup(void) 423void bmips_ebase_setup(void)
418{ 424{
419 unsigned long new_ebase = ebase; 425 unsigned long new_ebase = ebase;
420 void __iomem __maybe_unused *cbr; 426 void __iomem __maybe_unused *cbr;
diff --git a/arch/mips/kernel/smp-mt.c b/arch/mips/kernel/smp-mt.c
index 3e5164c11cac..57a3f7a2b370 100644
--- a/arch/mips/kernel/smp-mt.c
+++ b/arch/mips/kernel/smp-mt.c
@@ -149,7 +149,7 @@ static void vsmp_send_ipi_mask(const struct cpumask *mask, unsigned int action)
149 vsmp_send_ipi_single(i, action); 149 vsmp_send_ipi_single(i, action);
150} 150}
151 151
152static void __cpuinit vsmp_init_secondary(void) 152static void vsmp_init_secondary(void)
153{ 153{
154#ifdef CONFIG_IRQ_GIC 154#ifdef CONFIG_IRQ_GIC
155 /* This is Malta specific: IPI,performance and timer interrupts */ 155 /* This is Malta specific: IPI,performance and timer interrupts */
@@ -162,7 +162,7 @@ static void __cpuinit vsmp_init_secondary(void)
162 STATUSF_IP6 | STATUSF_IP7); 162 STATUSF_IP6 | STATUSF_IP7);
163} 163}
164 164
165static void __cpuinit vsmp_smp_finish(void) 165static void vsmp_smp_finish(void)
166{ 166{
167 /* CDFIXME: remove this? */ 167 /* CDFIXME: remove this? */
168 write_c0_compare(read_c0_count() + (8* mips_hpt_frequency/HZ)); 168 write_c0_compare(read_c0_count() + (8* mips_hpt_frequency/HZ));
@@ -188,7 +188,7 @@ static void vsmp_cpus_done(void)
188 * (unsigned long)idle->thread_info the gp 188 * (unsigned long)idle->thread_info the gp
189 * assumes a 1:1 mapping of TC => VPE 189 * assumes a 1:1 mapping of TC => VPE
190 */ 190 */
191static void __cpuinit vsmp_boot_secondary(int cpu, struct task_struct *idle) 191static void vsmp_boot_secondary(int cpu, struct task_struct *idle)
192{ 192{
193 struct thread_info *gp = task_thread_info(idle); 193 struct thread_info *gp = task_thread_info(idle);
194 dvpe(); 194 dvpe();
diff --git a/arch/mips/kernel/smp-up.c b/arch/mips/kernel/smp-up.c
index 00500fea2750..7fde3e4d978f 100644
--- a/arch/mips/kernel/smp-up.c
+++ b/arch/mips/kernel/smp-up.c
@@ -28,11 +28,11 @@ static inline void up_send_ipi_mask(const struct cpumask *mask,
28 * After we've done initial boot, this function is called to allow the 28 * After we've done initial boot, this function is called to allow the
29 * board code to clean up state, if needed 29 * board code to clean up state, if needed
30 */ 30 */
31static void __cpuinit up_init_secondary(void) 31static void up_init_secondary(void)
32{ 32{
33} 33}
34 34
35static void __cpuinit up_smp_finish(void) 35static void up_smp_finish(void)
36{ 36{
37} 37}
38 38
@@ -44,7 +44,7 @@ static void up_cpus_done(void)
44/* 44/*
45 * Firmware CPU startup hook 45 * Firmware CPU startup hook
46 */ 46 */
47static void __cpuinit up_boot_secondary(int cpu, struct task_struct *idle) 47static void up_boot_secondary(int cpu, struct task_struct *idle)
48{ 48{
49} 49}
50 50
diff --git a/arch/mips/kernel/smp.c b/arch/mips/kernel/smp.c
index 6e7862ab46cc..5c208ed8f856 100644
--- a/arch/mips/kernel/smp.c
+++ b/arch/mips/kernel/smp.c
@@ -86,7 +86,7 @@ static inline void set_cpu_sibling_map(int cpu)
86struct plat_smp_ops *mp_ops; 86struct plat_smp_ops *mp_ops;
87EXPORT_SYMBOL(mp_ops); 87EXPORT_SYMBOL(mp_ops);
88 88
89__cpuinit void register_smp_ops(struct plat_smp_ops *ops) 89void register_smp_ops(struct plat_smp_ops *ops)
90{ 90{
91 if (mp_ops) 91 if (mp_ops)
92 printk(KERN_WARNING "Overriding previously set SMP ops\n"); 92 printk(KERN_WARNING "Overriding previously set SMP ops\n");
@@ -98,7 +98,7 @@ __cpuinit void register_smp_ops(struct plat_smp_ops *ops)
98 * First C code run on the secondary CPUs after being started up by 98 * First C code run on the secondary CPUs after being started up by
99 * the master. 99 * the master.
100 */ 100 */
101asmlinkage __cpuinit void start_secondary(void) 101asmlinkage void start_secondary(void)
102{ 102{
103 unsigned int cpu; 103 unsigned int cpu;
104 104
@@ -197,7 +197,7 @@ void smp_prepare_boot_cpu(void)
197 cpu_set(0, cpu_callin_map); 197 cpu_set(0, cpu_callin_map);
198} 198}
199 199
200int __cpuinit __cpu_up(unsigned int cpu, struct task_struct *tidle) 200int __cpu_up(unsigned int cpu, struct task_struct *tidle)
201{ 201{
202 mp_ops->boot_secondary(cpu, tidle); 202 mp_ops->boot_secondary(cpu, tidle);
203 203
diff --git a/arch/mips/kernel/smtc.c b/arch/mips/kernel/smtc.c
index 75a4fd709841..dfc1b911be04 100644
--- a/arch/mips/kernel/smtc.c
+++ b/arch/mips/kernel/smtc.c
@@ -645,7 +645,7 @@ void smtc_prepare_cpus(int cpus)
645 * (unsigned long)idle->thread_info the gp 645 * (unsigned long)idle->thread_info the gp
646 * 646 *
647 */ 647 */
648void __cpuinit smtc_boot_secondary(int cpu, struct task_struct *idle) 648void smtc_boot_secondary(int cpu, struct task_struct *idle)
649{ 649{
650 extern u32 kernelsp[NR_CPUS]; 650 extern u32 kernelsp[NR_CPUS];
651 unsigned long flags; 651 unsigned long flags;
diff --git a/arch/mips/kernel/spram.c b/arch/mips/kernel/spram.c
index 6af08d896e20..93f86817f20a 100644
--- a/arch/mips/kernel/spram.c
+++ b/arch/mips/kernel/spram.c
@@ -37,7 +37,7 @@
37/* 37/*
38 * Different semantics to the set_c0_* function built by __BUILD_SET_C0 38 * Different semantics to the set_c0_* function built by __BUILD_SET_C0
39 */ 39 */
40static __cpuinit unsigned int bis_c0_errctl(unsigned int set) 40static unsigned int bis_c0_errctl(unsigned int set)
41{ 41{
42 unsigned int res; 42 unsigned int res;
43 res = read_c0_errctl(); 43 res = read_c0_errctl();
@@ -45,7 +45,7 @@ static __cpuinit unsigned int bis_c0_errctl(unsigned int set)
45 return res; 45 return res;
46} 46}
47 47
48static __cpuinit void ispram_store_tag(unsigned int offset, unsigned int data) 48static void ispram_store_tag(unsigned int offset, unsigned int data)
49{ 49{
50 unsigned int errctl; 50 unsigned int errctl;
51 51
@@ -64,7 +64,7 @@ static __cpuinit void ispram_store_tag(unsigned int offset, unsigned int data)
64} 64}
65 65
66 66
67static __cpuinit unsigned int ispram_load_tag(unsigned int offset) 67static unsigned int ispram_load_tag(unsigned int offset)
68{ 68{
69 unsigned int data; 69 unsigned int data;
70 unsigned int errctl; 70 unsigned int errctl;
@@ -82,7 +82,7 @@ static __cpuinit unsigned int ispram_load_tag(unsigned int offset)
82 return data; 82 return data;
83} 83}
84 84
85static __cpuinit void dspram_store_tag(unsigned int offset, unsigned int data) 85static void dspram_store_tag(unsigned int offset, unsigned int data)
86{ 86{
87 unsigned int errctl; 87 unsigned int errctl;
88 88
@@ -98,7 +98,7 @@ static __cpuinit void dspram_store_tag(unsigned int offset, unsigned int data)
98} 98}
99 99
100 100
101static __cpuinit unsigned int dspram_load_tag(unsigned int offset) 101static unsigned int dspram_load_tag(unsigned int offset)
102{ 102{
103 unsigned int data; 103 unsigned int data;
104 unsigned int errctl; 104 unsigned int errctl;
@@ -115,7 +115,7 @@ static __cpuinit unsigned int dspram_load_tag(unsigned int offset)
115 return data; 115 return data;
116} 116}
117 117
118static __cpuinit void probe_spram(char *type, 118static void probe_spram(char *type,
119 unsigned int base, 119 unsigned int base,
120 unsigned int (*read)(unsigned int), 120 unsigned int (*read)(unsigned int),
121 void (*write)(unsigned int, unsigned int)) 121 void (*write)(unsigned int, unsigned int))
@@ -196,7 +196,7 @@ static __cpuinit void probe_spram(char *type,
196 offset += 2 * SPRAM_TAG_STRIDE; 196 offset += 2 * SPRAM_TAG_STRIDE;
197 } 197 }
198} 198}
199void __cpuinit spram_config(void) 199void spram_config(void)
200{ 200{
201 struct cpuinfo_mips *c = &current_cpu_data; 201 struct cpuinfo_mips *c = &current_cpu_data;
202 unsigned int config0; 202 unsigned int config0;
diff --git a/arch/mips/kernel/sync-r4k.c b/arch/mips/kernel/sync-r4k.c
index 1ff43d5ac2c4..84536bf4a154 100644
--- a/arch/mips/kernel/sync-r4k.c
+++ b/arch/mips/kernel/sync-r4k.c
@@ -20,15 +20,15 @@
20#include <asm/barrier.h> 20#include <asm/barrier.h>
21#include <asm/mipsregs.h> 21#include <asm/mipsregs.h>
22 22
23static atomic_t __cpuinitdata count_start_flag = ATOMIC_INIT(0); 23static atomic_t count_start_flag = ATOMIC_INIT(0);
24static atomic_t __cpuinitdata count_count_start = ATOMIC_INIT(0); 24static atomic_t count_count_start = ATOMIC_INIT(0);
25static atomic_t __cpuinitdata count_count_stop = ATOMIC_INIT(0); 25static atomic_t count_count_stop = ATOMIC_INIT(0);
26static atomic_t __cpuinitdata count_reference = ATOMIC_INIT(0); 26static atomic_t count_reference = ATOMIC_INIT(0);
27 27
28#define COUNTON 100 28#define COUNTON 100
29#define NR_LOOPS 5 29#define NR_LOOPS 5
30 30
31void __cpuinit synchronise_count_master(int cpu) 31void synchronise_count_master(int cpu)
32{ 32{
33 int i; 33 int i;
34 unsigned long flags; 34 unsigned long flags;
@@ -106,7 +106,7 @@ void __cpuinit synchronise_count_master(int cpu)
106 printk("done.\n"); 106 printk("done.\n");
107} 107}
108 108
109void __cpuinit synchronise_count_slave(int cpu) 109void synchronise_count_slave(int cpu)
110{ 110{
111 int i; 111 int i;
112 unsigned int initcount; 112 unsigned int initcount;
diff --git a/arch/mips/kernel/traps.c b/arch/mips/kernel/traps.c
index 0903d70b2cfe..aec3408edd4b 100644
--- a/arch/mips/kernel/traps.c
+++ b/arch/mips/kernel/traps.c
@@ -90,7 +90,7 @@ void (*board_nmi_handler_setup)(void);
90void (*board_ejtag_handler_setup)(void); 90void (*board_ejtag_handler_setup)(void);
91void (*board_bind_eic_interrupt)(int irq, int regset); 91void (*board_bind_eic_interrupt)(int irq, int regset);
92void (*board_ebase_setup)(void); 92void (*board_ebase_setup)(void);
93void __cpuinitdata(*board_cache_error_setup)(void); 93void(*board_cache_error_setup)(void);
94 94
95static void show_raw_backtrace(unsigned long reg29) 95static void show_raw_backtrace(unsigned long reg29)
96{ 96{
@@ -1242,7 +1242,6 @@ asmlinkage void do_mcheck(struct pt_regs *regs)
1242 panic("Caught Machine Check exception - %scaused by multiple " 1242 panic("Caught Machine Check exception - %scaused by multiple "
1243 "matching entries in the TLB.", 1243 "matching entries in the TLB.",
1244 (multi_match) ? "" : "not "); 1244 (multi_match) ? "" : "not ");
1245 exception_exit(prev_state);
1246} 1245}
1247 1246
1248asmlinkage void do_mt(struct pt_regs *regs) 1247asmlinkage void do_mt(struct pt_regs *regs)
@@ -1682,7 +1681,7 @@ int cp0_compare_irq_shift;
1682int cp0_perfcount_irq; 1681int cp0_perfcount_irq;
1683EXPORT_SYMBOL_GPL(cp0_perfcount_irq); 1682EXPORT_SYMBOL_GPL(cp0_perfcount_irq);
1684 1683
1685static int __cpuinitdata noulri; 1684static int noulri;
1686 1685
1687static int __init ulri_disable(char *s) 1686static int __init ulri_disable(char *s)
1688{ 1687{
@@ -1693,7 +1692,7 @@ static int __init ulri_disable(char *s)
1693} 1692}
1694__setup("noulri", ulri_disable); 1693__setup("noulri", ulri_disable);
1695 1694
1696void __cpuinit per_cpu_trap_init(bool is_boot_cpu) 1695void per_cpu_trap_init(bool is_boot_cpu)
1697{ 1696{
1698 unsigned int cpu = smp_processor_id(); 1697 unsigned int cpu = smp_processor_id();
1699 unsigned int status_set = ST0_CU0; 1698 unsigned int status_set = ST0_CU0;
@@ -1810,7 +1809,7 @@ void __cpuinit per_cpu_trap_init(bool is_boot_cpu)
1810} 1809}
1811 1810
1812/* Install CPU exception handler */ 1811/* Install CPU exception handler */
1813void __cpuinit set_handler(unsigned long offset, void *addr, unsigned long size) 1812void set_handler(unsigned long offset, void *addr, unsigned long size)
1814{ 1813{
1815#ifdef CONFIG_CPU_MICROMIPS 1814#ifdef CONFIG_CPU_MICROMIPS
1816 memcpy((void *)(ebase + offset), ((unsigned char *)addr - 1), size); 1815 memcpy((void *)(ebase + offset), ((unsigned char *)addr - 1), size);
@@ -1820,7 +1819,7 @@ void __cpuinit set_handler(unsigned long offset, void *addr, unsigned long size)
1820 local_flush_icache_range(ebase + offset, ebase + offset + size); 1819 local_flush_icache_range(ebase + offset, ebase + offset + size);
1821} 1820}
1822 1821
1823static char panic_null_cerr[] __cpuinitdata = 1822static char panic_null_cerr[] =
1824 "Trying to set NULL cache error exception handler"; 1823 "Trying to set NULL cache error exception handler";
1825 1824
1826/* 1825/*
@@ -1828,7 +1827,7 @@ static char panic_null_cerr[] __cpuinitdata =
1828 * This is suitable only for the cache error exception which is the only 1827 * This is suitable only for the cache error exception which is the only
1829 * exception handler that is being run uncached. 1828 * exception handler that is being run uncached.
1830 */ 1829 */
1831void __cpuinit set_uncached_handler(unsigned long offset, void *addr, 1830void set_uncached_handler(unsigned long offset, void *addr,
1832 unsigned long size) 1831 unsigned long size)
1833{ 1832{
1834 unsigned long uncached_ebase = CKSEG1ADDR(ebase); 1833 unsigned long uncached_ebase = CKSEG1ADDR(ebase);
diff --git a/arch/mips/kernel/watch.c b/arch/mips/kernel/watch.c
index cbdc4de85bb4..2a03abb5bd2c 100644
--- a/arch/mips/kernel/watch.c
+++ b/arch/mips/kernel/watch.c
@@ -100,7 +100,7 @@ void mips_clear_watch_registers(void)
100 } 100 }
101} 101}
102 102
103__cpuinit void mips_probe_watch_registers(struct cpuinfo_mips *c) 103void mips_probe_watch_registers(struct cpuinfo_mips *c)
104{ 104{
105 unsigned int t; 105 unsigned int t;
106 106
diff --git a/arch/mips/kvm/Kconfig b/arch/mips/kvm/Kconfig
index 2c15590e55f7..30e334e823bd 100644
--- a/arch/mips/kvm/Kconfig
+++ b/arch/mips/kvm/Kconfig
@@ -5,7 +5,6 @@ source "virt/kvm/Kconfig"
5 5
6menuconfig VIRTUALIZATION 6menuconfig VIRTUALIZATION
7 bool "Virtualization" 7 bool "Virtualization"
8 depends on HAVE_KVM
9 ---help--- 8 ---help---
10 Say Y here to get to see options for using your Linux host to run 9 Say Y here to get to see options for using your Linux host to run
11 other operating systems inside virtual machines (guests). 10 other operating systems inside virtual machines (guests).
diff --git a/arch/mips/lantiq/irq.c b/arch/mips/lantiq/irq.c
index 51194875f158..eb3e18659630 100644
--- a/arch/mips/lantiq/irq.c
+++ b/arch/mips/lantiq/irq.c
@@ -461,7 +461,7 @@ int __init icu_of_init(struct device_node *node, struct device_node *parent)
461 return 0; 461 return 0;
462} 462}
463 463
464unsigned int __cpuinit get_c0_compare_int(void) 464unsigned int get_c0_compare_int(void)
465{ 465{
466 return MIPS_CPU_TIMER_IRQ; 466 return MIPS_CPU_TIMER_IRQ;
467} 467}
diff --git a/arch/mips/lib/uncached.c b/arch/mips/lib/uncached.c
index 65e3dfc4e585..d8522f8e842a 100644
--- a/arch/mips/lib/uncached.c
+++ b/arch/mips/lib/uncached.c
@@ -36,7 +36,7 @@
36 * values, so we can avoid sharing the same stack area between a cached 36 * values, so we can avoid sharing the same stack area between a cached
37 * and the uncached mode. 37 * and the uncached mode.
38 */ 38 */
39unsigned long __cpuinit run_uncached(void *func) 39unsigned long run_uncached(void *func)
40{ 40{
41 register long sp __asm__("$sp"); 41 register long sp __asm__("$sp");
42 register long ret __asm__("$2"); 42 register long ret __asm__("$2");
diff --git a/arch/mips/mm/c-octeon.c b/arch/mips/mm/c-octeon.c
index 8557fb552863..a0bcdbb81d41 100644
--- a/arch/mips/mm/c-octeon.c
+++ b/arch/mips/mm/c-octeon.c
@@ -180,7 +180,7 @@ static void octeon_flush_kernel_vmap_range(unsigned long vaddr, int size)
180 * Probe Octeon's caches 180 * Probe Octeon's caches
181 * 181 *
182 */ 182 */
183static void __cpuinit probe_octeon(void) 183static void probe_octeon(void)
184{ 184{
185 unsigned long icache_size; 185 unsigned long icache_size;
186 unsigned long dcache_size; 186 unsigned long dcache_size;
@@ -251,7 +251,7 @@ static void __cpuinit probe_octeon(void)
251 } 251 }
252} 252}
253 253
254static void __cpuinit octeon_cache_error_setup(void) 254static void octeon_cache_error_setup(void)
255{ 255{
256 extern char except_vec2_octeon; 256 extern char except_vec2_octeon;
257 set_handler(0x100, &except_vec2_octeon, 0x80); 257 set_handler(0x100, &except_vec2_octeon, 0x80);
@@ -261,7 +261,7 @@ static void __cpuinit octeon_cache_error_setup(void)
261 * Setup the Octeon cache flush routines 261 * Setup the Octeon cache flush routines
262 * 262 *
263 */ 263 */
264void __cpuinit octeon_cache_init(void) 264void octeon_cache_init(void)
265{ 265{
266 probe_octeon(); 266 probe_octeon();
267 267
diff --git a/arch/mips/mm/c-r3k.c b/arch/mips/mm/c-r3k.c
index 704dc735a59d..2fcde0c8ea02 100644
--- a/arch/mips/mm/c-r3k.c
+++ b/arch/mips/mm/c-r3k.c
@@ -26,7 +26,7 @@
26static unsigned long icache_size, dcache_size; /* Size in bytes */ 26static unsigned long icache_size, dcache_size; /* Size in bytes */
27static unsigned long icache_lsize, dcache_lsize; /* Size in bytes */ 27static unsigned long icache_lsize, dcache_lsize; /* Size in bytes */
28 28
29unsigned long __cpuinit r3k_cache_size(unsigned long ca_flags) 29unsigned long r3k_cache_size(unsigned long ca_flags)
30{ 30{
31 unsigned long flags, status, dummy, size; 31 unsigned long flags, status, dummy, size;
32 volatile unsigned long *p; 32 volatile unsigned long *p;
@@ -61,7 +61,7 @@ unsigned long __cpuinit r3k_cache_size(unsigned long ca_flags)
61 return size * sizeof(*p); 61 return size * sizeof(*p);
62} 62}
63 63
64unsigned long __cpuinit r3k_cache_lsize(unsigned long ca_flags) 64unsigned long r3k_cache_lsize(unsigned long ca_flags)
65{ 65{
66 unsigned long flags, status, lsize, i; 66 unsigned long flags, status, lsize, i;
67 volatile unsigned long *p; 67 volatile unsigned long *p;
@@ -90,7 +90,7 @@ unsigned long __cpuinit r3k_cache_lsize(unsigned long ca_flags)
90 return lsize * sizeof(*p); 90 return lsize * sizeof(*p);
91} 91}
92 92
93static void __cpuinit r3k_probe_cache(void) 93static void r3k_probe_cache(void)
94{ 94{
95 dcache_size = r3k_cache_size(ST0_ISC); 95 dcache_size = r3k_cache_size(ST0_ISC);
96 if (dcache_size) 96 if (dcache_size)
@@ -312,7 +312,7 @@ static void r3k_dma_cache_wback_inv(unsigned long start, unsigned long size)
312 r3k_flush_dcache_range(start, start + size); 312 r3k_flush_dcache_range(start, start + size);
313} 313}
314 314
315void __cpuinit r3k_cache_init(void) 315void r3k_cache_init(void)
316{ 316{
317 extern void build_clear_page(void); 317 extern void build_clear_page(void);
318 extern void build_copy_page(void); 318 extern void build_copy_page(void);
diff --git a/arch/mips/mm/c-r4k.c b/arch/mips/mm/c-r4k.c
index 21813beec7a5..f749f687ee87 100644
--- a/arch/mips/mm/c-r4k.c
+++ b/arch/mips/mm/c-r4k.c
@@ -107,7 +107,7 @@ static inline void r4k_blast_dcache_page_dc64(unsigned long addr)
107 blast_dcache64_page(addr); 107 blast_dcache64_page(addr);
108} 108}
109 109
110static void __cpuinit r4k_blast_dcache_page_setup(void) 110static void r4k_blast_dcache_page_setup(void)
111{ 111{
112 unsigned long dc_lsize = cpu_dcache_line_size(); 112 unsigned long dc_lsize = cpu_dcache_line_size();
113 113
@@ -123,7 +123,7 @@ static void __cpuinit r4k_blast_dcache_page_setup(void)
123 123
124static void (* r4k_blast_dcache_page_indexed)(unsigned long addr); 124static void (* r4k_blast_dcache_page_indexed)(unsigned long addr);
125 125
126static void __cpuinit r4k_blast_dcache_page_indexed_setup(void) 126static void r4k_blast_dcache_page_indexed_setup(void)
127{ 127{
128 unsigned long dc_lsize = cpu_dcache_line_size(); 128 unsigned long dc_lsize = cpu_dcache_line_size();
129 129
@@ -140,7 +140,7 @@ static void __cpuinit r4k_blast_dcache_page_indexed_setup(void)
140void (* r4k_blast_dcache)(void); 140void (* r4k_blast_dcache)(void);
141EXPORT_SYMBOL(r4k_blast_dcache); 141EXPORT_SYMBOL(r4k_blast_dcache);
142 142
143static void __cpuinit r4k_blast_dcache_setup(void) 143static void r4k_blast_dcache_setup(void)
144{ 144{
145 unsigned long dc_lsize = cpu_dcache_line_size(); 145 unsigned long dc_lsize = cpu_dcache_line_size();
146 146
@@ -227,7 +227,7 @@ static inline void tx49_blast_icache32_page_indexed(unsigned long page)
227 227
228static void (* r4k_blast_icache_page)(unsigned long addr); 228static void (* r4k_blast_icache_page)(unsigned long addr);
229 229
230static void __cpuinit r4k_blast_icache_page_setup(void) 230static void r4k_blast_icache_page_setup(void)
231{ 231{
232 unsigned long ic_lsize = cpu_icache_line_size(); 232 unsigned long ic_lsize = cpu_icache_line_size();
233 233
@@ -244,7 +244,7 @@ static void __cpuinit r4k_blast_icache_page_setup(void)
244 244
245static void (* r4k_blast_icache_page_indexed)(unsigned long addr); 245static void (* r4k_blast_icache_page_indexed)(unsigned long addr);
246 246
247static void __cpuinit r4k_blast_icache_page_indexed_setup(void) 247static void r4k_blast_icache_page_indexed_setup(void)
248{ 248{
249 unsigned long ic_lsize = cpu_icache_line_size(); 249 unsigned long ic_lsize = cpu_icache_line_size();
250 250
@@ -269,7 +269,7 @@ static void __cpuinit r4k_blast_icache_page_indexed_setup(void)
269void (* r4k_blast_icache)(void); 269void (* r4k_blast_icache)(void);
270EXPORT_SYMBOL(r4k_blast_icache); 270EXPORT_SYMBOL(r4k_blast_icache);
271 271
272static void __cpuinit r4k_blast_icache_setup(void) 272static void r4k_blast_icache_setup(void)
273{ 273{
274 unsigned long ic_lsize = cpu_icache_line_size(); 274 unsigned long ic_lsize = cpu_icache_line_size();
275 275
@@ -290,7 +290,7 @@ static void __cpuinit r4k_blast_icache_setup(void)
290 290
291static void (* r4k_blast_scache_page)(unsigned long addr); 291static void (* r4k_blast_scache_page)(unsigned long addr);
292 292
293static void __cpuinit r4k_blast_scache_page_setup(void) 293static void r4k_blast_scache_page_setup(void)
294{ 294{
295 unsigned long sc_lsize = cpu_scache_line_size(); 295 unsigned long sc_lsize = cpu_scache_line_size();
296 296
@@ -308,7 +308,7 @@ static void __cpuinit r4k_blast_scache_page_setup(void)
308 308
309static void (* r4k_blast_scache_page_indexed)(unsigned long addr); 309static void (* r4k_blast_scache_page_indexed)(unsigned long addr);
310 310
311static void __cpuinit r4k_blast_scache_page_indexed_setup(void) 311static void r4k_blast_scache_page_indexed_setup(void)
312{ 312{
313 unsigned long sc_lsize = cpu_scache_line_size(); 313 unsigned long sc_lsize = cpu_scache_line_size();
314 314
@@ -326,7 +326,7 @@ static void __cpuinit r4k_blast_scache_page_indexed_setup(void)
326 326
327static void (* r4k_blast_scache)(void); 327static void (* r4k_blast_scache)(void);
328 328
329static void __cpuinit r4k_blast_scache_setup(void) 329static void r4k_blast_scache_setup(void)
330{ 330{
331 unsigned long sc_lsize = cpu_scache_line_size(); 331 unsigned long sc_lsize = cpu_scache_line_size();
332 332
@@ -797,11 +797,11 @@ static inline void alias_74k_erratum(struct cpuinfo_mips *c)
797 } 797 }
798} 798}
799 799
800static char *way_string[] __cpuinitdata = { NULL, "direct mapped", "2-way", 800static char *way_string[] = { NULL, "direct mapped", "2-way",
801 "3-way", "4-way", "5-way", "6-way", "7-way", "8-way" 801 "3-way", "4-way", "5-way", "6-way", "7-way", "8-way"
802}; 802};
803 803
804static void __cpuinit probe_pcache(void) 804static void probe_pcache(void)
805{ 805{
806 struct cpuinfo_mips *c = &current_cpu_data; 806 struct cpuinfo_mips *c = &current_cpu_data;
807 unsigned int config = read_c0_config(); 807 unsigned int config = read_c0_config();
@@ -1119,7 +1119,7 @@ static void __cpuinit probe_pcache(void)
1119 * executes in KSEG1 space or else you will crash and burn badly. You have 1119 * executes in KSEG1 space or else you will crash and burn badly. You have
1120 * been warned. 1120 * been warned.
1121 */ 1121 */
1122static int __cpuinit probe_scache(void) 1122static int probe_scache(void)
1123{ 1123{
1124 unsigned long flags, addr, begin, end, pow2; 1124 unsigned long flags, addr, begin, end, pow2;
1125 unsigned int config = read_c0_config(); 1125 unsigned int config = read_c0_config();
@@ -1196,7 +1196,7 @@ extern int r5k_sc_init(void);
1196extern int rm7k_sc_init(void); 1196extern int rm7k_sc_init(void);
1197extern int mips_sc_init(void); 1197extern int mips_sc_init(void);
1198 1198
1199static void __cpuinit setup_scache(void) 1199static void setup_scache(void)
1200{ 1200{
1201 struct cpuinfo_mips *c = &current_cpu_data; 1201 struct cpuinfo_mips *c = &current_cpu_data;
1202 unsigned int config = read_c0_config(); 1202 unsigned int config = read_c0_config();
@@ -1329,7 +1329,7 @@ static void nxp_pr4450_fixup_config(void)
1329 NXP_BARRIER(); 1329 NXP_BARRIER();
1330} 1330}
1331 1331
1332static int __cpuinitdata cca = -1; 1332static int cca = -1;
1333 1333
1334static int __init cca_setup(char *str) 1334static int __init cca_setup(char *str)
1335{ 1335{
@@ -1340,7 +1340,7 @@ static int __init cca_setup(char *str)
1340 1340
1341early_param("cca", cca_setup); 1341early_param("cca", cca_setup);
1342 1342
1343static void __cpuinit coherency_setup(void) 1343static void coherency_setup(void)
1344{ 1344{
1345 if (cca < 0 || cca > 7) 1345 if (cca < 0 || cca > 7)
1346 cca = read_c0_config() & CONF_CM_CMASK; 1346 cca = read_c0_config() & CONF_CM_CMASK;
@@ -1380,7 +1380,7 @@ static void __cpuinit coherency_setup(void)
1380 } 1380 }
1381} 1381}
1382 1382
1383static void __cpuinit r4k_cache_error_setup(void) 1383static void r4k_cache_error_setup(void)
1384{ 1384{
1385 extern char __weak except_vec2_generic; 1385 extern char __weak except_vec2_generic;
1386 extern char __weak except_vec2_sb1; 1386 extern char __weak except_vec2_sb1;
@@ -1398,7 +1398,7 @@ static void __cpuinit r4k_cache_error_setup(void)
1398 } 1398 }
1399} 1399}
1400 1400
1401void __cpuinit r4k_cache_init(void) 1401void r4k_cache_init(void)
1402{ 1402{
1403 extern void build_clear_page(void); 1403 extern void build_clear_page(void);
1404 extern void build_copy_page(void); 1404 extern void build_copy_page(void);
diff --git a/arch/mips/mm/c-tx39.c b/arch/mips/mm/c-tx39.c
index ba9da270289f..8d909dbbf37f 100644
--- a/arch/mips/mm/c-tx39.c
+++ b/arch/mips/mm/c-tx39.c
@@ -344,7 +344,7 @@ static __init void tx39_probe_cache(void)
344 } 344 }
345} 345}
346 346
347void __cpuinit tx39_cache_init(void) 347void tx39_cache_init(void)
348{ 348{
349 extern void build_clear_page(void); 349 extern void build_clear_page(void);
350 extern void build_copy_page(void); 350 extern void build_copy_page(void);
diff --git a/arch/mips/mm/cache.c b/arch/mips/mm/cache.c
index 5aeb3eb0b72f..15f813c303b4 100644
--- a/arch/mips/mm/cache.c
+++ b/arch/mips/mm/cache.c
@@ -182,7 +182,7 @@ static inline void setup_protection_map(void)
182 } 182 }
183} 183}
184 184
185void __cpuinit cpu_cache_init(void) 185void cpu_cache_init(void)
186{ 186{
187 if (cpu_has_3k_cache) { 187 if (cpu_has_3k_cache) {
188 extern void __weak r3k_cache_init(void); 188 extern void __weak r3k_cache_init(void);
diff --git a/arch/mips/mm/cex-sb1.S b/arch/mips/mm/cex-sb1.S
index fe1d887e8d70..191cf6e0c725 100644
--- a/arch/mips/mm/cex-sb1.S
+++ b/arch/mips/mm/cex-sb1.S
@@ -49,8 +49,6 @@
49 * (0x170-0x17f) are used to preserve k0, k1, and ra. 49 * (0x170-0x17f) are used to preserve k0, k1, and ra.
50 */ 50 */
51 51
52 __CPUINIT
53
54LEAF(except_vec2_sb1) 52LEAF(except_vec2_sb1)
55 /* 53 /*
56 * If this error is recoverable, we need to exit the handler 54 * If this error is recoverable, we need to exit the handler
@@ -142,8 +140,6 @@ unrecoverable:
142 140
143END(except_vec2_sb1) 141END(except_vec2_sb1)
144 142
145 __FINIT
146
147 LEAF(handle_vec2_sb1) 143 LEAF(handle_vec2_sb1)
148 mfc0 k0,CP0_CONFIG 144 mfc0 k0,CP0_CONFIG
149 li k1,~CONF_CM_CMASK 145 li k1,~CONF_CM_CMASK
diff --git a/arch/mips/mm/page.c b/arch/mips/mm/page.c
index 2c0bd580b9da..218c2109a55d 100644
--- a/arch/mips/mm/page.c
+++ b/arch/mips/mm/page.c
@@ -66,29 +66,29 @@ UASM_L_LA(_copy_pref_both)
66UASM_L_LA(_copy_pref_store) 66UASM_L_LA(_copy_pref_store)
67 67
68/* We need one branch and therefore one relocation per target label. */ 68/* We need one branch and therefore one relocation per target label. */
69static struct uasm_label __cpuinitdata labels[5]; 69static struct uasm_label labels[5];
70static struct uasm_reloc __cpuinitdata relocs[5]; 70static struct uasm_reloc relocs[5];
71 71
72#define cpu_is_r4600_v1_x() ((read_c0_prid() & 0xfffffff0) == 0x00002010) 72#define cpu_is_r4600_v1_x() ((read_c0_prid() & 0xfffffff0) == 0x00002010)
73#define cpu_is_r4600_v2_x() ((read_c0_prid() & 0xfffffff0) == 0x00002020) 73#define cpu_is_r4600_v2_x() ((read_c0_prid() & 0xfffffff0) == 0x00002020)
74 74
75static int pref_bias_clear_store __cpuinitdata; 75static int pref_bias_clear_store;
76static int pref_bias_copy_load __cpuinitdata; 76static int pref_bias_copy_load;
77static int pref_bias_copy_store __cpuinitdata; 77static int pref_bias_copy_store;
78 78
79static u32 pref_src_mode __cpuinitdata; 79static u32 pref_src_mode;
80static u32 pref_dst_mode __cpuinitdata; 80static u32 pref_dst_mode;
81 81
82static int clear_word_size __cpuinitdata; 82static int clear_word_size;
83static int copy_word_size __cpuinitdata; 83static int copy_word_size;
84 84
85static int half_clear_loop_size __cpuinitdata; 85static int half_clear_loop_size;
86static int half_copy_loop_size __cpuinitdata; 86static int half_copy_loop_size;
87 87
88static int cache_line_size __cpuinitdata; 88static int cache_line_size;
89#define cache_line_mask() (cache_line_size - 1) 89#define cache_line_mask() (cache_line_size - 1)
90 90
91static inline void __cpuinit 91static inline void
92pg_addiu(u32 **buf, unsigned int reg1, unsigned int reg2, unsigned int off) 92pg_addiu(u32 **buf, unsigned int reg1, unsigned int reg2, unsigned int off)
93{ 93{
94 if (cpu_has_64bit_gp_regs && DADDI_WAR && r4k_daddiu_bug()) { 94 if (cpu_has_64bit_gp_regs && DADDI_WAR && r4k_daddiu_bug()) {
@@ -108,7 +108,7 @@ pg_addiu(u32 **buf, unsigned int reg1, unsigned int reg2, unsigned int off)
108 } 108 }
109} 109}
110 110
111static void __cpuinit set_prefetch_parameters(void) 111static void set_prefetch_parameters(void)
112{ 112{
113 if (cpu_has_64bit_gp_regs || cpu_has_64bit_zero_reg) 113 if (cpu_has_64bit_gp_regs || cpu_has_64bit_zero_reg)
114 clear_word_size = 8; 114 clear_word_size = 8;
@@ -199,7 +199,7 @@ static void __cpuinit set_prefetch_parameters(void)
199 4 * copy_word_size)); 199 4 * copy_word_size));
200} 200}
201 201
202static void __cpuinit build_clear_store(u32 **buf, int off) 202static void build_clear_store(u32 **buf, int off)
203{ 203{
204 if (cpu_has_64bit_gp_regs || cpu_has_64bit_zero_reg) { 204 if (cpu_has_64bit_gp_regs || cpu_has_64bit_zero_reg) {
205 uasm_i_sd(buf, ZERO, off, A0); 205 uasm_i_sd(buf, ZERO, off, A0);
@@ -208,7 +208,7 @@ static void __cpuinit build_clear_store(u32 **buf, int off)
208 } 208 }
209} 209}
210 210
211static inline void __cpuinit build_clear_pref(u32 **buf, int off) 211static inline void build_clear_pref(u32 **buf, int off)
212{ 212{
213 if (off & cache_line_mask()) 213 if (off & cache_line_mask())
214 return; 214 return;
@@ -240,7 +240,7 @@ extern u32 __clear_page_end;
240extern u32 __copy_page_start; 240extern u32 __copy_page_start;
241extern u32 __copy_page_end; 241extern u32 __copy_page_end;
242 242
243void __cpuinit build_clear_page(void) 243void build_clear_page(void)
244{ 244{
245 int off; 245 int off;
246 u32 *buf = &__clear_page_start; 246 u32 *buf = &__clear_page_start;
@@ -333,7 +333,7 @@ void __cpuinit build_clear_page(void)
333 pr_debug("\t.set pop\n"); 333 pr_debug("\t.set pop\n");
334} 334}
335 335
336static void __cpuinit build_copy_load(u32 **buf, int reg, int off) 336static void build_copy_load(u32 **buf, int reg, int off)
337{ 337{
338 if (cpu_has_64bit_gp_regs) { 338 if (cpu_has_64bit_gp_regs) {
339 uasm_i_ld(buf, reg, off, A1); 339 uasm_i_ld(buf, reg, off, A1);
@@ -342,7 +342,7 @@ static void __cpuinit build_copy_load(u32 **buf, int reg, int off)
342 } 342 }
343} 343}
344 344
345static void __cpuinit build_copy_store(u32 **buf, int reg, int off) 345static void build_copy_store(u32 **buf, int reg, int off)
346{ 346{
347 if (cpu_has_64bit_gp_regs) { 347 if (cpu_has_64bit_gp_regs) {
348 uasm_i_sd(buf, reg, off, A0); 348 uasm_i_sd(buf, reg, off, A0);
@@ -387,7 +387,7 @@ static inline void build_copy_store_pref(u32 **buf, int off)
387 } 387 }
388} 388}
389 389
390void __cpuinit build_copy_page(void) 390void build_copy_page(void)
391{ 391{
392 int off; 392 int off;
393 u32 *buf = &__copy_page_start; 393 u32 *buf = &__copy_page_start;
diff --git a/arch/mips/mm/sc-ip22.c b/arch/mips/mm/sc-ip22.c
index c6aaed934d53..dc7c5a5214a9 100644
--- a/arch/mips/mm/sc-ip22.c
+++ b/arch/mips/mm/sc-ip22.c
@@ -167,7 +167,7 @@ static struct bcache_ops indy_sc_ops = {
167 .bc_inv = indy_sc_wback_invalidate 167 .bc_inv = indy_sc_wback_invalidate
168}; 168};
169 169
170void __cpuinit indy_sc_init(void) 170void indy_sc_init(void)
171{ 171{
172 if (indy_sc_probe()) { 172 if (indy_sc_probe()) {
173 indy_sc_enable(); 173 indy_sc_enable();
diff --git a/arch/mips/mm/sc-mips.c b/arch/mips/mm/sc-mips.c
index df96da7e939b..5d01392e3518 100644
--- a/arch/mips/mm/sc-mips.c
+++ b/arch/mips/mm/sc-mips.c
@@ -132,7 +132,7 @@ static inline int __init mips_sc_probe(void)
132 return 1; 132 return 1;
133} 133}
134 134
135int __cpuinit mips_sc_init(void) 135int mips_sc_init(void)
136{ 136{
137 int found = mips_sc_probe(); 137 int found = mips_sc_probe();
138 if (found) { 138 if (found) {
diff --git a/arch/mips/mm/sc-r5k.c b/arch/mips/mm/sc-r5k.c
index 8bc67720e145..0216ed6eaa2a 100644
--- a/arch/mips/mm/sc-r5k.c
+++ b/arch/mips/mm/sc-r5k.c
@@ -98,7 +98,7 @@ static struct bcache_ops r5k_sc_ops = {
98 .bc_inv = r5k_dma_cache_inv_sc 98 .bc_inv = r5k_dma_cache_inv_sc
99}; 99};
100 100
101void __cpuinit r5k_sc_init(void) 101void r5k_sc_init(void)
102{ 102{
103 if (r5k_sc_probe()) { 103 if (r5k_sc_probe()) {
104 r5k_sc_enable(); 104 r5k_sc_enable();
diff --git a/arch/mips/mm/sc-rm7k.c b/arch/mips/mm/sc-rm7k.c
index 274af3be1442..aaffbba33706 100644
--- a/arch/mips/mm/sc-rm7k.c
+++ b/arch/mips/mm/sc-rm7k.c
@@ -104,7 +104,7 @@ static void blast_rm7k_tcache(void)
104/* 104/*
105 * This function is executed in uncached address space. 105 * This function is executed in uncached address space.
106 */ 106 */
107static __cpuinit void __rm7k_tc_enable(void) 107static void __rm7k_tc_enable(void)
108{ 108{
109 int i; 109 int i;
110 110
@@ -117,7 +117,7 @@ static __cpuinit void __rm7k_tc_enable(void)
117 cache_op(Index_Store_Tag_T, CKSEG0ADDR(i)); 117 cache_op(Index_Store_Tag_T, CKSEG0ADDR(i));
118} 118}
119 119
120static __cpuinit void rm7k_tc_enable(void) 120static void rm7k_tc_enable(void)
121{ 121{
122 if (read_c0_config() & RM7K_CONF_TE) 122 if (read_c0_config() & RM7K_CONF_TE)
123 return; 123 return;
@@ -130,7 +130,7 @@ static __cpuinit void rm7k_tc_enable(void)
130/* 130/*
131 * This function is executed in uncached address space. 131 * This function is executed in uncached address space.
132 */ 132 */
133static __cpuinit void __rm7k_sc_enable(void) 133static void __rm7k_sc_enable(void)
134{ 134{
135 int i; 135 int i;
136 136
@@ -143,7 +143,7 @@ static __cpuinit void __rm7k_sc_enable(void)
143 cache_op(Index_Store_Tag_SD, CKSEG0ADDR(i)); 143 cache_op(Index_Store_Tag_SD, CKSEG0ADDR(i));
144} 144}
145 145
146static __cpuinit void rm7k_sc_enable(void) 146static void rm7k_sc_enable(void)
147{ 147{
148 if (read_c0_config() & RM7K_CONF_SE) 148 if (read_c0_config() & RM7K_CONF_SE)
149 return; 149 return;
@@ -184,7 +184,7 @@ static struct bcache_ops rm7k_sc_ops = {
184 * This is a probing function like the one found in c-r4k.c, we look for the 184 * This is a probing function like the one found in c-r4k.c, we look for the
185 * wrap around point with different addresses. 185 * wrap around point with different addresses.
186 */ 186 */
187static __cpuinit void __probe_tcache(void) 187static void __probe_tcache(void)
188{ 188{
189 unsigned long flags, addr, begin, end, pow2; 189 unsigned long flags, addr, begin, end, pow2;
190 190
@@ -226,7 +226,7 @@ static __cpuinit void __probe_tcache(void)
226 local_irq_restore(flags); 226 local_irq_restore(flags);
227} 227}
228 228
229void __cpuinit rm7k_sc_init(void) 229void rm7k_sc_init(void)
230{ 230{
231 struct cpuinfo_mips *c = &current_cpu_data; 231 struct cpuinfo_mips *c = &current_cpu_data;
232 unsigned int config = read_c0_config(); 232 unsigned int config = read_c0_config();
diff --git a/arch/mips/mm/tlb-r3k.c b/arch/mips/mm/tlb-r3k.c
index a63d1ed0827f..9aca10994cd2 100644
--- a/arch/mips/mm/tlb-r3k.c
+++ b/arch/mips/mm/tlb-r3k.c
@@ -276,7 +276,7 @@ void add_wired_entry(unsigned long entrylo0, unsigned long entrylo1,
276 } 276 }
277} 277}
278 278
279void __cpuinit tlb_init(void) 279void tlb_init(void)
280{ 280{
281 local_flush_tlb_all(); 281 local_flush_tlb_all();
282 282
diff --git a/arch/mips/mm/tlb-r4k.c b/arch/mips/mm/tlb-r4k.c
index c643de4c473a..00b26a67a06d 100644
--- a/arch/mips/mm/tlb-r4k.c
+++ b/arch/mips/mm/tlb-r4k.c
@@ -389,7 +389,7 @@ int __init has_transparent_hugepage(void)
389 389
390#endif /* CONFIG_TRANSPARENT_HUGEPAGE */ 390#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
391 391
392static int __cpuinitdata ntlb; 392static int ntlb;
393static int __init set_ntlb(char *str) 393static int __init set_ntlb(char *str)
394{ 394{
395 get_option(&str, &ntlb); 395 get_option(&str, &ntlb);
@@ -398,7 +398,7 @@ static int __init set_ntlb(char *str)
398 398
399__setup("ntlb=", set_ntlb); 399__setup("ntlb=", set_ntlb);
400 400
401void __cpuinit tlb_init(void) 401void tlb_init(void)
402{ 402{
403 /* 403 /*
404 * You should never change this register: 404 * You should never change this register:
diff --git a/arch/mips/mm/tlb-r8k.c b/arch/mips/mm/tlb-r8k.c
index 91c2499f806a..6a99733a4440 100644
--- a/arch/mips/mm/tlb-r8k.c
+++ b/arch/mips/mm/tlb-r8k.c
@@ -213,14 +213,14 @@ void __update_tlb(struct vm_area_struct * vma, unsigned long address, pte_t pte)
213 local_irq_restore(flags); 213 local_irq_restore(flags);
214} 214}
215 215
216static void __cpuinit probe_tlb(unsigned long config) 216static void probe_tlb(unsigned long config)
217{ 217{
218 struct cpuinfo_mips *c = &current_cpu_data; 218 struct cpuinfo_mips *c = &current_cpu_data;
219 219
220 c->tlbsize = 3 * 128; /* 3 sets each 128 entries */ 220 c->tlbsize = 3 * 128; /* 3 sets each 128 entries */
221} 221}
222 222
223void __cpuinit tlb_init(void) 223void tlb_init(void)
224{ 224{
225 unsigned int config = read_c0_config(); 225 unsigned int config = read_c0_config();
226 unsigned long status; 226 unsigned long status;
diff --git a/arch/mips/mm/tlbex.c b/arch/mips/mm/tlbex.c
index 9ab0f907a52c..556cb4815770 100644
--- a/arch/mips/mm/tlbex.c
+++ b/arch/mips/mm/tlbex.c
@@ -136,7 +136,7 @@ static int scratchpad_offset(int i)
136 * why; it's not an issue caused by the core RTL. 136 * why; it's not an issue caused by the core RTL.
137 * 137 *
138 */ 138 */
139static int __cpuinit m4kc_tlbp_war(void) 139static int m4kc_tlbp_war(void)
140{ 140{
141 return (current_cpu_data.processor_id & 0xffff00) == 141 return (current_cpu_data.processor_id & 0xffff00) ==
142 (PRID_COMP_MIPS | PRID_IMP_4KC); 142 (PRID_COMP_MIPS | PRID_IMP_4KC);
@@ -181,11 +181,9 @@ UASM_L_LA(_large_segbits_fault)
181UASM_L_LA(_tlb_huge_update) 181UASM_L_LA(_tlb_huge_update)
182#endif 182#endif
183 183
184static int __cpuinitdata hazard_instance; 184static int hazard_instance;
185 185
186static void __cpuinit uasm_bgezl_hazard(u32 **p, 186static void uasm_bgezl_hazard(u32 **p, struct uasm_reloc **r, int instance)
187 struct uasm_reloc **r,
188 int instance)
189{ 187{
190 switch (instance) { 188 switch (instance) {
191 case 0 ... 7: 189 case 0 ... 7:
@@ -196,9 +194,7 @@ static void __cpuinit uasm_bgezl_hazard(u32 **p,
196 } 194 }
197} 195}
198 196
199static void __cpuinit uasm_bgezl_label(struct uasm_label **l, 197static void uasm_bgezl_label(struct uasm_label **l, u32 **p, int instance)
200 u32 **p,
201 int instance)
202{ 198{
203 switch (instance) { 199 switch (instance) {
204 case 0 ... 7: 200 case 0 ... 7:
@@ -295,15 +291,15 @@ static inline void dump_handler(const char *symbol, const u32 *handler, int coun
295 * We deliberately chose a buffer size of 128, so we won't scribble 291 * We deliberately chose a buffer size of 128, so we won't scribble
296 * over anything important on overflow before we panic. 292 * over anything important on overflow before we panic.
297 */ 293 */
298static u32 tlb_handler[128] __cpuinitdata; 294static u32 tlb_handler[128];
299 295
300/* simply assume worst case size for labels and relocs */ 296/* simply assume worst case size for labels and relocs */
301static struct uasm_label labels[128] __cpuinitdata; 297static struct uasm_label labels[128];
302static struct uasm_reloc relocs[128] __cpuinitdata; 298static struct uasm_reloc relocs[128];
303 299
304static int check_for_high_segbits __cpuinitdata; 300static int check_for_high_segbits;
305 301
306static unsigned int kscratch_used_mask __cpuinitdata; 302static unsigned int kscratch_used_mask;
307 303
308static inline int __maybe_unused c0_kscratch(void) 304static inline int __maybe_unused c0_kscratch(void)
309{ 305{
@@ -316,7 +312,7 @@ static inline int __maybe_unused c0_kscratch(void)
316 } 312 }
317} 313}
318 314
319static int __cpuinit allocate_kscratch(void) 315static int allocate_kscratch(void)
320{ 316{
321 int r; 317 int r;
322 unsigned int a = cpu_data[0].kscratch_mask & ~kscratch_used_mask; 318 unsigned int a = cpu_data[0].kscratch_mask & ~kscratch_used_mask;
@@ -333,11 +329,11 @@ static int __cpuinit allocate_kscratch(void)
333 return r; 329 return r;
334} 330}
335 331
336static int scratch_reg __cpuinitdata; 332static int scratch_reg;
337static int pgd_reg __cpuinitdata; 333static int pgd_reg;
338enum vmalloc64_mode {not_refill, refill_scratch, refill_noscratch}; 334enum vmalloc64_mode {not_refill, refill_scratch, refill_noscratch};
339 335
340static struct work_registers __cpuinit build_get_work_registers(u32 **p) 336static struct work_registers build_get_work_registers(u32 **p)
341{ 337{
342 struct work_registers r; 338 struct work_registers r;
343 339
@@ -393,7 +389,7 @@ static struct work_registers __cpuinit build_get_work_registers(u32 **p)
393 return r; 389 return r;
394} 390}
395 391
396static void __cpuinit build_restore_work_registers(u32 **p) 392static void build_restore_work_registers(u32 **p)
397{ 393{
398 if (scratch_reg >= 0) { 394 if (scratch_reg >= 0) {
399 UASM_i_MFC0(p, 1, c0_kscratch(), scratch_reg); 395 UASM_i_MFC0(p, 1, c0_kscratch(), scratch_reg);
@@ -418,7 +414,7 @@ extern unsigned long pgd_current[];
418/* 414/*
419 * The R3000 TLB handler is simple. 415 * The R3000 TLB handler is simple.
420 */ 416 */
421static void __cpuinit build_r3000_tlb_refill_handler(void) 417static void build_r3000_tlb_refill_handler(void)
422{ 418{
423 long pgdc = (long)pgd_current; 419 long pgdc = (long)pgd_current;
424 u32 *p; 420 u32 *p;
@@ -463,7 +459,7 @@ static void __cpuinit build_r3000_tlb_refill_handler(void)
463 * other one.To keep things simple, we first assume linear space, 459 * other one.To keep things simple, we first assume linear space,
464 * then we relocate it to the final handler layout as needed. 460 * then we relocate it to the final handler layout as needed.
465 */ 461 */
466static u32 final_handler[64] __cpuinitdata; 462static u32 final_handler[64];
467 463
468/* 464/*
469 * Hazards 465 * Hazards
@@ -487,7 +483,7 @@ static u32 final_handler[64] __cpuinitdata;
487 * 483 *
488 * As if we MIPS hackers wouldn't know how to nop pipelines happy ... 484 * As if we MIPS hackers wouldn't know how to nop pipelines happy ...
489 */ 485 */
490static void __cpuinit __maybe_unused build_tlb_probe_entry(u32 **p) 486static void __maybe_unused build_tlb_probe_entry(u32 **p)
491{ 487{
492 switch (current_cpu_type()) { 488 switch (current_cpu_type()) {
493 /* Found by experiment: R4600 v2.0/R4700 needs this, too. */ 489 /* Found by experiment: R4600 v2.0/R4700 needs this, too. */
@@ -511,9 +507,9 @@ static void __cpuinit __maybe_unused build_tlb_probe_entry(u32 **p)
511 */ 507 */
512enum tlb_write_entry { tlb_random, tlb_indexed }; 508enum tlb_write_entry { tlb_random, tlb_indexed };
513 509
514static void __cpuinit build_tlb_write_entry(u32 **p, struct uasm_label **l, 510static void build_tlb_write_entry(u32 **p, struct uasm_label **l,
515 struct uasm_reloc **r, 511 struct uasm_reloc **r,
516 enum tlb_write_entry wmode) 512 enum tlb_write_entry wmode)
517{ 513{
518 void(*tlbw)(u32 **) = NULL; 514 void(*tlbw)(u32 **) = NULL;
519 515
@@ -647,8 +643,8 @@ static void __cpuinit build_tlb_write_entry(u32 **p, struct uasm_label **l,
647 } 643 }
648} 644}
649 645
650static __cpuinit __maybe_unused void build_convert_pte_to_entrylo(u32 **p, 646static __maybe_unused void build_convert_pte_to_entrylo(u32 **p,
651 unsigned int reg) 647 unsigned int reg)
652{ 648{
653 if (cpu_has_rixi) { 649 if (cpu_has_rixi) {
654 UASM_i_ROTR(p, reg, reg, ilog2(_PAGE_GLOBAL)); 650 UASM_i_ROTR(p, reg, reg, ilog2(_PAGE_GLOBAL));
@@ -663,11 +659,9 @@ static __cpuinit __maybe_unused void build_convert_pte_to_entrylo(u32 **p,
663 659
664#ifdef CONFIG_MIPS_HUGE_TLB_SUPPORT 660#ifdef CONFIG_MIPS_HUGE_TLB_SUPPORT
665 661
666static __cpuinit void build_restore_pagemask(u32 **p, 662static void build_restore_pagemask(u32 **p, struct uasm_reloc **r,
667 struct uasm_reloc **r, 663 unsigned int tmp, enum label_id lid,
668 unsigned int tmp, 664 int restore_scratch)
669 enum label_id lid,
670 int restore_scratch)
671{ 665{
672 if (restore_scratch) { 666 if (restore_scratch) {
673 /* Reset default page size */ 667 /* Reset default page size */
@@ -706,12 +700,11 @@ static __cpuinit void build_restore_pagemask(u32 **p,
706 } 700 }
707} 701}
708 702
709static __cpuinit void build_huge_tlb_write_entry(u32 **p, 703static void build_huge_tlb_write_entry(u32 **p, struct uasm_label **l,
710 struct uasm_label **l, 704 struct uasm_reloc **r,
711 struct uasm_reloc **r, 705 unsigned int tmp,
712 unsigned int tmp, 706 enum tlb_write_entry wmode,
713 enum tlb_write_entry wmode, 707 int restore_scratch)
714 int restore_scratch)
715{ 708{
716 /* Set huge page tlb entry size */ 709 /* Set huge page tlb entry size */
717 uasm_i_lui(p, tmp, PM_HUGE_MASK >> 16); 710 uasm_i_lui(p, tmp, PM_HUGE_MASK >> 16);
@@ -726,9 +719,9 @@ static __cpuinit void build_huge_tlb_write_entry(u32 **p,
726/* 719/*
727 * Check if Huge PTE is present, if so then jump to LABEL. 720 * Check if Huge PTE is present, if so then jump to LABEL.
728 */ 721 */
729static void __cpuinit 722static void
730build_is_huge_pte(u32 **p, struct uasm_reloc **r, unsigned int tmp, 723build_is_huge_pte(u32 **p, struct uasm_reloc **r, unsigned int tmp,
731 unsigned int pmd, int lid) 724 unsigned int pmd, int lid)
732{ 725{
733 UASM_i_LW(p, tmp, 0, pmd); 726 UASM_i_LW(p, tmp, 0, pmd);
734 if (use_bbit_insns()) { 727 if (use_bbit_insns()) {
@@ -739,9 +732,8 @@ build_is_huge_pte(u32 **p, struct uasm_reloc **r, unsigned int tmp,
739 } 732 }
740} 733}
741 734
742static __cpuinit void build_huge_update_entries(u32 **p, 735static void build_huge_update_entries(u32 **p, unsigned int pte,
743 unsigned int pte, 736 unsigned int tmp)
744 unsigned int tmp)
745{ 737{
746 int small_sequence; 738 int small_sequence;
747 739
@@ -771,11 +763,10 @@ static __cpuinit void build_huge_update_entries(u32 **p,
771 UASM_i_MTC0(p, pte, C0_ENTRYLO1); /* load it */ 763 UASM_i_MTC0(p, pte, C0_ENTRYLO1); /* load it */
772} 764}
773 765
774static __cpuinit void build_huge_handler_tail(u32 **p, 766static void build_huge_handler_tail(u32 **p, struct uasm_reloc **r,
775 struct uasm_reloc **r, 767 struct uasm_label **l,
776 struct uasm_label **l, 768 unsigned int pte,
777 unsigned int pte, 769 unsigned int ptr)
778 unsigned int ptr)
779{ 770{
780#ifdef CONFIG_SMP 771#ifdef CONFIG_SMP
781 UASM_i_SC(p, pte, 0, ptr); 772 UASM_i_SC(p, pte, 0, ptr);
@@ -794,7 +785,7 @@ static __cpuinit void build_huge_handler_tail(u32 **p,
794 * TMP and PTR are scratch. 785 * TMP and PTR are scratch.
795 * TMP will be clobbered, PTR will hold the pmd entry. 786 * TMP will be clobbered, PTR will hold the pmd entry.
796 */ 787 */
797static void __cpuinit 788static void
798build_get_pmde64(u32 **p, struct uasm_label **l, struct uasm_reloc **r, 789build_get_pmde64(u32 **p, struct uasm_label **l, struct uasm_reloc **r,
799 unsigned int tmp, unsigned int ptr) 790 unsigned int tmp, unsigned int ptr)
800{ 791{
@@ -886,7 +877,7 @@ build_get_pmde64(u32 **p, struct uasm_label **l, struct uasm_reloc **r,
886 * BVADDR is the faulting address, PTR is scratch. 877 * BVADDR is the faulting address, PTR is scratch.
887 * PTR will hold the pgd for vmalloc. 878 * PTR will hold the pgd for vmalloc.
888 */ 879 */
889static void __cpuinit 880static void
890build_get_pgd_vmalloc64(u32 **p, struct uasm_label **l, struct uasm_reloc **r, 881build_get_pgd_vmalloc64(u32 **p, struct uasm_label **l, struct uasm_reloc **r,
891 unsigned int bvaddr, unsigned int ptr, 882 unsigned int bvaddr, unsigned int ptr,
892 enum vmalloc64_mode mode) 883 enum vmalloc64_mode mode)
@@ -956,7 +947,7 @@ build_get_pgd_vmalloc64(u32 **p, struct uasm_label **l, struct uasm_reloc **r,
956 * TMP and PTR are scratch. 947 * TMP and PTR are scratch.
957 * TMP will be clobbered, PTR will hold the pgd entry. 948 * TMP will be clobbered, PTR will hold the pgd entry.
958 */ 949 */
959static void __cpuinit __maybe_unused 950static void __maybe_unused
960build_get_pgde32(u32 **p, unsigned int tmp, unsigned int ptr) 951build_get_pgde32(u32 **p, unsigned int tmp, unsigned int ptr)
961{ 952{
962 long pgdc = (long)pgd_current; 953 long pgdc = (long)pgd_current;
@@ -991,7 +982,7 @@ build_get_pgde32(u32 **p, unsigned int tmp, unsigned int ptr)
991 982
992#endif /* !CONFIG_64BIT */ 983#endif /* !CONFIG_64BIT */
993 984
994static void __cpuinit build_adjust_context(u32 **p, unsigned int ctx) 985static void build_adjust_context(u32 **p, unsigned int ctx)
995{ 986{
996 unsigned int shift = 4 - (PTE_T_LOG2 + 1) + PAGE_SHIFT - 12; 987 unsigned int shift = 4 - (PTE_T_LOG2 + 1) + PAGE_SHIFT - 12;
997 unsigned int mask = (PTRS_PER_PTE / 2 - 1) << (PTE_T_LOG2 + 1); 988 unsigned int mask = (PTRS_PER_PTE / 2 - 1) << (PTE_T_LOG2 + 1);
@@ -1017,7 +1008,7 @@ static void __cpuinit build_adjust_context(u32 **p, unsigned int ctx)
1017 uasm_i_andi(p, ctx, ctx, mask); 1008 uasm_i_andi(p, ctx, ctx, mask);
1018} 1009}
1019 1010
1020static void __cpuinit build_get_ptep(u32 **p, unsigned int tmp, unsigned int ptr) 1011static void build_get_ptep(u32 **p, unsigned int tmp, unsigned int ptr)
1021{ 1012{
1022 /* 1013 /*
1023 * Bug workaround for the Nevada. It seems as if under certain 1014 * Bug workaround for the Nevada. It seems as if under certain
@@ -1042,8 +1033,7 @@ static void __cpuinit build_get_ptep(u32 **p, unsigned int tmp, unsigned int ptr
1042 UASM_i_ADDU(p, ptr, ptr, tmp); /* add in offset */ 1033 UASM_i_ADDU(p, ptr, ptr, tmp); /* add in offset */
1043} 1034}
1044 1035
1045static void __cpuinit build_update_entries(u32 **p, unsigned int tmp, 1036static void build_update_entries(u32 **p, unsigned int tmp, unsigned int ptep)
1046 unsigned int ptep)
1047{ 1037{
1048 /* 1038 /*
1049 * 64bit address support (36bit on a 32bit CPU) in a 32bit 1039 * 64bit address support (36bit on a 32bit CPU) in a 32bit
@@ -1104,7 +1094,7 @@ struct mips_huge_tlb_info {
1104 int restore_scratch; 1094 int restore_scratch;
1105}; 1095};
1106 1096
1107static struct mips_huge_tlb_info __cpuinit 1097static struct mips_huge_tlb_info
1108build_fast_tlb_refill_handler (u32 **p, struct uasm_label **l, 1098build_fast_tlb_refill_handler (u32 **p, struct uasm_label **l,
1109 struct uasm_reloc **r, unsigned int tmp, 1099 struct uasm_reloc **r, unsigned int tmp,
1110 unsigned int ptr, int c0_scratch_reg) 1100 unsigned int ptr, int c0_scratch_reg)
@@ -1282,7 +1272,7 @@ build_fast_tlb_refill_handler (u32 **p, struct uasm_label **l,
1282 */ 1272 */
1283#define MIPS64_REFILL_INSNS 32 1273#define MIPS64_REFILL_INSNS 32
1284 1274
1285static void __cpuinit build_r4000_tlb_refill_handler(void) 1275static void build_r4000_tlb_refill_handler(void)
1286{ 1276{
1287 u32 *p = tlb_handler; 1277 u32 *p = tlb_handler;
1288 struct uasm_label *l = labels; 1278 struct uasm_label *l = labels;
@@ -1462,11 +1452,11 @@ extern u32 handle_tlbm[], handle_tlbm_end[];
1462#ifdef CONFIG_MIPS_PGD_C0_CONTEXT 1452#ifdef CONFIG_MIPS_PGD_C0_CONTEXT
1463extern u32 tlbmiss_handler_setup_pgd[], tlbmiss_handler_setup_pgd_end[]; 1453extern u32 tlbmiss_handler_setup_pgd[], tlbmiss_handler_setup_pgd_end[];
1464 1454
1465static void __cpuinit build_r4000_setup_pgd(void) 1455static void build_r4000_setup_pgd(void)
1466{ 1456{
1467 const int a0 = 4; 1457 const int a0 = 4;
1468 const int a1 = 5; 1458 const int a1 = 5;
1469 u32 *p = tlbmiss_handler_setup_pgd_array; 1459 u32 *p = tlbmiss_handler_setup_pgd;
1470 const int tlbmiss_handler_setup_pgd_size = 1460 const int tlbmiss_handler_setup_pgd_size =
1471 tlbmiss_handler_setup_pgd_end - tlbmiss_handler_setup_pgd; 1461 tlbmiss_handler_setup_pgd_end - tlbmiss_handler_setup_pgd;
1472 struct uasm_label *l = labels; 1462 struct uasm_label *l = labels;
@@ -1513,7 +1503,7 @@ static void __cpuinit build_r4000_setup_pgd(void)
1513} 1503}
1514#endif 1504#endif
1515 1505
1516static void __cpuinit 1506static void
1517iPTE_LW(u32 **p, unsigned int pte, unsigned int ptr) 1507iPTE_LW(u32 **p, unsigned int pte, unsigned int ptr)
1518{ 1508{
1519#ifdef CONFIG_SMP 1509#ifdef CONFIG_SMP
@@ -1533,7 +1523,7 @@ iPTE_LW(u32 **p, unsigned int pte, unsigned int ptr)
1533#endif 1523#endif
1534} 1524}
1535 1525
1536static void __cpuinit 1526static void
1537iPTE_SW(u32 **p, struct uasm_reloc **r, unsigned int pte, unsigned int ptr, 1527iPTE_SW(u32 **p, struct uasm_reloc **r, unsigned int pte, unsigned int ptr,
1538 unsigned int mode) 1528 unsigned int mode)
1539{ 1529{
@@ -1593,7 +1583,7 @@ iPTE_SW(u32 **p, struct uasm_reloc **r, unsigned int pte, unsigned int ptr,
1593 * the page table where this PTE is located, PTE will be re-loaded 1583 * the page table where this PTE is located, PTE will be re-loaded
1594 * with it's original value. 1584 * with it's original value.
1595 */ 1585 */
1596static void __cpuinit 1586static void
1597build_pte_present(u32 **p, struct uasm_reloc **r, 1587build_pte_present(u32 **p, struct uasm_reloc **r,
1598 int pte, int ptr, int scratch, enum label_id lid) 1588 int pte, int ptr, int scratch, enum label_id lid)
1599{ 1589{
@@ -1621,7 +1611,7 @@ build_pte_present(u32 **p, struct uasm_reloc **r,
1621} 1611}
1622 1612
1623/* Make PTE valid, store result in PTR. */ 1613/* Make PTE valid, store result in PTR. */
1624static void __cpuinit 1614static void
1625build_make_valid(u32 **p, struct uasm_reloc **r, unsigned int pte, 1615build_make_valid(u32 **p, struct uasm_reloc **r, unsigned int pte,
1626 unsigned int ptr) 1616 unsigned int ptr)
1627{ 1617{
@@ -1634,7 +1624,7 @@ build_make_valid(u32 **p, struct uasm_reloc **r, unsigned int pte,
1634 * Check if PTE can be written to, if not branch to LABEL. Regardless 1624 * Check if PTE can be written to, if not branch to LABEL. Regardless
1635 * restore PTE with value from PTR when done. 1625 * restore PTE with value from PTR when done.
1636 */ 1626 */
1637static void __cpuinit 1627static void
1638build_pte_writable(u32 **p, struct uasm_reloc **r, 1628build_pte_writable(u32 **p, struct uasm_reloc **r,
1639 unsigned int pte, unsigned int ptr, int scratch, 1629 unsigned int pte, unsigned int ptr, int scratch,
1640 enum label_id lid) 1630 enum label_id lid)
@@ -1654,7 +1644,7 @@ build_pte_writable(u32 **p, struct uasm_reloc **r,
1654/* Make PTE writable, update software status bits as well, then store 1644/* Make PTE writable, update software status bits as well, then store
1655 * at PTR. 1645 * at PTR.
1656 */ 1646 */
1657static void __cpuinit 1647static void
1658build_make_write(u32 **p, struct uasm_reloc **r, unsigned int pte, 1648build_make_write(u32 **p, struct uasm_reloc **r, unsigned int pte,
1659 unsigned int ptr) 1649 unsigned int ptr)
1660{ 1650{
@@ -1668,7 +1658,7 @@ build_make_write(u32 **p, struct uasm_reloc **r, unsigned int pte,
1668 * Check if PTE can be modified, if not branch to LABEL. Regardless 1658 * Check if PTE can be modified, if not branch to LABEL. Regardless
1669 * restore PTE with value from PTR when done. 1659 * restore PTE with value from PTR when done.
1670 */ 1660 */
1671static void __cpuinit 1661static void
1672build_pte_modifiable(u32 **p, struct uasm_reloc **r, 1662build_pte_modifiable(u32 **p, struct uasm_reloc **r,
1673 unsigned int pte, unsigned int ptr, int scratch, 1663 unsigned int pte, unsigned int ptr, int scratch,
1674 enum label_id lid) 1664 enum label_id lid)
@@ -1697,7 +1687,7 @@ build_pte_modifiable(u32 **p, struct uasm_reloc **r,
1697 * This places the pte into ENTRYLO0 and writes it with tlbwi. 1687 * This places the pte into ENTRYLO0 and writes it with tlbwi.
1698 * Then it returns. 1688 * Then it returns.
1699 */ 1689 */
1700static void __cpuinit 1690static void
1701build_r3000_pte_reload_tlbwi(u32 **p, unsigned int pte, unsigned int tmp) 1691build_r3000_pte_reload_tlbwi(u32 **p, unsigned int pte, unsigned int tmp)
1702{ 1692{
1703 uasm_i_mtc0(p, pte, C0_ENTRYLO0); /* cp0 delay */ 1693 uasm_i_mtc0(p, pte, C0_ENTRYLO0); /* cp0 delay */
@@ -1713,7 +1703,7 @@ build_r3000_pte_reload_tlbwi(u32 **p, unsigned int pte, unsigned int tmp)
1713 * may have the probe fail bit set as a result of a trap on a 1703 * may have the probe fail bit set as a result of a trap on a
1714 * kseg2 access, i.e. without refill. Then it returns. 1704 * kseg2 access, i.e. without refill. Then it returns.
1715 */ 1705 */
1716static void __cpuinit 1706static void
1717build_r3000_tlb_reload_write(u32 **p, struct uasm_label **l, 1707build_r3000_tlb_reload_write(u32 **p, struct uasm_label **l,
1718 struct uasm_reloc **r, unsigned int pte, 1708 struct uasm_reloc **r, unsigned int pte,
1719 unsigned int tmp) 1709 unsigned int tmp)
@@ -1731,7 +1721,7 @@ build_r3000_tlb_reload_write(u32 **p, struct uasm_label **l,
1731 uasm_i_rfe(p); /* branch delay */ 1721 uasm_i_rfe(p); /* branch delay */
1732} 1722}
1733 1723
1734static void __cpuinit 1724static void
1735build_r3000_tlbchange_handler_head(u32 **p, unsigned int pte, 1725build_r3000_tlbchange_handler_head(u32 **p, unsigned int pte,
1736 unsigned int ptr) 1726 unsigned int ptr)
1737{ 1727{
@@ -1751,7 +1741,7 @@ build_r3000_tlbchange_handler_head(u32 **p, unsigned int pte,
1751 uasm_i_tlbp(p); /* load delay */ 1741 uasm_i_tlbp(p); /* load delay */
1752} 1742}
1753 1743
1754static void __cpuinit build_r3000_tlb_load_handler(void) 1744static void build_r3000_tlb_load_handler(void)
1755{ 1745{
1756 u32 *p = handle_tlbl; 1746 u32 *p = handle_tlbl;
1757 const int handle_tlbl_size = handle_tlbl_end - handle_tlbl; 1747 const int handle_tlbl_size = handle_tlbl_end - handle_tlbl;
@@ -1782,7 +1772,7 @@ static void __cpuinit build_r3000_tlb_load_handler(void)
1782 dump_handler("r3000_tlb_load", handle_tlbl, handle_tlbl_size); 1772 dump_handler("r3000_tlb_load", handle_tlbl, handle_tlbl_size);
1783} 1773}
1784 1774
1785static void __cpuinit build_r3000_tlb_store_handler(void) 1775static void build_r3000_tlb_store_handler(void)
1786{ 1776{
1787 u32 *p = handle_tlbs; 1777 u32 *p = handle_tlbs;
1788 const int handle_tlbs_size = handle_tlbs_end - handle_tlbs; 1778 const int handle_tlbs_size = handle_tlbs_end - handle_tlbs;
@@ -1803,7 +1793,7 @@ static void __cpuinit build_r3000_tlb_store_handler(void)
1803 uasm_i_j(&p, (unsigned long)tlb_do_page_fault_1 & 0x0fffffff); 1793 uasm_i_j(&p, (unsigned long)tlb_do_page_fault_1 & 0x0fffffff);
1804 uasm_i_nop(&p); 1794 uasm_i_nop(&p);
1805 1795
1806 if (p >= handle_tlbs) 1796 if (p >= handle_tlbs_end)
1807 panic("TLB store handler fastpath space exceeded"); 1797 panic("TLB store handler fastpath space exceeded");
1808 1798
1809 uasm_resolve_relocs(relocs, labels); 1799 uasm_resolve_relocs(relocs, labels);
@@ -1813,7 +1803,7 @@ static void __cpuinit build_r3000_tlb_store_handler(void)
1813 dump_handler("r3000_tlb_store", handle_tlbs, handle_tlbs_size); 1803 dump_handler("r3000_tlb_store", handle_tlbs, handle_tlbs_size);
1814} 1804}
1815 1805
1816static void __cpuinit build_r3000_tlb_modify_handler(void) 1806static void build_r3000_tlb_modify_handler(void)
1817{ 1807{
1818 u32 *p = handle_tlbm; 1808 u32 *p = handle_tlbm;
1819 const int handle_tlbm_size = handle_tlbm_end - handle_tlbm; 1809 const int handle_tlbm_size = handle_tlbm_end - handle_tlbm;
@@ -1848,7 +1838,7 @@ static void __cpuinit build_r3000_tlb_modify_handler(void)
1848/* 1838/*
1849 * R4000 style TLB load/store/modify handlers. 1839 * R4000 style TLB load/store/modify handlers.
1850 */ 1840 */
1851static struct work_registers __cpuinit 1841static struct work_registers
1852build_r4000_tlbchange_handler_head(u32 **p, struct uasm_label **l, 1842build_r4000_tlbchange_handler_head(u32 **p, struct uasm_label **l,
1853 struct uasm_reloc **r) 1843 struct uasm_reloc **r)
1854{ 1844{
@@ -1884,7 +1874,7 @@ build_r4000_tlbchange_handler_head(u32 **p, struct uasm_label **l,
1884 return wr; 1874 return wr;
1885} 1875}
1886 1876
1887static void __cpuinit 1877static void
1888build_r4000_tlbchange_handler_tail(u32 **p, struct uasm_label **l, 1878build_r4000_tlbchange_handler_tail(u32 **p, struct uasm_label **l,
1889 struct uasm_reloc **r, unsigned int tmp, 1879 struct uasm_reloc **r, unsigned int tmp,
1890 unsigned int ptr) 1880 unsigned int ptr)
@@ -1902,7 +1892,7 @@ build_r4000_tlbchange_handler_tail(u32 **p, struct uasm_label **l,
1902#endif 1892#endif
1903} 1893}
1904 1894
1905static void __cpuinit build_r4000_tlb_load_handler(void) 1895static void build_r4000_tlb_load_handler(void)
1906{ 1896{
1907 u32 *p = handle_tlbl; 1897 u32 *p = handle_tlbl;
1908 const int handle_tlbl_size = handle_tlbl_end - handle_tlbl; 1898 const int handle_tlbl_size = handle_tlbl_end - handle_tlbl;
@@ -2085,7 +2075,7 @@ static void __cpuinit build_r4000_tlb_load_handler(void)
2085 dump_handler("r4000_tlb_load", handle_tlbl, handle_tlbl_size); 2075 dump_handler("r4000_tlb_load", handle_tlbl, handle_tlbl_size);
2086} 2076}
2087 2077
2088static void __cpuinit build_r4000_tlb_store_handler(void) 2078static void build_r4000_tlb_store_handler(void)
2089{ 2079{
2090 u32 *p = handle_tlbs; 2080 u32 *p = handle_tlbs;
2091 const int handle_tlbs_size = handle_tlbs_end - handle_tlbs; 2081 const int handle_tlbs_size = handle_tlbs_end - handle_tlbs;
@@ -2140,7 +2130,7 @@ static void __cpuinit build_r4000_tlb_store_handler(void)
2140 dump_handler("r4000_tlb_store", handle_tlbs, handle_tlbs_size); 2130 dump_handler("r4000_tlb_store", handle_tlbs, handle_tlbs_size);
2141} 2131}
2142 2132
2143static void __cpuinit build_r4000_tlb_modify_handler(void) 2133static void build_r4000_tlb_modify_handler(void)
2144{ 2134{
2145 u32 *p = handle_tlbm; 2135 u32 *p = handle_tlbm;
2146 const int handle_tlbm_size = handle_tlbm_end - handle_tlbm; 2136 const int handle_tlbm_size = handle_tlbm_end - handle_tlbm;
@@ -2196,7 +2186,7 @@ static void __cpuinit build_r4000_tlb_modify_handler(void)
2196 dump_handler("r4000_tlb_modify", handle_tlbm, handle_tlbm_size); 2186 dump_handler("r4000_tlb_modify", handle_tlbm, handle_tlbm_size);
2197} 2187}
2198 2188
2199static void __cpuinit flush_tlb_handlers(void) 2189static void flush_tlb_handlers(void)
2200{ 2190{
2201 local_flush_icache_range((unsigned long)handle_tlbl, 2191 local_flush_icache_range((unsigned long)handle_tlbl,
2202 (unsigned long)handle_tlbl_end); 2192 (unsigned long)handle_tlbl_end);
@@ -2210,7 +2200,7 @@ static void __cpuinit flush_tlb_handlers(void)
2210#endif 2200#endif
2211} 2201}
2212 2202
2213void __cpuinit build_tlb_refill_handler(void) 2203void build_tlb_refill_handler(void)
2214{ 2204{
2215 /* 2205 /*
2216 * The refill handler is generated per-CPU, multi-node systems 2206 * The refill handler is generated per-CPU, multi-node systems
diff --git a/arch/mips/mm/uasm-micromips.c b/arch/mips/mm/uasm-micromips.c
index 162ee6d62788..060000fa653c 100644
--- a/arch/mips/mm/uasm-micromips.c
+++ b/arch/mips/mm/uasm-micromips.c
@@ -49,7 +49,7 @@
49 49
50#include "uasm.c" 50#include "uasm.c"
51 51
52static struct insn insn_table_MM[] __uasminitdata = { 52static struct insn insn_table_MM[] = {
53 { insn_addu, M(mm_pool32a_op, 0, 0, 0, 0, mm_addu32_op), RT | RS | RD }, 53 { insn_addu, M(mm_pool32a_op, 0, 0, 0, 0, mm_addu32_op), RT | RS | RD },
54 { insn_addiu, M(mm_addiu32_op, 0, 0, 0, 0, 0), RT | RS | SIMM }, 54 { insn_addiu, M(mm_addiu32_op, 0, 0, 0, 0, 0), RT | RS | SIMM },
55 { insn_and, M(mm_pool32a_op, 0, 0, 0, 0, mm_and_op), RT | RS | RD }, 55 { insn_and, M(mm_pool32a_op, 0, 0, 0, 0, mm_and_op), RT | RS | RD },
@@ -118,7 +118,7 @@ static struct insn insn_table_MM[] __uasminitdata = {
118 118
119#undef M 119#undef M
120 120
121static inline __uasminit u32 build_bimm(s32 arg) 121static inline u32 build_bimm(s32 arg)
122{ 122{
123 WARN(arg > 0xffff || arg < -0x10000, 123 WARN(arg > 0xffff || arg < -0x10000,
124 KERN_WARNING "Micro-assembler field overflow\n"); 124 KERN_WARNING "Micro-assembler field overflow\n");
@@ -128,7 +128,7 @@ static inline __uasminit u32 build_bimm(s32 arg)
128 return ((arg < 0) ? (1 << 15) : 0) | ((arg >> 1) & 0x7fff); 128 return ((arg < 0) ? (1 << 15) : 0) | ((arg >> 1) & 0x7fff);
129} 129}
130 130
131static inline __uasminit u32 build_jimm(u32 arg) 131static inline u32 build_jimm(u32 arg)
132{ 132{
133 133
134 WARN(arg & ~((JIMM_MASK << 2) | 1), 134 WARN(arg & ~((JIMM_MASK << 2) | 1),
@@ -141,7 +141,7 @@ static inline __uasminit u32 build_jimm(u32 arg)
141 * The order of opcode arguments is implicitly left to right, 141 * The order of opcode arguments is implicitly left to right,
142 * starting with RS and ending with FUNC or IMM. 142 * starting with RS and ending with FUNC or IMM.
143 */ 143 */
144static void __uasminit build_insn(u32 **buf, enum opcode opc, ...) 144static void build_insn(u32 **buf, enum opcode opc, ...)
145{ 145{
146 struct insn *ip = NULL; 146 struct insn *ip = NULL;
147 unsigned int i; 147 unsigned int i;
@@ -199,7 +199,7 @@ static void __uasminit build_insn(u32 **buf, enum opcode opc, ...)
199 (*buf)++; 199 (*buf)++;
200} 200}
201 201
202static inline void __uasminit 202static inline void
203__resolve_relocs(struct uasm_reloc *rel, struct uasm_label *lab) 203__resolve_relocs(struct uasm_reloc *rel, struct uasm_label *lab)
204{ 204{
205 long laddr = (long)lab->addr; 205 long laddr = (long)lab->addr;
diff --git a/arch/mips/mm/uasm-mips.c b/arch/mips/mm/uasm-mips.c
index 5fcdd8fe3e83..0c724589854e 100644
--- a/arch/mips/mm/uasm-mips.c
+++ b/arch/mips/mm/uasm-mips.c
@@ -49,7 +49,7 @@
49 49
50#include "uasm.c" 50#include "uasm.c"
51 51
52static struct insn insn_table[] __uasminitdata = { 52static struct insn insn_table[] = {
53 { insn_addiu, M(addiu_op, 0, 0, 0, 0, 0), RS | RT | SIMM }, 53 { insn_addiu, M(addiu_op, 0, 0, 0, 0, 0), RS | RT | SIMM },
54 { insn_addu, M(spec_op, 0, 0, 0, 0, addu_op), RS | RT | RD }, 54 { insn_addu, M(spec_op, 0, 0, 0, 0, addu_op), RS | RT | RD },
55 { insn_andi, M(andi_op, 0, 0, 0, 0, 0), RS | RT | UIMM }, 55 { insn_andi, M(andi_op, 0, 0, 0, 0, 0), RS | RT | UIMM },
@@ -119,7 +119,7 @@ static struct insn insn_table[] __uasminitdata = {
119 119
120#undef M 120#undef M
121 121
122static inline __uasminit u32 build_bimm(s32 arg) 122static inline u32 build_bimm(s32 arg)
123{ 123{
124 WARN(arg > 0x1ffff || arg < -0x20000, 124 WARN(arg > 0x1ffff || arg < -0x20000,
125 KERN_WARNING "Micro-assembler field overflow\n"); 125 KERN_WARNING "Micro-assembler field overflow\n");
@@ -129,7 +129,7 @@ static inline __uasminit u32 build_bimm(s32 arg)
129 return ((arg < 0) ? (1 << 15) : 0) | ((arg >> 2) & 0x7fff); 129 return ((arg < 0) ? (1 << 15) : 0) | ((arg >> 2) & 0x7fff);
130} 130}
131 131
132static inline __uasminit u32 build_jimm(u32 arg) 132static inline u32 build_jimm(u32 arg)
133{ 133{
134 WARN(arg & ~(JIMM_MASK << 2), 134 WARN(arg & ~(JIMM_MASK << 2),
135 KERN_WARNING "Micro-assembler field overflow\n"); 135 KERN_WARNING "Micro-assembler field overflow\n");
@@ -141,7 +141,7 @@ static inline __uasminit u32 build_jimm(u32 arg)
141 * The order of opcode arguments is implicitly left to right, 141 * The order of opcode arguments is implicitly left to right,
142 * starting with RS and ending with FUNC or IMM. 142 * starting with RS and ending with FUNC or IMM.
143 */ 143 */
144static void __uasminit build_insn(u32 **buf, enum opcode opc, ...) 144static void build_insn(u32 **buf, enum opcode opc, ...)
145{ 145{
146 struct insn *ip = NULL; 146 struct insn *ip = NULL;
147 unsigned int i; 147 unsigned int i;
@@ -187,7 +187,7 @@ static void __uasminit build_insn(u32 **buf, enum opcode opc, ...)
187 (*buf)++; 187 (*buf)++;
188} 188}
189 189
190static inline void __uasminit 190static inline void
191__resolve_relocs(struct uasm_reloc *rel, struct uasm_label *lab) 191__resolve_relocs(struct uasm_reloc *rel, struct uasm_label *lab)
192{ 192{
193 long laddr = (long)lab->addr; 193 long laddr = (long)lab->addr;
diff --git a/arch/mips/mm/uasm.c b/arch/mips/mm/uasm.c
index 7eb5e4355d25..b9d14b6c7f58 100644
--- a/arch/mips/mm/uasm.c
+++ b/arch/mips/mm/uasm.c
@@ -63,35 +63,35 @@ struct insn {
63 enum fields fields; 63 enum fields fields;
64}; 64};
65 65
66static inline __uasminit u32 build_rs(u32 arg) 66static inline u32 build_rs(u32 arg)
67{ 67{
68 WARN(arg & ~RS_MASK, KERN_WARNING "Micro-assembler field overflow\n"); 68 WARN(arg & ~RS_MASK, KERN_WARNING "Micro-assembler field overflow\n");
69 69
70 return (arg & RS_MASK) << RS_SH; 70 return (arg & RS_MASK) << RS_SH;
71} 71}
72 72
73static inline __uasminit u32 build_rt(u32 arg) 73static inline u32 build_rt(u32 arg)
74{ 74{
75 WARN(arg & ~RT_MASK, KERN_WARNING "Micro-assembler field overflow\n"); 75 WARN(arg & ~RT_MASK, KERN_WARNING "Micro-assembler field overflow\n");
76 76
77 return (arg & RT_MASK) << RT_SH; 77 return (arg & RT_MASK) << RT_SH;
78} 78}
79 79
80static inline __uasminit u32 build_rd(u32 arg) 80static inline u32 build_rd(u32 arg)
81{ 81{
82 WARN(arg & ~RD_MASK, KERN_WARNING "Micro-assembler field overflow\n"); 82 WARN(arg & ~RD_MASK, KERN_WARNING "Micro-assembler field overflow\n");
83 83
84 return (arg & RD_MASK) << RD_SH; 84 return (arg & RD_MASK) << RD_SH;
85} 85}
86 86
87static inline __uasminit u32 build_re(u32 arg) 87static inline u32 build_re(u32 arg)
88{ 88{
89 WARN(arg & ~RE_MASK, KERN_WARNING "Micro-assembler field overflow\n"); 89 WARN(arg & ~RE_MASK, KERN_WARNING "Micro-assembler field overflow\n");
90 90
91 return (arg & RE_MASK) << RE_SH; 91 return (arg & RE_MASK) << RE_SH;
92} 92}
93 93
94static inline __uasminit u32 build_simm(s32 arg) 94static inline u32 build_simm(s32 arg)
95{ 95{
96 WARN(arg > 0x7fff || arg < -0x8000, 96 WARN(arg > 0x7fff || arg < -0x8000,
97 KERN_WARNING "Micro-assembler field overflow\n"); 97 KERN_WARNING "Micro-assembler field overflow\n");
@@ -99,14 +99,14 @@ static inline __uasminit u32 build_simm(s32 arg)
99 return arg & 0xffff; 99 return arg & 0xffff;
100} 100}
101 101
102static inline __uasminit u32 build_uimm(u32 arg) 102static inline u32 build_uimm(u32 arg)
103{ 103{
104 WARN(arg & ~IMM_MASK, KERN_WARNING "Micro-assembler field overflow\n"); 104 WARN(arg & ~IMM_MASK, KERN_WARNING "Micro-assembler field overflow\n");
105 105
106 return arg & IMM_MASK; 106 return arg & IMM_MASK;
107} 107}
108 108
109static inline __uasminit u32 build_scimm(u32 arg) 109static inline u32 build_scimm(u32 arg)
110{ 110{
111 WARN(arg & ~SCIMM_MASK, 111 WARN(arg & ~SCIMM_MASK,
112 KERN_WARNING "Micro-assembler field overflow\n"); 112 KERN_WARNING "Micro-assembler field overflow\n");
@@ -114,21 +114,21 @@ static inline __uasminit u32 build_scimm(u32 arg)
114 return (arg & SCIMM_MASK) << SCIMM_SH; 114 return (arg & SCIMM_MASK) << SCIMM_SH;
115} 115}
116 116
117static inline __uasminit u32 build_func(u32 arg) 117static inline u32 build_func(u32 arg)
118{ 118{
119 WARN(arg & ~FUNC_MASK, KERN_WARNING "Micro-assembler field overflow\n"); 119 WARN(arg & ~FUNC_MASK, KERN_WARNING "Micro-assembler field overflow\n");
120 120
121 return arg & FUNC_MASK; 121 return arg & FUNC_MASK;
122} 122}
123 123
124static inline __uasminit u32 build_set(u32 arg) 124static inline u32 build_set(u32 arg)
125{ 125{
126 WARN(arg & ~SET_MASK, KERN_WARNING "Micro-assembler field overflow\n"); 126 WARN(arg & ~SET_MASK, KERN_WARNING "Micro-assembler field overflow\n");
127 127
128 return arg & SET_MASK; 128 return arg & SET_MASK;
129} 129}
130 130
131static void __uasminit build_insn(u32 **buf, enum opcode opc, ...); 131static void build_insn(u32 **buf, enum opcode opc, ...);
132 132
133#define I_u1u2u3(op) \ 133#define I_u1u2u3(op) \
134Ip_u1u2u3(op) \ 134Ip_u1u2u3(op) \
@@ -286,7 +286,7 @@ I_u3u1u2(_ldx)
286 286
287#ifdef CONFIG_CPU_CAVIUM_OCTEON 287#ifdef CONFIG_CPU_CAVIUM_OCTEON
288#include <asm/octeon/octeon.h> 288#include <asm/octeon/octeon.h>
289void __uasminit ISAFUNC(uasm_i_pref)(u32 **buf, unsigned int a, signed int b, 289void ISAFUNC(uasm_i_pref)(u32 **buf, unsigned int a, signed int b,
290 unsigned int c) 290 unsigned int c)
291{ 291{
292 if (OCTEON_IS_MODEL(OCTEON_CN63XX_PASS1_X) && a <= 24 && a != 5) 292 if (OCTEON_IS_MODEL(OCTEON_CN63XX_PASS1_X) && a <= 24 && a != 5)
@@ -304,7 +304,7 @@ I_u2s3u1(_pref)
304#endif 304#endif
305 305
306/* Handle labels. */ 306/* Handle labels. */
307void __uasminit ISAFUNC(uasm_build_label)(struct uasm_label **lab, u32 *addr, int lid) 307void ISAFUNC(uasm_build_label)(struct uasm_label **lab, u32 *addr, int lid)
308{ 308{
309 (*lab)->addr = addr; 309 (*lab)->addr = addr;
310 (*lab)->lab = lid; 310 (*lab)->lab = lid;
@@ -312,7 +312,7 @@ void __uasminit ISAFUNC(uasm_build_label)(struct uasm_label **lab, u32 *addr, in
312} 312}
313UASM_EXPORT_SYMBOL(ISAFUNC(uasm_build_label)); 313UASM_EXPORT_SYMBOL(ISAFUNC(uasm_build_label));
314 314
315int __uasminit ISAFUNC(uasm_in_compat_space_p)(long addr) 315int ISAFUNC(uasm_in_compat_space_p)(long addr)
316{ 316{
317 /* Is this address in 32bit compat space? */ 317 /* Is this address in 32bit compat space? */
318#ifdef CONFIG_64BIT 318#ifdef CONFIG_64BIT
@@ -323,7 +323,7 @@ int __uasminit ISAFUNC(uasm_in_compat_space_p)(long addr)
323} 323}
324UASM_EXPORT_SYMBOL(ISAFUNC(uasm_in_compat_space_p)); 324UASM_EXPORT_SYMBOL(ISAFUNC(uasm_in_compat_space_p));
325 325
326static int __uasminit uasm_rel_highest(long val) 326static int uasm_rel_highest(long val)
327{ 327{
328#ifdef CONFIG_64BIT 328#ifdef CONFIG_64BIT
329 return ((((val + 0x800080008000L) >> 48) & 0xffff) ^ 0x8000) - 0x8000; 329 return ((((val + 0x800080008000L) >> 48) & 0xffff) ^ 0x8000) - 0x8000;
@@ -332,7 +332,7 @@ static int __uasminit uasm_rel_highest(long val)
332#endif 332#endif
333} 333}
334 334
335static int __uasminit uasm_rel_higher(long val) 335static int uasm_rel_higher(long val)
336{ 336{
337#ifdef CONFIG_64BIT 337#ifdef CONFIG_64BIT
338 return ((((val + 0x80008000L) >> 32) & 0xffff) ^ 0x8000) - 0x8000; 338 return ((((val + 0x80008000L) >> 32) & 0xffff) ^ 0x8000) - 0x8000;
@@ -341,19 +341,19 @@ static int __uasminit uasm_rel_higher(long val)
341#endif 341#endif
342} 342}
343 343
344int __uasminit ISAFUNC(uasm_rel_hi)(long val) 344int ISAFUNC(uasm_rel_hi)(long val)
345{ 345{
346 return ((((val + 0x8000L) >> 16) & 0xffff) ^ 0x8000) - 0x8000; 346 return ((((val + 0x8000L) >> 16) & 0xffff) ^ 0x8000) - 0x8000;
347} 347}
348UASM_EXPORT_SYMBOL(ISAFUNC(uasm_rel_hi)); 348UASM_EXPORT_SYMBOL(ISAFUNC(uasm_rel_hi));
349 349
350int __uasminit ISAFUNC(uasm_rel_lo)(long val) 350int ISAFUNC(uasm_rel_lo)(long val)
351{ 351{
352 return ((val & 0xffff) ^ 0x8000) - 0x8000; 352 return ((val & 0xffff) ^ 0x8000) - 0x8000;
353} 353}
354UASM_EXPORT_SYMBOL(ISAFUNC(uasm_rel_lo)); 354UASM_EXPORT_SYMBOL(ISAFUNC(uasm_rel_lo));
355 355
356void __uasminit ISAFUNC(UASM_i_LA_mostly)(u32 **buf, unsigned int rs, long addr) 356void ISAFUNC(UASM_i_LA_mostly)(u32 **buf, unsigned int rs, long addr)
357{ 357{
358 if (!ISAFUNC(uasm_in_compat_space_p)(addr)) { 358 if (!ISAFUNC(uasm_in_compat_space_p)(addr)) {
359 ISAFUNC(uasm_i_lui)(buf, rs, uasm_rel_highest(addr)); 359 ISAFUNC(uasm_i_lui)(buf, rs, uasm_rel_highest(addr));
@@ -371,7 +371,7 @@ void __uasminit ISAFUNC(UASM_i_LA_mostly)(u32 **buf, unsigned int rs, long addr)
371} 371}
372UASM_EXPORT_SYMBOL(ISAFUNC(UASM_i_LA_mostly)); 372UASM_EXPORT_SYMBOL(ISAFUNC(UASM_i_LA_mostly));
373 373
374void __uasminit ISAFUNC(UASM_i_LA)(u32 **buf, unsigned int rs, long addr) 374void ISAFUNC(UASM_i_LA)(u32 **buf, unsigned int rs, long addr)
375{ 375{
376 ISAFUNC(UASM_i_LA_mostly)(buf, rs, addr); 376 ISAFUNC(UASM_i_LA_mostly)(buf, rs, addr);
377 if (ISAFUNC(uasm_rel_lo(addr))) { 377 if (ISAFUNC(uasm_rel_lo(addr))) {
@@ -386,8 +386,7 @@ void __uasminit ISAFUNC(UASM_i_LA)(u32 **buf, unsigned int rs, long addr)
386UASM_EXPORT_SYMBOL(ISAFUNC(UASM_i_LA)); 386UASM_EXPORT_SYMBOL(ISAFUNC(UASM_i_LA));
387 387
388/* Handle relocations. */ 388/* Handle relocations. */
389void __uasminit 389void ISAFUNC(uasm_r_mips_pc16)(struct uasm_reloc **rel, u32 *addr, int lid)
390ISAFUNC(uasm_r_mips_pc16)(struct uasm_reloc **rel, u32 *addr, int lid)
391{ 390{
392 (*rel)->addr = addr; 391 (*rel)->addr = addr;
393 (*rel)->type = R_MIPS_PC16; 392 (*rel)->type = R_MIPS_PC16;
@@ -396,11 +395,11 @@ ISAFUNC(uasm_r_mips_pc16)(struct uasm_reloc **rel, u32 *addr, int lid)
396} 395}
397UASM_EXPORT_SYMBOL(ISAFUNC(uasm_r_mips_pc16)); 396UASM_EXPORT_SYMBOL(ISAFUNC(uasm_r_mips_pc16));
398 397
399static inline void __uasminit 398static inline void __resolve_relocs(struct uasm_reloc *rel,
400__resolve_relocs(struct uasm_reloc *rel, struct uasm_label *lab); 399 struct uasm_label *lab);
401 400
402void __uasminit 401void ISAFUNC(uasm_resolve_relocs)(struct uasm_reloc *rel,
403ISAFUNC(uasm_resolve_relocs)(struct uasm_reloc *rel, struct uasm_label *lab) 402 struct uasm_label *lab)
404{ 403{
405 struct uasm_label *l; 404 struct uasm_label *l;
406 405
@@ -411,8 +410,8 @@ ISAFUNC(uasm_resolve_relocs)(struct uasm_reloc *rel, struct uasm_label *lab)
411} 410}
412UASM_EXPORT_SYMBOL(ISAFUNC(uasm_resolve_relocs)); 411UASM_EXPORT_SYMBOL(ISAFUNC(uasm_resolve_relocs));
413 412
414void __uasminit 413void ISAFUNC(uasm_move_relocs)(struct uasm_reloc *rel, u32 *first, u32 *end,
415ISAFUNC(uasm_move_relocs)(struct uasm_reloc *rel, u32 *first, u32 *end, long off) 414 long off)
416{ 415{
417 for (; rel->lab != UASM_LABEL_INVALID; rel++) 416 for (; rel->lab != UASM_LABEL_INVALID; rel++)
418 if (rel->addr >= first && rel->addr < end) 417 if (rel->addr >= first && rel->addr < end)
@@ -420,8 +419,8 @@ ISAFUNC(uasm_move_relocs)(struct uasm_reloc *rel, u32 *first, u32 *end, long off
420} 419}
421UASM_EXPORT_SYMBOL(ISAFUNC(uasm_move_relocs)); 420UASM_EXPORT_SYMBOL(ISAFUNC(uasm_move_relocs));
422 421
423void __uasminit 422void ISAFUNC(uasm_move_labels)(struct uasm_label *lab, u32 *first, u32 *end,
424ISAFUNC(uasm_move_labels)(struct uasm_label *lab, u32 *first, u32 *end, long off) 423 long off)
425{ 424{
426 for (; lab->lab != UASM_LABEL_INVALID; lab++) 425 for (; lab->lab != UASM_LABEL_INVALID; lab++)
427 if (lab->addr >= first && lab->addr < end) 426 if (lab->addr >= first && lab->addr < end)
@@ -429,9 +428,8 @@ ISAFUNC(uasm_move_labels)(struct uasm_label *lab, u32 *first, u32 *end, long off
429} 428}
430UASM_EXPORT_SYMBOL(ISAFUNC(uasm_move_labels)); 429UASM_EXPORT_SYMBOL(ISAFUNC(uasm_move_labels));
431 430
432void __uasminit 431void ISAFUNC(uasm_copy_handler)(struct uasm_reloc *rel, struct uasm_label *lab,
433ISAFUNC(uasm_copy_handler)(struct uasm_reloc *rel, struct uasm_label *lab, u32 *first, 432 u32 *first, u32 *end, u32 *target)
434 u32 *end, u32 *target)
435{ 433{
436 long off = (long)(target - first); 434 long off = (long)(target - first);
437 435
@@ -442,7 +440,7 @@ ISAFUNC(uasm_copy_handler)(struct uasm_reloc *rel, struct uasm_label *lab, u32 *
442} 440}
443UASM_EXPORT_SYMBOL(ISAFUNC(uasm_copy_handler)); 441UASM_EXPORT_SYMBOL(ISAFUNC(uasm_copy_handler));
444 442
445int __uasminit ISAFUNC(uasm_insn_has_bdelay)(struct uasm_reloc *rel, u32 *addr) 443int ISAFUNC(uasm_insn_has_bdelay)(struct uasm_reloc *rel, u32 *addr)
446{ 444{
447 for (; rel->lab != UASM_LABEL_INVALID; rel++) { 445 for (; rel->lab != UASM_LABEL_INVALID; rel++) {
448 if (rel->addr == addr 446 if (rel->addr == addr
@@ -456,83 +454,79 @@ int __uasminit ISAFUNC(uasm_insn_has_bdelay)(struct uasm_reloc *rel, u32 *addr)
456UASM_EXPORT_SYMBOL(ISAFUNC(uasm_insn_has_bdelay)); 454UASM_EXPORT_SYMBOL(ISAFUNC(uasm_insn_has_bdelay));
457 455
458/* Convenience functions for labeled branches. */ 456/* Convenience functions for labeled branches. */
459void __uasminit 457void ISAFUNC(uasm_il_bltz)(u32 **p, struct uasm_reloc **r, unsigned int reg,
460ISAFUNC(uasm_il_bltz)(u32 **p, struct uasm_reloc **r, unsigned int reg, int lid) 458 int lid)
461{ 459{
462 uasm_r_mips_pc16(r, *p, lid); 460 uasm_r_mips_pc16(r, *p, lid);
463 ISAFUNC(uasm_i_bltz)(p, reg, 0); 461 ISAFUNC(uasm_i_bltz)(p, reg, 0);
464} 462}
465UASM_EXPORT_SYMBOL(ISAFUNC(uasm_il_bltz)); 463UASM_EXPORT_SYMBOL(ISAFUNC(uasm_il_bltz));
466 464
467void __uasminit 465void ISAFUNC(uasm_il_b)(u32 **p, struct uasm_reloc **r, int lid)
468ISAFUNC(uasm_il_b)(u32 **p, struct uasm_reloc **r, int lid)
469{ 466{
470 uasm_r_mips_pc16(r, *p, lid); 467 uasm_r_mips_pc16(r, *p, lid);
471 ISAFUNC(uasm_i_b)(p, 0); 468 ISAFUNC(uasm_i_b)(p, 0);
472} 469}
473UASM_EXPORT_SYMBOL(ISAFUNC(uasm_il_b)); 470UASM_EXPORT_SYMBOL(ISAFUNC(uasm_il_b));
474 471
475void __uasminit 472void ISAFUNC(uasm_il_beqz)(u32 **p, struct uasm_reloc **r, unsigned int reg,
476ISAFUNC(uasm_il_beqz)(u32 **p, struct uasm_reloc **r, unsigned int reg, int lid) 473 int lid)
477{ 474{
478 uasm_r_mips_pc16(r, *p, lid); 475 uasm_r_mips_pc16(r, *p, lid);
479 ISAFUNC(uasm_i_beqz)(p, reg, 0); 476 ISAFUNC(uasm_i_beqz)(p, reg, 0);
480} 477}
481UASM_EXPORT_SYMBOL(ISAFUNC(uasm_il_beqz)); 478UASM_EXPORT_SYMBOL(ISAFUNC(uasm_il_beqz));
482 479
483void __uasminit 480void ISAFUNC(uasm_il_beqzl)(u32 **p, struct uasm_reloc **r, unsigned int reg,
484ISAFUNC(uasm_il_beqzl)(u32 **p, struct uasm_reloc **r, unsigned int reg, int lid) 481 int lid)
485{ 482{
486 uasm_r_mips_pc16(r, *p, lid); 483 uasm_r_mips_pc16(r, *p, lid);
487 ISAFUNC(uasm_i_beqzl)(p, reg, 0); 484 ISAFUNC(uasm_i_beqzl)(p, reg, 0);
488} 485}
489UASM_EXPORT_SYMBOL(ISAFUNC(uasm_il_beqzl)); 486UASM_EXPORT_SYMBOL(ISAFUNC(uasm_il_beqzl));
490 487
491void __uasminit 488void ISAFUNC(uasm_il_bne)(u32 **p, struct uasm_reloc **r, unsigned int reg1,
492ISAFUNC(uasm_il_bne)(u32 **p, struct uasm_reloc **r, unsigned int reg1, 489 unsigned int reg2, int lid)
493 unsigned int reg2, int lid)
494{ 490{
495 uasm_r_mips_pc16(r, *p, lid); 491 uasm_r_mips_pc16(r, *p, lid);
496 ISAFUNC(uasm_i_bne)(p, reg1, reg2, 0); 492 ISAFUNC(uasm_i_bne)(p, reg1, reg2, 0);
497} 493}
498UASM_EXPORT_SYMBOL(ISAFUNC(uasm_il_bne)); 494UASM_EXPORT_SYMBOL(ISAFUNC(uasm_il_bne));
499 495
500void __uasminit 496void ISAFUNC(uasm_il_bnez)(u32 **p, struct uasm_reloc **r, unsigned int reg,
501ISAFUNC(uasm_il_bnez)(u32 **p, struct uasm_reloc **r, unsigned int reg, int lid) 497 int lid)
502{ 498{
503 uasm_r_mips_pc16(r, *p, lid); 499 uasm_r_mips_pc16(r, *p, lid);
504 ISAFUNC(uasm_i_bnez)(p, reg, 0); 500 ISAFUNC(uasm_i_bnez)(p, reg, 0);
505} 501}
506UASM_EXPORT_SYMBOL(ISAFUNC(uasm_il_bnez)); 502UASM_EXPORT_SYMBOL(ISAFUNC(uasm_il_bnez));
507 503
508void __uasminit 504void ISAFUNC(uasm_il_bgezl)(u32 **p, struct uasm_reloc **r, unsigned int reg,
509ISAFUNC(uasm_il_bgezl)(u32 **p, struct uasm_reloc **r, unsigned int reg, int lid) 505 int lid)
510{ 506{
511 uasm_r_mips_pc16(r, *p, lid); 507 uasm_r_mips_pc16(r, *p, lid);
512 ISAFUNC(uasm_i_bgezl)(p, reg, 0); 508 ISAFUNC(uasm_i_bgezl)(p, reg, 0);
513} 509}
514UASM_EXPORT_SYMBOL(ISAFUNC(uasm_il_bgezl)); 510UASM_EXPORT_SYMBOL(ISAFUNC(uasm_il_bgezl));
515 511
516void __uasminit 512void ISAFUNC(uasm_il_bgez)(u32 **p, struct uasm_reloc **r, unsigned int reg,
517ISAFUNC(uasm_il_bgez)(u32 **p, struct uasm_reloc **r, unsigned int reg, int lid) 513 int lid)
518{ 514{
519 uasm_r_mips_pc16(r, *p, lid); 515 uasm_r_mips_pc16(r, *p, lid);
520 ISAFUNC(uasm_i_bgez)(p, reg, 0); 516 ISAFUNC(uasm_i_bgez)(p, reg, 0);
521} 517}
522UASM_EXPORT_SYMBOL(ISAFUNC(uasm_il_bgez)); 518UASM_EXPORT_SYMBOL(ISAFUNC(uasm_il_bgez));
523 519
524void __uasminit 520void ISAFUNC(uasm_il_bbit0)(u32 **p, struct uasm_reloc **r, unsigned int reg,
525ISAFUNC(uasm_il_bbit0)(u32 **p, struct uasm_reloc **r, unsigned int reg, 521 unsigned int bit, int lid)
526 unsigned int bit, int lid)
527{ 522{
528 uasm_r_mips_pc16(r, *p, lid); 523 uasm_r_mips_pc16(r, *p, lid);
529 ISAFUNC(uasm_i_bbit0)(p, reg, bit, 0); 524 ISAFUNC(uasm_i_bbit0)(p, reg, bit, 0);
530} 525}
531UASM_EXPORT_SYMBOL(ISAFUNC(uasm_il_bbit0)); 526UASM_EXPORT_SYMBOL(ISAFUNC(uasm_il_bbit0));
532 527
533void __uasminit 528void ISAFUNC(uasm_il_bbit1)(u32 **p, struct uasm_reloc **r, unsigned int reg,
534ISAFUNC(uasm_il_bbit1)(u32 **p, struct uasm_reloc **r, unsigned int reg, 529 unsigned int bit, int lid)
535 unsigned int bit, int lid)
536{ 530{
537 uasm_r_mips_pc16(r, *p, lid); 531 uasm_r_mips_pc16(r, *p, lid);
538 ISAFUNC(uasm_i_bbit1)(p, reg, bit, 0); 532 ISAFUNC(uasm_i_bbit1)(p, reg, bit, 0);
diff --git a/arch/mips/mti-malta/malta-smtc.c b/arch/mips/mti-malta/malta-smtc.c
index becbf47506a5..c4849904f013 100644
--- a/arch/mips/mti-malta/malta-smtc.c
+++ b/arch/mips/mti-malta/malta-smtc.c
@@ -32,7 +32,7 @@ static void msmtc_send_ipi_mask(const struct cpumask *mask, unsigned int action)
32/* 32/*
33 * Post-config but pre-boot cleanup entry point 33 * Post-config but pre-boot cleanup entry point
34 */ 34 */
35static void __cpuinit msmtc_init_secondary(void) 35static void msmtc_init_secondary(void)
36{ 36{
37 int myvpe; 37 int myvpe;
38 38
@@ -53,7 +53,7 @@ static void __cpuinit msmtc_init_secondary(void)
53/* 53/*
54 * Platform "CPU" startup hook 54 * Platform "CPU" startup hook
55 */ 55 */
56static void __cpuinit msmtc_boot_secondary(int cpu, struct task_struct *idle) 56static void msmtc_boot_secondary(int cpu, struct task_struct *idle)
57{ 57{
58 smtc_boot_secondary(cpu, idle); 58 smtc_boot_secondary(cpu, idle);
59} 59}
@@ -61,7 +61,7 @@ static void __cpuinit msmtc_boot_secondary(int cpu, struct task_struct *idle)
61/* 61/*
62 * SMP initialization finalization entry point 62 * SMP initialization finalization entry point
63 */ 63 */
64static void __cpuinit msmtc_smp_finish(void) 64static void msmtc_smp_finish(void)
65{ 65{
66 smtc_smp_finish(); 66 smtc_smp_finish();
67} 67}
diff --git a/arch/mips/mti-malta/malta-time.c b/arch/mips/mti-malta/malta-time.c
index 0ad305f75802..53aad4a35375 100644
--- a/arch/mips/mti-malta/malta-time.c
+++ b/arch/mips/mti-malta/malta-time.c
@@ -150,7 +150,7 @@ static void __init plat_perf_setup(void)
150 } 150 }
151} 151}
152 152
153unsigned int __cpuinit get_c0_compare_int(void) 153unsigned int get_c0_compare_int(void)
154{ 154{
155#ifdef MSC01E_INT_BASE 155#ifdef MSC01E_INT_BASE
156 if (cpu_has_veic) { 156 if (cpu_has_veic) {
diff --git a/arch/mips/mti-sead3/sead3-time.c b/arch/mips/mti-sead3/sead3-time.c
index 96b42eb9b5e2..a43ea3cc0a3b 100644
--- a/arch/mips/mti-sead3/sead3-time.c
+++ b/arch/mips/mti-sead3/sead3-time.c
@@ -91,7 +91,7 @@ static void __init plat_perf_setup(void)
91 } 91 }
92} 92}
93 93
94unsigned int __cpuinit get_c0_compare_int(void) 94unsigned int get_c0_compare_int(void)
95{ 95{
96 if (cpu_has_vint) 96 if (cpu_has_vint)
97 set_vi_handler(cp0_compare_irq, mips_timer_dispatch); 97 set_vi_handler(cp0_compare_irq, mips_timer_dispatch);
diff --git a/arch/mips/netlogic/common/irq.c b/arch/mips/netlogic/common/irq.c
index 73facb2b33bb..1c7e3a1b81ab 100644
--- a/arch/mips/netlogic/common/irq.c
+++ b/arch/mips/netlogic/common/irq.c
@@ -40,6 +40,10 @@
40#include <linux/slab.h> 40#include <linux/slab.h>
41#include <linux/irq.h> 41#include <linux/irq.h>
42 42
43#include <linux/irqdomain.h>
44#include <linux/of_address.h>
45#include <linux/of_irq.h>
46
43#include <asm/errno.h> 47#include <asm/errno.h>
44#include <asm/signal.h> 48#include <asm/signal.h>
45#include <asm/ptrace.h> 49#include <asm/ptrace.h>
@@ -223,17 +227,6 @@ static void nlm_init_node_irqs(int node)
223 nodep->irqmask = irqmask; 227 nodep->irqmask = irqmask;
224} 228}
225 229
226void __init arch_init_irq(void)
227{
228 /* Initialize the irq descriptors */
229 nlm_init_percpu_irqs();
230 nlm_init_node_irqs(0);
231 write_c0_eimr(nlm_current_node()->irqmask);
232#if defined(CONFIG_CPU_XLR)
233 nlm_setup_fmn_irq();
234#endif
235}
236
237void nlm_smp_irq_init(int hwcpuid) 230void nlm_smp_irq_init(int hwcpuid)
238{ 231{
239 int node, cpu; 232 int node, cpu;
@@ -266,3 +259,56 @@ asmlinkage void plat_irq_dispatch(void)
266 /* top level irq handling */ 259 /* top level irq handling */
267 do_IRQ(nlm_irq_to_xirq(node, i)); 260 do_IRQ(nlm_irq_to_xirq(node, i));
268} 261}
262
263#ifdef CONFIG_OF
264static struct irq_domain *xlp_pic_domain;
265
266static const struct irq_domain_ops xlp_pic_irq_domain_ops = {
267 .xlate = irq_domain_xlate_onetwocell,
268};
269
270static int __init xlp_of_pic_init(struct device_node *node,
271 struct device_node *parent)
272{
273 const int n_picirqs = PIC_IRT_LAST_IRQ - PIC_IRQ_BASE + 1;
274 struct resource res;
275 int socid, ret;
276
277 /* we need a hack to get the PIC's SoC chip id */
278 ret = of_address_to_resource(node, 0, &res);
279 if (ret < 0) {
280 pr_err("PIC %s: reg property not found!\n", node->name);
281 return -EINVAL;
282 }
283 socid = (res.start >> 18) & 0x3;
284 xlp_pic_domain = irq_domain_add_legacy(node, n_picirqs,
285 nlm_irq_to_xirq(socid, PIC_IRQ_BASE), PIC_IRQ_BASE,
286 &xlp_pic_irq_domain_ops, NULL);
287 if (xlp_pic_domain == NULL) {
288 pr_err("PIC %s: Creating legacy domain failed!\n", node->name);
289 return -EINVAL;
290 }
291 pr_info("Node %d: IRQ domain created for PIC@%pa\n", socid,
292 &res.start);
293 return 0;
294}
295
296static struct of_device_id __initdata xlp_pic_irq_ids[] = {
297 { .compatible = "netlogic,xlp-pic", .data = xlp_of_pic_init },
298 {},
299};
300#endif
301
302void __init arch_init_irq(void)
303{
304 /* Initialize the irq descriptors */
305 nlm_init_percpu_irqs();
306 nlm_init_node_irqs(0);
307 write_c0_eimr(nlm_current_node()->irqmask);
308#if defined(CONFIG_CPU_XLR)
309 nlm_setup_fmn_irq();
310#endif
311#if defined(CONFIG_OF)
312 of_irq_init(xlp_pic_irq_ids);
313#endif
314}
diff --git a/arch/mips/netlogic/common/smp.c b/arch/mips/netlogic/common/smp.c
index 885d293b61da..4e35d9c453e2 100644
--- a/arch/mips/netlogic/common/smp.c
+++ b/arch/mips/netlogic/common/smp.c
@@ -116,7 +116,7 @@ void nlm_early_init_secondary(int cpu)
116/* 116/*
117 * Code to run on secondary just after probing the CPU 117 * Code to run on secondary just after probing the CPU
118 */ 118 */
119static void __cpuinit nlm_init_secondary(void) 119static void nlm_init_secondary(void)
120{ 120{
121 int hwtid; 121 int hwtid;
122 122
@@ -252,7 +252,7 @@ unsupp:
252 return 0; 252 return 0;
253} 253}
254 254
255int __cpuinit nlm_wakeup_secondary_cpus(void) 255int nlm_wakeup_secondary_cpus(void)
256{ 256{
257 u32 *reset_data; 257 u32 *reset_data;
258 int threadmode; 258 int threadmode;
diff --git a/arch/mips/netlogic/common/smpboot.S b/arch/mips/netlogic/common/smpboot.S
index 528c46c5a170..aa6cff0a229b 100644
--- a/arch/mips/netlogic/common/smpboot.S
+++ b/arch/mips/netlogic/common/smpboot.S
@@ -70,7 +70,6 @@ FEXPORT(xlp_boot_core0_siblings) /* "Master" cpu starts from here */
70 nop 70 nop
71 /* not reached */ 71 /* not reached */
72 72
73 __CPUINIT
74NESTED(nlm_boot_secondary_cpus, 16, sp) 73NESTED(nlm_boot_secondary_cpus, 16, sp)
75 /* Initialize CP0 Status */ 74 /* Initialize CP0 Status */
76 move t1, zero 75 move t1, zero
@@ -94,7 +93,6 @@ NESTED(nlm_boot_secondary_cpus, 16, sp)
94 jr t0 93 jr t0
95 nop 94 nop
96END(nlm_boot_secondary_cpus) 95END(nlm_boot_secondary_cpus)
97 __FINIT
98 96
99/* 97/*
100 * In case of RMIboot bootloader which is used on XLR boards, the CPUs 98 * In case of RMIboot bootloader which is used on XLR boards, the CPUs
@@ -102,7 +100,6 @@ END(nlm_boot_secondary_cpus)
102 * This will get them out of the bootloader code and into linux. Needed 100 * This will get them out of the bootloader code and into linux. Needed
103 * because the bootloader area will be taken and initialized by linux. 101 * because the bootloader area will be taken and initialized by linux.
104 */ 102 */
105 __CPUINIT
106NESTED(nlm_rmiboot_preboot, 16, sp) 103NESTED(nlm_rmiboot_preboot, 16, sp)
107 mfc0 t0, $15, 1 /* read ebase */ 104 mfc0 t0, $15, 1 /* read ebase */
108 andi t0, 0x1f /* t0 has the processor_id() */ 105 andi t0, 0x1f /* t0 has the processor_id() */
@@ -140,4 +137,3 @@ NESTED(nlm_rmiboot_preboot, 16, sp)
140 b 1b 137 b 1b
141 nop 138 nop
142END(nlm_rmiboot_preboot) 139END(nlm_rmiboot_preboot)
143 __FINIT
diff --git a/arch/mips/netlogic/common/time.c b/arch/mips/netlogic/common/time.c
index 5c56555380bb..045a396c57ce 100644
--- a/arch/mips/netlogic/common/time.c
+++ b/arch/mips/netlogic/common/time.c
@@ -54,7 +54,7 @@
54#error "Unknown CPU" 54#error "Unknown CPU"
55#endif 55#endif
56 56
57unsigned int __cpuinit get_c0_compare_int(void) 57unsigned int get_c0_compare_int(void)
58{ 58{
59 return IRQ_TIMER; 59 return IRQ_TIMER;
60} 60}
diff --git a/arch/mips/netlogic/dts/xlp_evp.dts b/arch/mips/netlogic/dts/xlp_evp.dts
index e14f42308064..06407033678e 100644
--- a/arch/mips/netlogic/dts/xlp_evp.dts
+++ b/arch/mips/netlogic/dts/xlp_evp.dts
@@ -76,10 +76,11 @@
76 }; 76 };
77 }; 77 };
78 pic: pic@4000 { 78 pic: pic@4000 {
79 interrupt-controller; 79 compatible = "netlogic,xlp-pic";
80 #address-cells = <0>; 80 #address-cells = <0>;
81 #interrupt-cells = <1>; 81 #interrupt-cells = <1>;
82 reg = <0 0x4000 0x200>; 82 reg = <0 0x4000 0x200>;
83 interrupt-controller;
83 }; 84 };
84 85
85 nor_flash@1,0 { 86 nor_flash@1,0 {
diff --git a/arch/mips/netlogic/dts/xlp_svp.dts b/arch/mips/netlogic/dts/xlp_svp.dts
index 8af4bdbe5d99..9c5db102df53 100644
--- a/arch/mips/netlogic/dts/xlp_svp.dts
+++ b/arch/mips/netlogic/dts/xlp_svp.dts
@@ -76,10 +76,11 @@
76 }; 76 };
77 }; 77 };
78 pic: pic@4000 { 78 pic: pic@4000 {
79 interrupt-controller; 79 compatible = "netlogic,xlp-pic";
80 #address-cells = <0>; 80 #address-cells = <0>;
81 #interrupt-cells = <1>; 81 #interrupt-cells = <1>;
82 reg = <0 0x4000 0x200>; 82 reg = <0 0x4000 0x200>;
83 interrupt-controller;
83 }; 84 };
84 85
85 nor_flash@1,0 { 86 nor_flash@1,0 {
diff --git a/arch/mips/netlogic/xlp/usb-init.c b/arch/mips/netlogic/xlp/usb-init.c
index 9c401dd78337..ef3897ef0dc7 100644
--- a/arch/mips/netlogic/xlp/usb-init.c
+++ b/arch/mips/netlogic/xlp/usb-init.c
@@ -119,7 +119,7 @@ static u64 xlp_usb_dmamask = ~(u32)0;
119static void nlm_usb_fixup_final(struct pci_dev *dev) 119static void nlm_usb_fixup_final(struct pci_dev *dev)
120{ 120{
121 dev->dev.dma_mask = &xlp_usb_dmamask; 121 dev->dev.dma_mask = &xlp_usb_dmamask;
122 dev->dev.coherent_dma_mask = DMA_BIT_MASK(64); 122 dev->dev.coherent_dma_mask = DMA_BIT_MASK(32);
123 switch (dev->devfn) { 123 switch (dev->devfn) {
124 case 0x10: 124 case 0x10:
125 dev->irq = PIC_EHCI_0_IRQ; 125 dev->irq = PIC_EHCI_0_IRQ;
diff --git a/arch/mips/netlogic/xlr/wakeup.c b/arch/mips/netlogic/xlr/wakeup.c
index c06e4c9f0478..9fb81fa6272a 100644
--- a/arch/mips/netlogic/xlr/wakeup.c
+++ b/arch/mips/netlogic/xlr/wakeup.c
@@ -49,7 +49,7 @@
49#include <asm/netlogic/xlr/iomap.h> 49#include <asm/netlogic/xlr/iomap.h>
50#include <asm/netlogic/xlr/pic.h> 50#include <asm/netlogic/xlr/pic.h>
51 51
52int __cpuinit xlr_wakeup_secondary_cpus(void) 52int xlr_wakeup_secondary_cpus(void)
53{ 53{
54 struct nlm_soc_info *nodep; 54 struct nlm_soc_info *nodep;
55 unsigned int i, j, boot_cpu; 55 unsigned int i, j, boot_cpu;
diff --git a/arch/mips/oprofile/op_model_mipsxx.c b/arch/mips/oprofile/op_model_mipsxx.c
index e4b1140cdae0..3a2b6e9f25cf 100644
--- a/arch/mips/oprofile/op_model_mipsxx.c
+++ b/arch/mips/oprofile/op_model_mipsxx.c
@@ -166,7 +166,7 @@ static void mipsxx_reg_setup(struct op_counter_config *ctr)
166 reg.control[i] |= M_PERFCTL_USER; 166 reg.control[i] |= M_PERFCTL_USER;
167 if (ctr[i].exl) 167 if (ctr[i].exl)
168 reg.control[i] |= M_PERFCTL_EXL; 168 reg.control[i] |= M_PERFCTL_EXL;
169 if (current_cpu_type() == CPU_XLR) 169 if (boot_cpu_type() == CPU_XLR)
170 reg.control[i] |= M_PERFCTL_COUNT_ALL_THREADS; 170 reg.control[i] |= M_PERFCTL_COUNT_ALL_THREADS;
171 reg.counter[i] = 0x80000000 - ctr[i].count; 171 reg.counter[i] = 0x80000000 - ctr[i].count;
172 } 172 }
diff --git a/arch/mips/pci/pci-ip27.c b/arch/mips/pci/pci-ip27.c
index 7b2ac81e1f59..162b4cb29dba 100644
--- a/arch/mips/pci/pci-ip27.c
+++ b/arch/mips/pci/pci-ip27.c
@@ -42,7 +42,7 @@ int irq_to_slot[MAX_PCI_BUSSES * MAX_DEVICES_PER_PCIBUS];
42 42
43extern struct pci_ops bridge_pci_ops; 43extern struct pci_ops bridge_pci_ops;
44 44
45int __cpuinit bridge_probe(nasid_t nasid, int widget_id, int masterwid) 45int bridge_probe(nasid_t nasid, int widget_id, int masterwid)
46{ 46{
47 unsigned long offset = NODE_OFFSET(nasid); 47 unsigned long offset = NODE_OFFSET(nasid);
48 struct bridge_controller *bc; 48 struct bridge_controller *bc;
diff --git a/arch/mips/pmcs-msp71xx/msp_smtc.c b/arch/mips/pmcs-msp71xx/msp_smtc.c
index c8dcc1c01e18..6b5607fce279 100644
--- a/arch/mips/pmcs-msp71xx/msp_smtc.c
+++ b/arch/mips/pmcs-msp71xx/msp_smtc.c
@@ -33,7 +33,7 @@ static void msp_smtc_send_ipi_mask(const struct cpumask *mask,
33/* 33/*
34 * Post-config but pre-boot cleanup entry point 34 * Post-config but pre-boot cleanup entry point
35 */ 35 */
36static void __cpuinit msp_smtc_init_secondary(void) 36static void msp_smtc_init_secondary(void)
37{ 37{
38 int myvpe; 38 int myvpe;
39 39
@@ -48,8 +48,7 @@ static void __cpuinit msp_smtc_init_secondary(void)
48/* 48/*
49 * Platform "CPU" startup hook 49 * Platform "CPU" startup hook
50 */ 50 */
51static void __cpuinit msp_smtc_boot_secondary(int cpu, 51static void msp_smtc_boot_secondary(int cpu, struct task_struct *idle)
52 struct task_struct *idle)
53{ 52{
54 smtc_boot_secondary(cpu, idle); 53 smtc_boot_secondary(cpu, idle);
55} 54}
@@ -57,7 +56,7 @@ static void __cpuinit msp_smtc_boot_secondary(int cpu,
57/* 56/*
58 * SMP initialization finalization entry point 57 * SMP initialization finalization entry point
59 */ 58 */
60static void __cpuinit msp_smtc_smp_finish(void) 59static void msp_smtc_smp_finish(void)
61{ 60{
62 smtc_smp_finish(); 61 smtc_smp_finish();
63} 62}
diff --git a/arch/mips/pmcs-msp71xx/msp_time.c b/arch/mips/pmcs-msp71xx/msp_time.c
index 8f12ecc55ace..fea917be0ff1 100644
--- a/arch/mips/pmcs-msp71xx/msp_time.c
+++ b/arch/mips/pmcs-msp71xx/msp_time.c
@@ -88,7 +88,7 @@ void __init plat_time_init(void)
88 mips_hpt_frequency = cpu_rate/2; 88 mips_hpt_frequency = cpu_rate/2;
89} 89}
90 90
91unsigned int __cpuinit get_c0_compare_int(void) 91unsigned int get_c0_compare_int(void)
92{ 92{
93 /* MIPS_MT modes may want timer for second VPE */ 93 /* MIPS_MT modes may want timer for second VPE */
94 if ((get_current_vpe()) && !tim_installed) { 94 if ((get_current_vpe()) && !tim_installed) {
diff --git a/arch/mips/pnx833x/common/interrupts.c b/arch/mips/pnx833x/common/interrupts.c
index a4a90596c0ad..e460865873c1 100644
--- a/arch/mips/pnx833x/common/interrupts.c
+++ b/arch/mips/pnx833x/common/interrupts.c
@@ -281,7 +281,7 @@ void __init arch_init_irq(void)
281 write_c0_status(read_c0_status() | IE_IRQ2); 281 write_c0_status(read_c0_status() | IE_IRQ2);
282} 282}
283 283
284unsigned int __cpuinit get_c0_compare_int(void) 284unsigned int get_c0_compare_int(void)
285{ 285{
286 if (cpu_has_vint) 286 if (cpu_has_vint)
287 set_vi_handler(cp0_compare_irq, pnx833x_timer_dispatch); 287 set_vi_handler(cp0_compare_irq, pnx833x_timer_dispatch);
diff --git a/arch/mips/pnx833x/common/platform.c b/arch/mips/pnx833x/common/platform.c
index d22dc0d6f289..2b7e837dc2e2 100644
--- a/arch/mips/pnx833x/common/platform.c
+++ b/arch/mips/pnx833x/common/platform.c
@@ -206,11 +206,13 @@ static struct resource pnx833x_ethernet_resources[] = {
206 .end = PNX8335_IP3902_PORTS_END, 206 .end = PNX8335_IP3902_PORTS_END,
207 .flags = IORESOURCE_MEM, 207 .flags = IORESOURCE_MEM,
208 }, 208 },
209#ifdef CONFIG_SOC_PNX8335
209 [1] = { 210 [1] = {
210 .start = PNX8335_PIC_ETHERNET_INT, 211 .start = PNX8335_PIC_ETHERNET_INT,
211 .end = PNX8335_PIC_ETHERNET_INT, 212 .end = PNX8335_PIC_ETHERNET_INT,
212 .flags = IORESOURCE_IRQ, 213 .flags = IORESOURCE_IRQ,
213 }, 214 },
215#endif
214}; 216};
215 217
216static struct platform_device pnx833x_ethernet_device = { 218static struct platform_device pnx833x_ethernet_device = {
diff --git a/arch/mips/powertv/asic/asic_devices.c b/arch/mips/powertv/asic/asic_devices.c
index 9f64c2387808..0238af1ba503 100644
--- a/arch/mips/powertv/asic/asic_devices.c
+++ b/arch/mips/powertv/asic/asic_devices.c
@@ -529,8 +529,7 @@ EXPORT_SYMBOL(asic_resource_get);
529 */ 529 */
530void platform_release_memory(void *ptr, int size) 530void platform_release_memory(void *ptr, int size)
531{ 531{
532 free_reserved_area((unsigned long)ptr, (unsigned long)(ptr + size), 532 free_reserved_area(ptr, ptr + size, -1, NULL);
533 -1, NULL);
534} 533}
535EXPORT_SYMBOL(platform_release_memory); 534EXPORT_SYMBOL(platform_release_memory);
536 535
diff --git a/arch/mips/powertv/time.c b/arch/mips/powertv/time.c
index 9fd7b67f2af7..f38b0d45eca9 100644
--- a/arch/mips/powertv/time.c
+++ b/arch/mips/powertv/time.c
@@ -25,7 +25,7 @@
25 25
26#include "powertv-clock.h" 26#include "powertv-clock.h"
27 27
28unsigned int __cpuinit get_c0_compare_int(void) 28unsigned int get_c0_compare_int(void)
29{ 29{
30 return irq_mips_timer; 30 return irq_mips_timer;
31} 31}
diff --git a/arch/mips/ralink/irq.c b/arch/mips/ralink/irq.c
index 320b1f1043ff..781b3d14a489 100644
--- a/arch/mips/ralink/irq.c
+++ b/arch/mips/ralink/irq.c
@@ -73,7 +73,7 @@ static struct irq_chip ralink_intc_irq_chip = {
73 .irq_mask_ack = ralink_intc_irq_mask, 73 .irq_mask_ack = ralink_intc_irq_mask,
74}; 74};
75 75
76unsigned int __cpuinit get_c0_compare_int(void) 76unsigned int get_c0_compare_int(void)
77{ 77{
78 return CP0_LEGACY_COMPARE_IRQ; 78 return CP0_LEGACY_COMPARE_IRQ;
79} 79}
diff --git a/arch/mips/sgi-ip27/ip27-init.c b/arch/mips/sgi-ip27/ip27-init.c
index d41b1c6fb032..ee736bd103f8 100644
--- a/arch/mips/sgi-ip27/ip27-init.c
+++ b/arch/mips/sgi-ip27/ip27-init.c
@@ -54,7 +54,7 @@ extern void pcibr_setup(cnodeid_t);
54 54
55extern void xtalk_probe_node(cnodeid_t nid); 55extern void xtalk_probe_node(cnodeid_t nid);
56 56
57static void __cpuinit per_hub_init(cnodeid_t cnode) 57static void per_hub_init(cnodeid_t cnode)
58{ 58{
59 struct hub_data *hub = hub_data(cnode); 59 struct hub_data *hub = hub_data(cnode);
60 nasid_t nasid = COMPACT_TO_NASID_NODEID(cnode); 60 nasid_t nasid = COMPACT_TO_NASID_NODEID(cnode);
@@ -110,7 +110,7 @@ static void __cpuinit per_hub_init(cnodeid_t cnode)
110 } 110 }
111} 111}
112 112
113void __cpuinit per_cpu_init(void) 113void per_cpu_init(void)
114{ 114{
115 int cpu = smp_processor_id(); 115 int cpu = smp_processor_id();
116 int slice = LOCAL_HUB_L(PI_CPU_NUM); 116 int slice = LOCAL_HUB_L(PI_CPU_NUM);
diff --git a/arch/mips/sgi-ip27/ip27-smp.c b/arch/mips/sgi-ip27/ip27-smp.c
index f94638141b20..f4ea8aa79ba2 100644
--- a/arch/mips/sgi-ip27/ip27-smp.c
+++ b/arch/mips/sgi-ip27/ip27-smp.c
@@ -173,12 +173,12 @@ static void ip27_send_ipi_mask(const struct cpumask *mask, unsigned int action)
173 ip27_send_ipi_single(i, action); 173 ip27_send_ipi_single(i, action);
174} 174}
175 175
176static void __cpuinit ip27_init_secondary(void) 176static void ip27_init_secondary(void)
177{ 177{
178 per_cpu_init(); 178 per_cpu_init();
179} 179}
180 180
181static void __cpuinit ip27_smp_finish(void) 181static void ip27_smp_finish(void)
182{ 182{
183 extern void hub_rt_clock_event_init(void); 183 extern void hub_rt_clock_event_init(void);
184 184
@@ -195,7 +195,7 @@ static void __init ip27_cpus_done(void)
195 * set sp to the kernel stack of the newly created idle process, gp to the proc 195 * set sp to the kernel stack of the newly created idle process, gp to the proc
196 * struct so that current_thread_info() will work. 196 * struct so that current_thread_info() will work.
197 */ 197 */
198static void __cpuinit ip27_boot_secondary(int cpu, struct task_struct *idle) 198static void ip27_boot_secondary(int cpu, struct task_struct *idle)
199{ 199{
200 unsigned long gp = (unsigned long)task_thread_info(idle); 200 unsigned long gp = (unsigned long)task_thread_info(idle);
201 unsigned long sp = __KSTK_TOS(idle); 201 unsigned long sp = __KSTK_TOS(idle);
diff --git a/arch/mips/sgi-ip27/ip27-timer.c b/arch/mips/sgi-ip27/ip27-timer.c
index 2e21b761cb9c..1d97eaba0c5f 100644
--- a/arch/mips/sgi-ip27/ip27-timer.c
+++ b/arch/mips/sgi-ip27/ip27-timer.c
@@ -106,7 +106,7 @@ struct irqaction hub_rt_irqaction = {
106#define NSEC_PER_CYCLE 800 106#define NSEC_PER_CYCLE 800
107#define CYCLES_PER_SEC (NSEC_PER_SEC / NSEC_PER_CYCLE) 107#define CYCLES_PER_SEC (NSEC_PER_SEC / NSEC_PER_CYCLE)
108 108
109void __cpuinit hub_rt_clock_event_init(void) 109void hub_rt_clock_event_init(void)
110{ 110{
111 unsigned int cpu = smp_processor_id(); 111 unsigned int cpu = smp_processor_id();
112 struct clock_event_device *cd = &per_cpu(hub_rt_clockevent, cpu); 112 struct clock_event_device *cd = &per_cpu(hub_rt_clockevent, cpu);
@@ -173,7 +173,7 @@ void __init plat_time_init(void)
173 hub_rt_clock_event_init(); 173 hub_rt_clock_event_init();
174} 174}
175 175
176void __cpuinit cpu_time_init(void) 176void cpu_time_init(void)
177{ 177{
178 lboard_t *board; 178 lboard_t *board;
179 klcpu_t *cpu; 179 klcpu_t *cpu;
@@ -194,7 +194,7 @@ void __cpuinit cpu_time_init(void)
194 set_c0_status(SRB_TIMOCLK); 194 set_c0_status(SRB_TIMOCLK);
195} 195}
196 196
197void __cpuinit hub_rtc_init(cnodeid_t cnode) 197void hub_rtc_init(cnodeid_t cnode)
198{ 198{
199 199
200 /* 200 /*
diff --git a/arch/mips/sgi-ip27/ip27-xtalk.c b/arch/mips/sgi-ip27/ip27-xtalk.c
index a4df7d0f6f12..d59b820f528d 100644
--- a/arch/mips/sgi-ip27/ip27-xtalk.c
+++ b/arch/mips/sgi-ip27/ip27-xtalk.c
@@ -23,7 +23,7 @@
23 23
24extern int bridge_probe(nasid_t nasid, int widget, int masterwid); 24extern int bridge_probe(nasid_t nasid, int widget, int masterwid);
25 25
26static int __cpuinit probe_one_port(nasid_t nasid, int widget, int masterwid) 26static int probe_one_port(nasid_t nasid, int widget, int masterwid)
27{ 27{
28 widgetreg_t widget_id; 28 widgetreg_t widget_id;
29 xwidget_part_num_t partnum; 29 xwidget_part_num_t partnum;
@@ -47,7 +47,7 @@ static int __cpuinit probe_one_port(nasid_t nasid, int widget, int masterwid)
47 return 0; 47 return 0;
48} 48}
49 49
50static int __cpuinit xbow_probe(nasid_t nasid) 50static int xbow_probe(nasid_t nasid)
51{ 51{
52 lboard_t *brd; 52 lboard_t *brd;
53 klxbow_t *xbow_p; 53 klxbow_t *xbow_p;
@@ -100,7 +100,7 @@ static int __cpuinit xbow_probe(nasid_t nasid)
100 return 0; 100 return 0;
101} 101}
102 102
103void __cpuinit xtalk_probe_node(cnodeid_t nid) 103void xtalk_probe_node(cnodeid_t nid)
104{ 104{
105 volatile u64 hubreg; 105 volatile u64 hubreg;
106 nasid_t nasid; 106 nasid_t nasid;
diff --git a/arch/mips/sibyte/bcm1480/smp.c b/arch/mips/sibyte/bcm1480/smp.c
index de88e22694a0..54e2c4de15c1 100644
--- a/arch/mips/sibyte/bcm1480/smp.c
+++ b/arch/mips/sibyte/bcm1480/smp.c
@@ -60,7 +60,7 @@ static void *mailbox_0_regs[] = {
60/* 60/*
61 * SMP init and finish on secondary CPUs 61 * SMP init and finish on secondary CPUs
62 */ 62 */
63void __cpuinit bcm1480_smp_init(void) 63void bcm1480_smp_init(void)
64{ 64{
65 unsigned int imask = STATUSF_IP4 | STATUSF_IP3 | STATUSF_IP2 | 65 unsigned int imask = STATUSF_IP4 | STATUSF_IP3 | STATUSF_IP2 |
66 STATUSF_IP1 | STATUSF_IP0; 66 STATUSF_IP1 | STATUSF_IP0;
@@ -95,7 +95,7 @@ static void bcm1480_send_ipi_mask(const struct cpumask *mask,
95/* 95/*
96 * Code to run on secondary just after probing the CPU 96 * Code to run on secondary just after probing the CPU
97 */ 97 */
98static void __cpuinit bcm1480_init_secondary(void) 98static void bcm1480_init_secondary(void)
99{ 99{
100 extern void bcm1480_smp_init(void); 100 extern void bcm1480_smp_init(void);
101 101
@@ -106,7 +106,7 @@ static void __cpuinit bcm1480_init_secondary(void)
106 * Do any tidying up before marking online and running the idle 106 * Do any tidying up before marking online and running the idle
107 * loop 107 * loop
108 */ 108 */
109static void __cpuinit bcm1480_smp_finish(void) 109static void bcm1480_smp_finish(void)
110{ 110{
111 extern void sb1480_clockevent_init(void); 111 extern void sb1480_clockevent_init(void);
112 112
@@ -125,7 +125,7 @@ static void bcm1480_cpus_done(void)
125 * Setup the PC, SP, and GP of a secondary processor and start it 125 * Setup the PC, SP, and GP of a secondary processor and start it
126 * running! 126 * running!
127 */ 127 */
128static void __cpuinit bcm1480_boot_secondary(int cpu, struct task_struct *idle) 128static void bcm1480_boot_secondary(int cpu, struct task_struct *idle)
129{ 129{
130 int retval; 130 int retval;
131 131
diff --git a/arch/mips/sibyte/sb1250/smp.c b/arch/mips/sibyte/sb1250/smp.c
index 285cfef4ebc0..d7b942db0ea5 100644
--- a/arch/mips/sibyte/sb1250/smp.c
+++ b/arch/mips/sibyte/sb1250/smp.c
@@ -48,7 +48,7 @@ static void *mailbox_regs[] = {
48/* 48/*
49 * SMP init and finish on secondary CPUs 49 * SMP init and finish on secondary CPUs
50 */ 50 */
51void __cpuinit sb1250_smp_init(void) 51void sb1250_smp_init(void)
52{ 52{
53 unsigned int imask = STATUSF_IP4 | STATUSF_IP3 | STATUSF_IP2 | 53 unsigned int imask = STATUSF_IP4 | STATUSF_IP3 | STATUSF_IP2 |
54 STATUSF_IP1 | STATUSF_IP0; 54 STATUSF_IP1 | STATUSF_IP0;
@@ -83,7 +83,7 @@ static inline void sb1250_send_ipi_mask(const struct cpumask *mask,
83/* 83/*
84 * Code to run on secondary just after probing the CPU 84 * Code to run on secondary just after probing the CPU
85 */ 85 */
86static void __cpuinit sb1250_init_secondary(void) 86static void sb1250_init_secondary(void)
87{ 87{
88 extern void sb1250_smp_init(void); 88 extern void sb1250_smp_init(void);
89 89
@@ -94,7 +94,7 @@ static void __cpuinit sb1250_init_secondary(void)
94 * Do any tidying up before marking online and running the idle 94 * Do any tidying up before marking online and running the idle
95 * loop 95 * loop
96 */ 96 */
97static void __cpuinit sb1250_smp_finish(void) 97static void sb1250_smp_finish(void)
98{ 98{
99 extern void sb1250_clockevent_init(void); 99 extern void sb1250_clockevent_init(void);
100 100
@@ -113,7 +113,7 @@ static void sb1250_cpus_done(void)
113 * Setup the PC, SP, and GP of a secondary processor and start it 113 * Setup the PC, SP, and GP of a secondary processor and start it
114 * running! 114 * running!
115 */ 115 */
116static void __cpuinit sb1250_boot_secondary(int cpu, struct task_struct *idle) 116static void sb1250_boot_secondary(int cpu, struct task_struct *idle)
117{ 117{
118 int retval; 118 int retval;
119 119
diff --git a/arch/openrisc/kernel/setup.c b/arch/openrisc/kernel/setup.c
index f4d5bedc3b4f..d7359ffbcbdd 100644
--- a/arch/openrisc/kernel/setup.c
+++ b/arch/openrisc/kernel/setup.c
@@ -267,7 +267,7 @@ void __init detect_unit_config(unsigned long upr, unsigned long mask,
267 * 267 *
268 */ 268 */
269 269
270void __cpuinit calibrate_delay(void) 270void calibrate_delay(void)
271{ 271{
272 const int *val; 272 const int *val;
273 struct device_node *cpu = NULL; 273 struct device_node *cpu = NULL;
diff --git a/arch/parisc/configs/c8000_defconfig b/arch/parisc/configs/c8000_defconfig
new file mode 100644
index 000000000000..f11006361297
--- /dev/null
+++ b/arch/parisc/configs/c8000_defconfig
@@ -0,0 +1,279 @@
1# CONFIG_LOCALVERSION_AUTO is not set
2CONFIG_SYSVIPC=y
3CONFIG_POSIX_MQUEUE=y
4CONFIG_FHANDLE=y
5CONFIG_BSD_PROCESS_ACCT=y
6CONFIG_BSD_PROCESS_ACCT_V3=y
7CONFIG_IKCONFIG=y
8CONFIG_IKCONFIG_PROC=y
9CONFIG_RELAY=y
10CONFIG_BLK_DEV_INITRD=y
11CONFIG_RD_BZIP2=y
12CONFIG_RD_LZMA=y
13CONFIG_RD_LZO=y
14CONFIG_EXPERT=y
15CONFIG_SYSCTL_SYSCALL=y
16CONFIG_SLAB=y
17CONFIG_MODULES=y
18CONFIG_MODULE_UNLOAD=y
19CONFIG_MODULE_FORCE_UNLOAD=y
20CONFIG_MODVERSIONS=y
21CONFIG_BLK_DEV_INTEGRITY=y
22CONFIG_PA8X00=y
23CONFIG_MLONGCALLS=y
24CONFIG_64BIT=y
25CONFIG_SMP=y
26CONFIG_PREEMPT=y
27# CONFIG_CROSS_MEMORY_ATTACH is not set
28CONFIG_IOMMU_CCIO=y
29CONFIG_PCI=y
30CONFIG_PCI_LBA=y
31# CONFIG_SUPERIO is not set
32# CONFIG_CHASSIS_LCD_LED is not set
33# CONFIG_PDC_CHASSIS is not set
34# CONFIG_PDC_CHASSIS_WARN is not set
35# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
36CONFIG_BINFMT_MISC=m
37CONFIG_PACKET=y
38CONFIG_UNIX=y
39CONFIG_XFRM_USER=m
40CONFIG_XFRM_SUB_POLICY=y
41CONFIG_NET_KEY=m
42CONFIG_INET=y
43CONFIG_IP_MULTICAST=y
44CONFIG_IP_PNP=y
45CONFIG_IP_PNP_DHCP=y
46CONFIG_IP_PNP_BOOTP=y
47CONFIG_IP_PNP_RARP=y
48CONFIG_NET_IPIP=m
49CONFIG_IP_MROUTE=y
50CONFIG_IP_PIMSM_V1=y
51CONFIG_IP_PIMSM_V2=y
52CONFIG_SYN_COOKIES=y
53CONFIG_INET_AH=m
54CONFIG_INET_ESP=m
55CONFIG_INET_IPCOMP=m
56CONFIG_INET_XFRM_MODE_BEET=m
57CONFIG_INET_DIAG=m
58# CONFIG_IPV6 is not set
59CONFIG_IP_DCCP=m
60# CONFIG_IP_DCCP_CCID3 is not set
61CONFIG_TIPC=m
62CONFIG_LLC2=m
63CONFIG_DNS_RESOLVER=y
64CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
65# CONFIG_STANDALONE is not set
66CONFIG_PARPORT=y
67CONFIG_PARPORT_PC=y
68CONFIG_PARPORT_PC_FIFO=y
69CONFIG_BLK_DEV_UMEM=m
70CONFIG_BLK_DEV_LOOP=m
71CONFIG_BLK_DEV_CRYPTOLOOP=m
72CONFIG_BLK_DEV_SX8=m
73CONFIG_BLK_DEV_RAM=y
74CONFIG_BLK_DEV_RAM_SIZE=6144
75CONFIG_CDROM_PKTCDVD=m
76CONFIG_CDROM_PKTCDVD_WCACHE=y
77CONFIG_ATA_OVER_ETH=m
78CONFIG_IDE=y
79CONFIG_BLK_DEV_IDECD=y
80CONFIG_BLK_DEV_PLATFORM=y
81CONFIG_BLK_DEV_GENERIC=y
82CONFIG_BLK_DEV_SIIMAGE=y
83CONFIG_SCSI=y
84CONFIG_BLK_DEV_SD=y
85CONFIG_CHR_DEV_ST=m
86CONFIG_BLK_DEV_SR=m
87CONFIG_CHR_DEV_SG=y
88CONFIG_CHR_DEV_SCH=m
89CONFIG_SCSI_CONSTANTS=y
90CONFIG_SCSI_LOGGING=y
91CONFIG_SCSI_FC_ATTRS=y
92CONFIG_SCSI_SAS_LIBSAS=m
93CONFIG_ISCSI_TCP=m
94CONFIG_ISCSI_BOOT_SYSFS=m
95CONFIG_FUSION=y
96CONFIG_FUSION_SPI=y
97CONFIG_FUSION_SAS=y
98CONFIG_NETDEVICES=y
99CONFIG_DUMMY=m
100CONFIG_NETCONSOLE=m
101CONFIG_TUN=y
102CONFIG_E1000=y
103CONFIG_PPP=m
104CONFIG_PPP_BSDCOMP=m
105CONFIG_PPP_DEFLATE=m
106CONFIG_PPP_MPPE=m
107CONFIG_PPPOE=m
108CONFIG_PPP_ASYNC=m
109CONFIG_PPP_SYNC_TTY=m
110# CONFIG_WLAN is not set
111CONFIG_INPUT_FF_MEMLESS=m
112# CONFIG_KEYBOARD_ATKBD is not set
113# CONFIG_KEYBOARD_HIL_OLD is not set
114# CONFIG_KEYBOARD_HIL is not set
115CONFIG_MOUSE_PS2=m
116CONFIG_INPUT_MISC=y
117CONFIG_INPUT_CM109=m
118CONFIG_SERIO_SERPORT=m
119CONFIG_SERIO_PARKBD=m
120CONFIG_SERIO_GSCPS2=m
121# CONFIG_HP_SDC is not set
122CONFIG_SERIO_PCIPS2=m
123CONFIG_SERIO_LIBPS2=y
124CONFIG_SERIO_RAW=m
125CONFIG_SERIAL_8250=y
126# CONFIG_SERIAL_8250_DEPRECATED_OPTIONS is not set
127CONFIG_SERIAL_8250_CONSOLE=y
128CONFIG_SERIAL_8250_NR_UARTS=8
129CONFIG_SERIAL_8250_RUNTIME_UARTS=8
130CONFIG_SERIAL_8250_EXTENDED=y
131# CONFIG_SERIAL_MUX is not set
132CONFIG_SERIAL_JSM=m
133CONFIG_PRINTER=y
134CONFIG_HW_RANDOM=y
135CONFIG_RAW_DRIVER=m
136CONFIG_PTP_1588_CLOCK=y
137CONFIG_SSB=m
138CONFIG_SSB_DRIVER_PCICORE=y
139CONFIG_AGP=y
140CONFIG_AGP_PARISC=y
141CONFIG_DRM=y
142CONFIG_DRM_RADEON=y
143CONFIG_FIRMWARE_EDID=y
144CONFIG_FB_FOREIGN_ENDIAN=y
145CONFIG_FB_MODE_HELPERS=y
146CONFIG_FB_TILEBLITTING=y
147# CONFIG_FB_STI is not set
148CONFIG_BACKLIGHT_LCD_SUPPORT=y
149# CONFIG_LCD_CLASS_DEVICE is not set
150# CONFIG_BACKLIGHT_GENERIC is not set
151CONFIG_FRAMEBUFFER_CONSOLE=y
152# CONFIG_STI_CONSOLE is not set
153CONFIG_LOGO=y
154# CONFIG_LOGO_LINUX_MONO is not set
155# CONFIG_LOGO_LINUX_VGA16 is not set
156# CONFIG_LOGO_LINUX_CLUT224 is not set
157CONFIG_SOUND=m
158CONFIG_SND=m
159CONFIG_SND_SEQUENCER=m
160CONFIG_SND_SEQ_DUMMY=m
161CONFIG_SND_MIXER_OSS=m
162CONFIG_SND_PCM_OSS=m
163CONFIG_SND_SEQUENCER_OSS=y
164CONFIG_SND_VERBOSE_PRINTK=y
165CONFIG_SND_AD1889=m
166# CONFIG_SND_USB is not set
167# CONFIG_SND_GSC is not set
168CONFIG_HID_A4TECH=m
169CONFIG_HID_APPLE=m
170CONFIG_HID_BELKIN=m
171CONFIG_HID_CHERRY=m
172CONFIG_HID_CHICONY=m
173CONFIG_HID_CYPRESS=m
174CONFIG_HID_DRAGONRISE=m
175CONFIG_HID_EZKEY=m
176CONFIG_HID_KYE=m
177CONFIG_HID_GYRATION=m
178CONFIG_HID_TWINHAN=m
179CONFIG_HID_KENSINGTON=m
180CONFIG_HID_LOGITECH=m
181CONFIG_HID_LOGITECH_DJ=m
182CONFIG_HID_MICROSOFT=m
183CONFIG_HID_MONTEREY=m
184CONFIG_HID_NTRIG=m
185CONFIG_HID_ORTEK=m
186CONFIG_HID_PANTHERLORD=m
187CONFIG_HID_PETALYNX=m
188CONFIG_HID_SAMSUNG=m
189CONFIG_HID_SUNPLUS=m
190CONFIG_HID_GREENASIA=m
191CONFIG_HID_SMARTJOYPLUS=m
192CONFIG_HID_TOPSEED=m
193CONFIG_HID_THRUSTMASTER=m
194CONFIG_HID_ZEROPLUS=m
195CONFIG_USB_HID=m
196CONFIG_USB=y
197CONFIG_USB_OHCI_HCD=y
198CONFIG_USB_STORAGE=y
199CONFIG_EXT2_FS=y
200CONFIG_EXT2_FS_XATTR=y
201CONFIG_EXT2_FS_POSIX_ACL=y
202CONFIG_EXT2_FS_SECURITY=y
203CONFIG_EXT3_FS=y
204# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set
205CONFIG_EXT4_FS=m
206CONFIG_REISERFS_FS=m
207CONFIG_REISERFS_PROC_INFO=y
208CONFIG_XFS_FS=m
209CONFIG_XFS_POSIX_ACL=y
210CONFIG_QUOTA=y
211CONFIG_QFMT_V1=m
212CONFIG_QFMT_V2=m
213CONFIG_AUTOFS4_FS=m
214CONFIG_FUSE_FS=m
215CONFIG_ISO9660_FS=y
216CONFIG_JOLIET=y
217CONFIG_MSDOS_FS=m
218CONFIG_VFAT_FS=m
219CONFIG_PROC_KCORE=y
220CONFIG_TMPFS=y
221CONFIG_TMPFS_XATTR=y
222CONFIG_NFS_FS=m
223CONFIG_NLS_CODEPAGE_437=m
224CONFIG_NLS_CODEPAGE_737=m
225CONFIG_NLS_CODEPAGE_775=m
226CONFIG_NLS_CODEPAGE_850=m
227CONFIG_NLS_CODEPAGE_852=m
228CONFIG_NLS_CODEPAGE_855=m
229CONFIG_NLS_CODEPAGE_857=m
230CONFIG_NLS_CODEPAGE_860=m
231CONFIG_NLS_CODEPAGE_861=m
232CONFIG_NLS_CODEPAGE_862=m
233CONFIG_NLS_CODEPAGE_863=m
234CONFIG_NLS_CODEPAGE_864=m
235CONFIG_NLS_CODEPAGE_865=m
236CONFIG_NLS_CODEPAGE_866=m
237CONFIG_NLS_CODEPAGE_869=m
238CONFIG_NLS_CODEPAGE_936=m
239CONFIG_NLS_CODEPAGE_950=m
240CONFIG_NLS_CODEPAGE_932=m
241CONFIG_NLS_CODEPAGE_949=m
242CONFIG_NLS_CODEPAGE_874=m
243CONFIG_NLS_ISO8859_8=m
244CONFIG_NLS_CODEPAGE_1250=m
245CONFIG_NLS_CODEPAGE_1251=m
246CONFIG_NLS_ASCII=m
247CONFIG_NLS_ISO8859_1=m
248CONFIG_NLS_ISO8859_2=m
249CONFIG_NLS_ISO8859_3=m
250CONFIG_NLS_ISO8859_4=m
251CONFIG_NLS_ISO8859_5=m
252CONFIG_NLS_ISO8859_6=m
253CONFIG_NLS_ISO8859_7=m
254CONFIG_NLS_ISO8859_9=m
255CONFIG_NLS_ISO8859_13=m
256CONFIG_NLS_ISO8859_14=m
257CONFIG_NLS_ISO8859_15=m
258CONFIG_NLS_KOI8_R=m
259CONFIG_NLS_KOI8_U=m
260CONFIG_NLS_UTF8=m
261CONFIG_UNUSED_SYMBOLS=y
262CONFIG_DEBUG_FS=y
263CONFIG_MAGIC_SYSRQ=y
264CONFIG_DEBUG_SLAB=y
265CONFIG_DEBUG_SLAB_LEAK=y
266CONFIG_DEBUG_MEMORY_INIT=y
267CONFIG_DEBUG_STACKOVERFLOW=y
268CONFIG_LOCKUP_DETECTOR=y
269CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC=y
270CONFIG_PANIC_ON_OOPS=y
271CONFIG_DEBUG_RT_MUTEXES=y
272CONFIG_RT_MUTEX_TESTER=y
273CONFIG_PROVE_RCU_DELAY=y
274CONFIG_DEBUG_BLOCK_EXT_DEVT=y
275CONFIG_LATENCYTOP=y
276CONFIG_DEBUG_STRICT_USER_COPY_CHECKS=y
277CONFIG_KEYS=y
278# CONFIG_CRYPTO_HW is not set
279CONFIG_FONTS=y
diff --git a/arch/parisc/include/asm/parisc-device.h b/arch/parisc/include/asm/parisc-device.h
index 9afdad6c2ffb..eaf4dc1c7294 100644
--- a/arch/parisc/include/asm/parisc-device.h
+++ b/arch/parisc/include/asm/parisc-device.h
@@ -23,6 +23,7 @@ struct parisc_device {
23 /* generic info returned from pdc_pat_cell_module() */ 23 /* generic info returned from pdc_pat_cell_module() */
24 unsigned long mod_info; /* PAT specific - Misc Module info */ 24 unsigned long mod_info; /* PAT specific - Misc Module info */
25 unsigned long pmod_loc; /* physical Module location */ 25 unsigned long pmod_loc; /* physical Module location */
26 unsigned long mod0;
26#endif 27#endif
27 u64 dma_mask; /* DMA mask for I/O */ 28 u64 dma_mask; /* DMA mask for I/O */
28 struct device dev; 29 struct device dev;
@@ -61,4 +62,6 @@ parisc_get_drvdata(struct parisc_device *d)
61 62
62extern struct bus_type parisc_bus_type; 63extern struct bus_type parisc_bus_type;
63 64
65int iosapic_serial_irq(struct parisc_device *dev);
66
64#endif /*_ASM_PARISC_PARISC_DEVICE_H_*/ 67#endif /*_ASM_PARISC_PARISC_DEVICE_H_*/
diff --git a/arch/parisc/kernel/cache.c b/arch/parisc/kernel/cache.c
index 2e65aa54bd10..c035673209f7 100644
--- a/arch/parisc/kernel/cache.c
+++ b/arch/parisc/kernel/cache.c
@@ -71,18 +71,27 @@ flush_cache_all_local(void)
71} 71}
72EXPORT_SYMBOL(flush_cache_all_local); 72EXPORT_SYMBOL(flush_cache_all_local);
73 73
74/* Virtual address of pfn. */
75#define pfn_va(pfn) __va(PFN_PHYS(pfn))
76
74void 77void
75update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t *ptep) 78update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t *ptep)
76{ 79{
77 struct page *page = pte_page(*ptep); 80 unsigned long pfn = pte_pfn(*ptep);
81 struct page *page;
78 82
79 if (pfn_valid(page_to_pfn(page)) && page_mapping(page) && 83 /* We don't have pte special. As a result, we can be called with
80 test_bit(PG_dcache_dirty, &page->flags)) { 84 an invalid pfn and we don't need to flush the kernel dcache page.
85 This occurs with FireGL card in C8000. */
86 if (!pfn_valid(pfn))
87 return;
81 88
82 flush_kernel_dcache_page(page); 89 page = pfn_to_page(pfn);
90 if (page_mapping(page) && test_bit(PG_dcache_dirty, &page->flags)) {
91 flush_kernel_dcache_page_addr(pfn_va(pfn));
83 clear_bit(PG_dcache_dirty, &page->flags); 92 clear_bit(PG_dcache_dirty, &page->flags);
84 } else if (parisc_requires_coherency()) 93 } else if (parisc_requires_coherency())
85 flush_kernel_dcache_page(page); 94 flush_kernel_dcache_page_addr(pfn_va(pfn));
86} 95}
87 96
88void 97void
@@ -495,44 +504,42 @@ static inline pte_t *get_ptep(pgd_t *pgd, unsigned long addr)
495 504
496void flush_cache_mm(struct mm_struct *mm) 505void flush_cache_mm(struct mm_struct *mm)
497{ 506{
507 struct vm_area_struct *vma;
508 pgd_t *pgd;
509
498 /* Flushing the whole cache on each cpu takes forever on 510 /* Flushing the whole cache on each cpu takes forever on
499 rp3440, etc. So, avoid it if the mm isn't too big. */ 511 rp3440, etc. So, avoid it if the mm isn't too big. */
500 if (mm_total_size(mm) < parisc_cache_flush_threshold) { 512 if (mm_total_size(mm) >= parisc_cache_flush_threshold) {
501 struct vm_area_struct *vma; 513 flush_cache_all();
502 514 return;
503 if (mm->context == mfsp(3)) { 515 }
504 for (vma = mm->mmap; vma; vma = vma->vm_next) { 516
505 flush_user_dcache_range_asm(vma->vm_start, 517 if (mm->context == mfsp(3)) {
506 vma->vm_end); 518 for (vma = mm->mmap; vma; vma = vma->vm_next) {
507 if (vma->vm_flags & VM_EXEC) 519 flush_user_dcache_range_asm(vma->vm_start, vma->vm_end);
508 flush_user_icache_range_asm( 520 if ((vma->vm_flags & VM_EXEC) == 0)
509 vma->vm_start, vma->vm_end); 521 continue;
510 } 522 flush_user_icache_range_asm(vma->vm_start, vma->vm_end);
511 } else {
512 pgd_t *pgd = mm->pgd;
513
514 for (vma = mm->mmap; vma; vma = vma->vm_next) {
515 unsigned long addr;
516
517 for (addr = vma->vm_start; addr < vma->vm_end;
518 addr += PAGE_SIZE) {
519 pte_t *ptep = get_ptep(pgd, addr);
520 if (ptep != NULL) {
521 pte_t pte = *ptep;
522 __flush_cache_page(vma, addr,
523 page_to_phys(pte_page(pte)));
524 }
525 }
526 }
527 } 523 }
528 return; 524 return;
529 } 525 }
530 526
531#ifdef CONFIG_SMP 527 pgd = mm->pgd;
532 flush_cache_all(); 528 for (vma = mm->mmap; vma; vma = vma->vm_next) {
533#else 529 unsigned long addr;
534 flush_cache_all_local(); 530
535#endif 531 for (addr = vma->vm_start; addr < vma->vm_end;
532 addr += PAGE_SIZE) {
533 unsigned long pfn;
534 pte_t *ptep = get_ptep(pgd, addr);
535 if (!ptep)
536 continue;
537 pfn = pte_pfn(*ptep);
538 if (!pfn_valid(pfn))
539 continue;
540 __flush_cache_page(vma, addr, PFN_PHYS(pfn));
541 }
542 }
536} 543}
537 544
538void 545void
@@ -556,33 +563,32 @@ flush_user_icache_range(unsigned long start, unsigned long end)
556void flush_cache_range(struct vm_area_struct *vma, 563void flush_cache_range(struct vm_area_struct *vma,
557 unsigned long start, unsigned long end) 564 unsigned long start, unsigned long end)
558{ 565{
566 unsigned long addr;
567 pgd_t *pgd;
568
559 BUG_ON(!vma->vm_mm->context); 569 BUG_ON(!vma->vm_mm->context);
560 570
561 if ((end - start) < parisc_cache_flush_threshold) { 571 if ((end - start) >= parisc_cache_flush_threshold) {
562 if (vma->vm_mm->context == mfsp(3)) {
563 flush_user_dcache_range_asm(start, end);
564 if (vma->vm_flags & VM_EXEC)
565 flush_user_icache_range_asm(start, end);
566 } else {
567 unsigned long addr;
568 pgd_t *pgd = vma->vm_mm->pgd;
569
570 for (addr = start & PAGE_MASK; addr < end;
571 addr += PAGE_SIZE) {
572 pte_t *ptep = get_ptep(pgd, addr);
573 if (ptep != NULL) {
574 pte_t pte = *ptep;
575 flush_cache_page(vma,
576 addr, pte_pfn(pte));
577 }
578 }
579 }
580 } else {
581#ifdef CONFIG_SMP
582 flush_cache_all(); 572 flush_cache_all();
583#else 573 return;
584 flush_cache_all_local(); 574 }
585#endif 575
576 if (vma->vm_mm->context == mfsp(3)) {
577 flush_user_dcache_range_asm(start, end);
578 if (vma->vm_flags & VM_EXEC)
579 flush_user_icache_range_asm(start, end);
580 return;
581 }
582
583 pgd = vma->vm_mm->pgd;
584 for (addr = start & PAGE_MASK; addr < end; addr += PAGE_SIZE) {
585 unsigned long pfn;
586 pte_t *ptep = get_ptep(pgd, addr);
587 if (!ptep)
588 continue;
589 pfn = pte_pfn(*ptep);
590 if (pfn_valid(pfn))
591 __flush_cache_page(vma, addr, PFN_PHYS(pfn));
586 } 592 }
587} 593}
588 594
@@ -591,9 +597,10 @@ flush_cache_page(struct vm_area_struct *vma, unsigned long vmaddr, unsigned long
591{ 597{
592 BUG_ON(!vma->vm_mm->context); 598 BUG_ON(!vma->vm_mm->context);
593 599
594 flush_tlb_page(vma, vmaddr); 600 if (pfn_valid(pfn)) {
595 __flush_cache_page(vma, vmaddr, page_to_phys(pfn_to_page(pfn))); 601 flush_tlb_page(vma, vmaddr);
596 602 __flush_cache_page(vma, vmaddr, PFN_PHYS(pfn));
603 }
597} 604}
598 605
599#ifdef CONFIG_PARISC_TMPALIAS 606#ifdef CONFIG_PARISC_TMPALIAS
diff --git a/arch/parisc/kernel/firmware.c b/arch/parisc/kernel/firmware.c
index f65fa480c905..22395901d47b 100644
--- a/arch/parisc/kernel/firmware.c
+++ b/arch/parisc/kernel/firmware.c
@@ -150,7 +150,7 @@ static void convert_to_wide(unsigned long *addr)
150} 150}
151 151
152#ifdef CONFIG_64BIT 152#ifdef CONFIG_64BIT
153void __cpuinit set_firmware_width_unlocked(void) 153void set_firmware_width_unlocked(void)
154{ 154{
155 int ret; 155 int ret;
156 156
@@ -167,7 +167,7 @@ void __cpuinit set_firmware_width_unlocked(void)
167 * This function must be called before any pdc_* function that uses the 167 * This function must be called before any pdc_* function that uses the
168 * convert_to_wide function. 168 * convert_to_wide function.
169 */ 169 */
170void __cpuinit set_firmware_width(void) 170void set_firmware_width(void)
171{ 171{
172 unsigned long flags; 172 unsigned long flags;
173 spin_lock_irqsave(&pdc_lock, flags); 173 spin_lock_irqsave(&pdc_lock, flags);
@@ -175,11 +175,13 @@ void __cpuinit set_firmware_width(void)
175 spin_unlock_irqrestore(&pdc_lock, flags); 175 spin_unlock_irqrestore(&pdc_lock, flags);
176} 176}
177#else 177#else
178void __cpuinit set_firmware_width_unlocked(void) { 178void set_firmware_width_unlocked(void)
179{
179 return; 180 return;
180} 181}
181 182
182void __cpuinit set_firmware_width(void) { 183void set_firmware_width(void)
184{
183 return; 185 return;
184} 186}
185#endif /*CONFIG_64BIT*/ 187#endif /*CONFIG_64BIT*/
@@ -301,7 +303,7 @@ int pdc_chassis_warn(unsigned long *warn)
301 return retval; 303 return retval;
302} 304}
303 305
304int __cpuinit pdc_coproc_cfg_unlocked(struct pdc_coproc_cfg *pdc_coproc_info) 306int pdc_coproc_cfg_unlocked(struct pdc_coproc_cfg *pdc_coproc_info)
305{ 307{
306 int ret; 308 int ret;
307 309
@@ -322,7 +324,7 @@ int __cpuinit pdc_coproc_cfg_unlocked(struct pdc_coproc_cfg *pdc_coproc_info)
322 * This PDC call returns the presence and status of all the coprocessors 324 * This PDC call returns the presence and status of all the coprocessors
323 * attached to the processor. 325 * attached to the processor.
324 */ 326 */
325int __cpuinit pdc_coproc_cfg(struct pdc_coproc_cfg *pdc_coproc_info) 327int pdc_coproc_cfg(struct pdc_coproc_cfg *pdc_coproc_info)
326{ 328{
327 int ret; 329 int ret;
328 unsigned long flags; 330 unsigned long flags;
diff --git a/arch/parisc/kernel/hardware.c b/arch/parisc/kernel/hardware.c
index 872275659d98..06cb3992907e 100644
--- a/arch/parisc/kernel/hardware.c
+++ b/arch/parisc/kernel/hardware.c
@@ -1367,7 +1367,7 @@ const char *parisc_hardware_description(struct parisc_device_id *id)
1367 1367
1368 1368
1369/* Interpret hversion (ret[0]) from PDC_MODEL(4)/PDC_MODEL_INFO(0) */ 1369/* Interpret hversion (ret[0]) from PDC_MODEL(4)/PDC_MODEL_INFO(0) */
1370enum cpu_type __cpuinit 1370enum cpu_type
1371parisc_get_cpu_type(unsigned long hversion) 1371parisc_get_cpu_type(unsigned long hversion)
1372{ 1372{
1373 struct hp_cpu_type_mask *ptr; 1373 struct hp_cpu_type_mask *ptr;
diff --git a/arch/parisc/kernel/inventory.c b/arch/parisc/kernel/inventory.c
index 3295ef4a185d..f0b6722fc706 100644
--- a/arch/parisc/kernel/inventory.c
+++ b/arch/parisc/kernel/inventory.c
@@ -211,6 +211,7 @@ pat_query_module(ulong pcell_loc, ulong mod_index)
211 /* REVISIT: who is the consumer of this? not sure yet... */ 211 /* REVISIT: who is the consumer of this? not sure yet... */
212 dev->mod_info = pa_pdc_cell->mod_info; /* pass to PAT_GET_ENTITY() */ 212 dev->mod_info = pa_pdc_cell->mod_info; /* pass to PAT_GET_ENTITY() */
213 dev->pmod_loc = pa_pdc_cell->mod_location; 213 dev->pmod_loc = pa_pdc_cell->mod_location;
214 dev->mod0 = pa_pdc_cell->mod[0];
214 215
215 register_parisc_device(dev); /* advertise device */ 216 register_parisc_device(dev); /* advertise device */
216 217
diff --git a/arch/parisc/kernel/processor.c b/arch/parisc/kernel/processor.c
index 8a96c8ab9fe6..b68d977ce30f 100644
--- a/arch/parisc/kernel/processor.c
+++ b/arch/parisc/kernel/processor.c
@@ -73,7 +73,7 @@ extern int update_cr16_clocksource(void); /* from time.c */
73 * 73 *
74 * FIXME: doesn't do much yet... 74 * FIXME: doesn't do much yet...
75 */ 75 */
76static void __cpuinit 76static void
77init_percpu_prof(unsigned long cpunum) 77init_percpu_prof(unsigned long cpunum)
78{ 78{
79 struct cpuinfo_parisc *p; 79 struct cpuinfo_parisc *p;
@@ -92,7 +92,7 @@ init_percpu_prof(unsigned long cpunum)
92 * (return 1). If so, initialize the chip and tell other partners in crime 92 * (return 1). If so, initialize the chip and tell other partners in crime
93 * they have work to do. 93 * they have work to do.
94 */ 94 */
95static int __cpuinit processor_probe(struct parisc_device *dev) 95static int processor_probe(struct parisc_device *dev)
96{ 96{
97 unsigned long txn_addr; 97 unsigned long txn_addr;
98 unsigned long cpuid; 98 unsigned long cpuid;
@@ -299,7 +299,7 @@ void __init collect_boot_cpu_data(void)
299 * 299 *
300 * o Enable CPU profiling hooks. 300 * o Enable CPU profiling hooks.
301 */ 301 */
302int __cpuinit init_per_cpu(int cpunum) 302int init_per_cpu(int cpunum)
303{ 303{
304 int ret; 304 int ret;
305 struct pdc_coproc_cfg coproc_cfg; 305 struct pdc_coproc_cfg coproc_cfg;
diff --git a/arch/parisc/kernel/signal.c b/arch/parisc/kernel/signal.c
index 940188d1942c..07349b002687 100644
--- a/arch/parisc/kernel/signal.c
+++ b/arch/parisc/kernel/signal.c
@@ -56,13 +56,6 @@
56#define A(__x) ((unsigned long)(__x)) 56#define A(__x) ((unsigned long)(__x))
57 57
58/* 58/*
59 * Atomically swap in the new signal mask, and wait for a signal.
60 */
61#ifdef CONFIG_64BIT
62#include "sys32.h"
63#endif
64
65/*
66 * Do a signal return - restore sigcontext. 59 * Do a signal return - restore sigcontext.
67 */ 60 */
68 61
diff --git a/arch/parisc/kernel/signal32.c b/arch/parisc/kernel/signal32.c
index 33eca1b04926..6c6a271a6140 100644
--- a/arch/parisc/kernel/signal32.c
+++ b/arch/parisc/kernel/signal32.c
@@ -34,7 +34,6 @@
34#include <asm/uaccess.h> 34#include <asm/uaccess.h>
35 35
36#include "signal32.h" 36#include "signal32.h"
37#include "sys32.h"
38 37
39#define DEBUG_COMPAT_SIG 0 38#define DEBUG_COMPAT_SIG 0
40#define DEBUG_COMPAT_SIG_LEVEL 2 39#define DEBUG_COMPAT_SIG_LEVEL 2
diff --git a/arch/parisc/kernel/smp.c b/arch/parisc/kernel/smp.c
index e3614fb343e5..8a252f2d6c08 100644
--- a/arch/parisc/kernel/smp.c
+++ b/arch/parisc/kernel/smp.c
@@ -62,9 +62,9 @@ static int smp_debug_lvl = 0;
62volatile struct task_struct *smp_init_current_idle_task; 62volatile struct task_struct *smp_init_current_idle_task;
63 63
64/* track which CPU is booting */ 64/* track which CPU is booting */
65static volatile int cpu_now_booting __cpuinitdata; 65static volatile int cpu_now_booting;
66 66
67static int parisc_max_cpus __cpuinitdata = 1; 67static int parisc_max_cpus = 1;
68 68
69static DEFINE_PER_CPU(spinlock_t, ipi_lock); 69static DEFINE_PER_CPU(spinlock_t, ipi_lock);
70 70
@@ -328,7 +328,7 @@ void __init smp_callin(void)
328/* 328/*
329 * Bring one cpu online. 329 * Bring one cpu online.
330 */ 330 */
331int __cpuinit smp_boot_one_cpu(int cpuid, struct task_struct *idle) 331int smp_boot_one_cpu(int cpuid, struct task_struct *idle)
332{ 332{
333 const struct cpuinfo_parisc *p = &per_cpu(cpu_data, cpuid); 333 const struct cpuinfo_parisc *p = &per_cpu(cpu_data, cpuid);
334 long timeout; 334 long timeout;
@@ -424,7 +424,7 @@ void smp_cpus_done(unsigned int cpu_max)
424} 424}
425 425
426 426
427int __cpuinit __cpu_up(unsigned int cpu, struct task_struct *tidle) 427int __cpu_up(unsigned int cpu, struct task_struct *tidle)
428{ 428{
429 if (cpu != 0 && cpu < parisc_max_cpus) 429 if (cpu != 0 && cpu < parisc_max_cpus)
430 smp_boot_one_cpu(cpu, tidle); 430 smp_boot_one_cpu(cpu, tidle);
diff --git a/arch/parisc/kernel/sys32.h b/arch/parisc/kernel/sys32.h
deleted file mode 100644
index 60dd470f39f8..000000000000
--- a/arch/parisc/kernel/sys32.h
+++ /dev/null
@@ -1,36 +0,0 @@
1/*
2 * Copyright (C) 2002 Richard Hirst <rhirst at parisc-linux.org>
3 * Copyright (C) 2003 James Bottomley <jejb at parisc-linux.org>
4 * Copyright (C) 2003 Randolph Chung <tausq with parisc-linux.org>
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
19 */
20#ifndef _PARISC64_KERNEL_SYS32_H
21#define _PARISC64_KERNEL_SYS32_H
22
23#include <linux/compat.h>
24
25/* Call a kernel syscall which will use kernel space instead of user
26 * space for its copy_to/from_user.
27 */
28#define KERNEL_SYSCALL(ret, syscall, args...) \
29{ \
30 mm_segment_t old_fs = get_fs(); \
31 set_fs(KERNEL_DS); \
32 ret = syscall(args); \
33 set_fs (old_fs); \
34}
35
36#endif
diff --git a/arch/parisc/kernel/sys_parisc32.c b/arch/parisc/kernel/sys_parisc32.c
index a134ff4da12e..bb9f3b64de55 100644
--- a/arch/parisc/kernel/sys_parisc32.c
+++ b/arch/parisc/kernel/sys_parisc32.c
@@ -42,8 +42,6 @@
42#include <asm/uaccess.h> 42#include <asm/uaccess.h>
43#include <asm/mmu_context.h> 43#include <asm/mmu_context.h>
44 44
45#include "sys32.h"
46
47#undef DEBUG 45#undef DEBUG
48 46
49#ifdef DEBUG 47#ifdef DEBUG
diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
index 3bf72cd2c8fc..dbd9d3c991e8 100644
--- a/arch/powerpc/Kconfig
+++ b/arch/powerpc/Kconfig
@@ -566,7 +566,7 @@ config SCHED_SMT
566config PPC_DENORMALISATION 566config PPC_DENORMALISATION
567 bool "PowerPC denormalisation exception handling" 567 bool "PowerPC denormalisation exception handling"
568 depends on PPC_BOOK3S_64 568 depends on PPC_BOOK3S_64
569 default "n" 569 default "y" if PPC_POWERNV
570 ---help--- 570 ---help---
571 Add support for handling denormalisation of single precision 571 Add support for handling denormalisation of single precision
572 values. Useful for bare metal only. If unsure say Y here. 572 values. Useful for bare metal only. If unsure say Y here.
diff --git a/arch/powerpc/configs/ppc64_defconfig b/arch/powerpc/configs/ppc64_defconfig
index c86fcb92358e..0e8cfd09da2f 100644
--- a/arch/powerpc/configs/ppc64_defconfig
+++ b/arch/powerpc/configs/ppc64_defconfig
@@ -58,7 +58,7 @@ CONFIG_SCHED_SMT=y
58CONFIG_PPC_DENORMALISATION=y 58CONFIG_PPC_DENORMALISATION=y
59CONFIG_PCCARD=y 59CONFIG_PCCARD=y
60CONFIG_ELECTRA_CF=y 60CONFIG_ELECTRA_CF=y
61CONFIG_HOTPLUG_PCI=m 61CONFIG_HOTPLUG_PCI=y
62CONFIG_HOTPLUG_PCI_RPA=m 62CONFIG_HOTPLUG_PCI_RPA=m
63CONFIG_HOTPLUG_PCI_RPA_DLPAR=m 63CONFIG_HOTPLUG_PCI_RPA_DLPAR=m
64CONFIG_PACKET=y 64CONFIG_PACKET=y
diff --git a/arch/powerpc/configs/ppc64e_defconfig b/arch/powerpc/configs/ppc64e_defconfig
index 4b20f76172e2..0085dc4642c5 100644
--- a/arch/powerpc/configs/ppc64e_defconfig
+++ b/arch/powerpc/configs/ppc64e_defconfig
@@ -32,7 +32,7 @@ CONFIG_IRQ_ALL_CPUS=y
32CONFIG_SPARSEMEM_MANUAL=y 32CONFIG_SPARSEMEM_MANUAL=y
33CONFIG_PCI_MSI=y 33CONFIG_PCI_MSI=y
34CONFIG_PCCARD=y 34CONFIG_PCCARD=y
35CONFIG_HOTPLUG_PCI=m 35CONFIG_HOTPLUG_PCI=y
36CONFIG_PACKET=y 36CONFIG_PACKET=y
37CONFIG_UNIX=y 37CONFIG_UNIX=y
38CONFIG_XFRM_USER=m 38CONFIG_XFRM_USER=m
diff --git a/arch/powerpc/configs/pseries_defconfig b/arch/powerpc/configs/pseries_defconfig
index bea8587c3af5..1d4b9763895d 100644
--- a/arch/powerpc/configs/pseries_defconfig
+++ b/arch/powerpc/configs/pseries_defconfig
@@ -53,7 +53,7 @@ CONFIG_PPC_64K_PAGES=y
53CONFIG_PPC_SUBPAGE_PROT=y 53CONFIG_PPC_SUBPAGE_PROT=y
54CONFIG_SCHED_SMT=y 54CONFIG_SCHED_SMT=y
55CONFIG_PPC_DENORMALISATION=y 55CONFIG_PPC_DENORMALISATION=y
56CONFIG_HOTPLUG_PCI=m 56CONFIG_HOTPLUG_PCI=y
57CONFIG_HOTPLUG_PCI_RPA=m 57CONFIG_HOTPLUG_PCI_RPA=m
58CONFIG_HOTPLUG_PCI_RPA_DLPAR=m 58CONFIG_HOTPLUG_PCI_RPA_DLPAR=m
59CONFIG_PACKET=y 59CONFIG_PACKET=y
diff --git a/arch/powerpc/include/asm/eeh.h b/arch/powerpc/include/asm/eeh.h
index 09a8743143f3..d3e5e9bc8f94 100644
--- a/arch/powerpc/include/asm/eeh.h
+++ b/arch/powerpc/include/asm/eeh.h
@@ -55,6 +55,8 @@ struct device_node;
55#define EEH_PE_RECOVERING (1 << 1) /* Recovering PE */ 55#define EEH_PE_RECOVERING (1 << 1) /* Recovering PE */
56#define EEH_PE_PHB_DEAD (1 << 2) /* Dead PHB */ 56#define EEH_PE_PHB_DEAD (1 << 2) /* Dead PHB */
57 57
58#define EEH_PE_KEEP (1 << 8) /* Keep PE on hotplug */
59
58struct eeh_pe { 60struct eeh_pe {
59 int type; /* PE type: PHB/Bus/Device */ 61 int type; /* PE type: PHB/Bus/Device */
60 int state; /* PE EEH dependent mode */ 62 int state; /* PE EEH dependent mode */
@@ -72,8 +74,8 @@ struct eeh_pe {
72 struct list_head child; /* Child PEs */ 74 struct list_head child; /* Child PEs */
73}; 75};
74 76
75#define eeh_pe_for_each_dev(pe, edev) \ 77#define eeh_pe_for_each_dev(pe, edev, tmp) \
76 list_for_each_entry(edev, &pe->edevs, list) 78 list_for_each_entry_safe(edev, tmp, &pe->edevs, list)
77 79
78/* 80/*
79 * The struct is used to trace EEH state for the associated 81 * The struct is used to trace EEH state for the associated
@@ -82,7 +84,13 @@ struct eeh_pe {
82 * another tree except the currently existing tree of PCI 84 * another tree except the currently existing tree of PCI
83 * buses and PCI devices 85 * buses and PCI devices
84 */ 86 */
85#define EEH_DEV_IRQ_DISABLED (1<<0) /* Interrupt disabled */ 87#define EEH_DEV_BRIDGE (1 << 0) /* PCI bridge */
88#define EEH_DEV_ROOT_PORT (1 << 1) /* PCIe root port */
89#define EEH_DEV_DS_PORT (1 << 2) /* Downstream port */
90#define EEH_DEV_IRQ_DISABLED (1 << 3) /* Interrupt disabled */
91#define EEH_DEV_DISCONNECTED (1 << 4) /* Removing from PE */
92
93#define EEH_DEV_SYSFS (1 << 8) /* Sysfs created */
86 94
87struct eeh_dev { 95struct eeh_dev {
88 int mode; /* EEH mode */ 96 int mode; /* EEH mode */
@@ -90,11 +98,13 @@ struct eeh_dev {
90 int config_addr; /* Config address */ 98 int config_addr; /* Config address */
91 int pe_config_addr; /* PE config address */ 99 int pe_config_addr; /* PE config address */
92 u32 config_space[16]; /* Saved PCI config space */ 100 u32 config_space[16]; /* Saved PCI config space */
101 u8 pcie_cap; /* Saved PCIe capability */
93 struct eeh_pe *pe; /* Associated PE */ 102 struct eeh_pe *pe; /* Associated PE */
94 struct list_head list; /* Form link list in the PE */ 103 struct list_head list; /* Form link list in the PE */
95 struct pci_controller *phb; /* Associated PHB */ 104 struct pci_controller *phb; /* Associated PHB */
96 struct device_node *dn; /* Associated device node */ 105 struct device_node *dn; /* Associated device node */
97 struct pci_dev *pdev; /* Associated PCI device */ 106 struct pci_dev *pdev; /* Associated PCI device */
107 struct pci_bus *bus; /* PCI bus for partial hotplug */
98}; 108};
99 109
100static inline struct device_node *eeh_dev_to_of_node(struct eeh_dev *edev) 110static inline struct device_node *eeh_dev_to_of_node(struct eeh_dev *edev)
@@ -193,8 +203,10 @@ int eeh_phb_pe_create(struct pci_controller *phb);
193struct eeh_pe *eeh_phb_pe_get(struct pci_controller *phb); 203struct eeh_pe *eeh_phb_pe_get(struct pci_controller *phb);
194struct eeh_pe *eeh_pe_get(struct eeh_dev *edev); 204struct eeh_pe *eeh_pe_get(struct eeh_dev *edev);
195int eeh_add_to_parent_pe(struct eeh_dev *edev); 205int eeh_add_to_parent_pe(struct eeh_dev *edev);
196int eeh_rmv_from_parent_pe(struct eeh_dev *edev, int purge_pe); 206int eeh_rmv_from_parent_pe(struct eeh_dev *edev);
197void eeh_pe_update_time_stamp(struct eeh_pe *pe); 207void eeh_pe_update_time_stamp(struct eeh_pe *pe);
208void *eeh_pe_traverse(struct eeh_pe *root,
209 eeh_traverse_func fn, void *flag);
198void *eeh_pe_dev_traverse(struct eeh_pe *root, 210void *eeh_pe_dev_traverse(struct eeh_pe *root,
199 eeh_traverse_func fn, void *flag); 211 eeh_traverse_func fn, void *flag);
200void eeh_pe_restore_bars(struct eeh_pe *pe); 212void eeh_pe_restore_bars(struct eeh_pe *pe);
@@ -209,10 +221,12 @@ unsigned long eeh_check_failure(const volatile void __iomem *token,
209 unsigned long val); 221 unsigned long val);
210int eeh_dev_check_failure(struct eeh_dev *edev); 222int eeh_dev_check_failure(struct eeh_dev *edev);
211void eeh_addr_cache_build(void); 223void eeh_addr_cache_build(void);
224void eeh_add_device_early(struct device_node *);
212void eeh_add_device_tree_early(struct device_node *); 225void eeh_add_device_tree_early(struct device_node *);
226void eeh_add_device_late(struct pci_dev *);
213void eeh_add_device_tree_late(struct pci_bus *); 227void eeh_add_device_tree_late(struct pci_bus *);
214void eeh_add_sysfs_files(struct pci_bus *); 228void eeh_add_sysfs_files(struct pci_bus *);
215void eeh_remove_bus_device(struct pci_dev *, int); 229void eeh_remove_device(struct pci_dev *);
216 230
217/** 231/**
218 * EEH_POSSIBLE_ERROR() -- test for possible MMIO failure. 232 * EEH_POSSIBLE_ERROR() -- test for possible MMIO failure.
@@ -252,13 +266,17 @@ static inline unsigned long eeh_check_failure(const volatile void __iomem *token
252 266
253static inline void eeh_addr_cache_build(void) { } 267static inline void eeh_addr_cache_build(void) { }
254 268
269static inline void eeh_add_device_early(struct device_node *dn) { }
270
255static inline void eeh_add_device_tree_early(struct device_node *dn) { } 271static inline void eeh_add_device_tree_early(struct device_node *dn) { }
256 272
273static inline void eeh_add_device_late(struct pci_dev *dev) { }
274
257static inline void eeh_add_device_tree_late(struct pci_bus *bus) { } 275static inline void eeh_add_device_tree_late(struct pci_bus *bus) { }
258 276
259static inline void eeh_add_sysfs_files(struct pci_bus *bus) { } 277static inline void eeh_add_sysfs_files(struct pci_bus *bus) { }
260 278
261static inline void eeh_remove_bus_device(struct pci_dev *dev, int purge_pe) { } 279static inline void eeh_remove_device(struct pci_dev *dev) { }
262 280
263#define EEH_POSSIBLE_ERROR(val, type) (0) 281#define EEH_POSSIBLE_ERROR(val, type) (0)
264#define EEH_IO_ERROR_VALUE(size) (-1UL) 282#define EEH_IO_ERROR_VALUE(size) (-1UL)
diff --git a/arch/powerpc/include/asm/hw_irq.h b/arch/powerpc/include/asm/hw_irq.h
index ba713f166fa5..10be1dd01c6b 100644
--- a/arch/powerpc/include/asm/hw_irq.h
+++ b/arch/powerpc/include/asm/hw_irq.h
@@ -96,10 +96,11 @@ static inline bool arch_irqs_disabled(void)
96#endif 96#endif
97 97
98#define hard_irq_disable() do { \ 98#define hard_irq_disable() do { \
99 u8 _was_enabled = get_paca()->soft_enabled; \ 99 u8 _was_enabled; \
100 __hard_irq_disable(); \ 100 __hard_irq_disable(); \
101 get_paca()->soft_enabled = 0; \ 101 _was_enabled = local_paca->soft_enabled; \
102 get_paca()->irq_happened |= PACA_IRQ_HARD_DIS; \ 102 local_paca->soft_enabled = 0; \
103 local_paca->irq_happened |= PACA_IRQ_HARD_DIS; \
103 if (_was_enabled) \ 104 if (_was_enabled) \
104 trace_hardirqs_off(); \ 105 trace_hardirqs_off(); \
105} while(0) 106} while(0)
diff --git a/arch/powerpc/include/asm/module.h b/arch/powerpc/include/asm/module.h
index c1df590ec444..49fa55bfbac4 100644
--- a/arch/powerpc/include/asm/module.h
+++ b/arch/powerpc/include/asm/module.h
@@ -82,10 +82,9 @@ struct exception_table_entry;
82void sort_ex_table(struct exception_table_entry *start, 82void sort_ex_table(struct exception_table_entry *start,
83 struct exception_table_entry *finish); 83 struct exception_table_entry *finish);
84 84
85#ifdef CONFIG_MODVERSIONS 85#if defined(CONFIG_MODVERSIONS) && defined(CONFIG_PPC64)
86#define ARCH_RELOCATES_KCRCTAB 86#define ARCH_RELOCATES_KCRCTAB
87 87#define reloc_start PHYSICAL_START
88extern const unsigned long reloc_start[];
89#endif 88#endif
90#endif /* __KERNEL__ */ 89#endif /* __KERNEL__ */
91#endif /* _ASM_POWERPC_MODULE_H */ 90#endif /* _ASM_POWERPC_MODULE_H */
diff --git a/arch/powerpc/include/asm/pci-bridge.h b/arch/powerpc/include/asm/pci-bridge.h
index 2c1d8cb9b265..32d0d2018faf 100644
--- a/arch/powerpc/include/asm/pci-bridge.h
+++ b/arch/powerpc/include/asm/pci-bridge.h
@@ -209,7 +209,6 @@ static inline struct eeh_dev *of_node_to_eeh_dev(struct device_node *dn)
209extern struct pci_bus *pcibios_find_pci_bus(struct device_node *dn); 209extern struct pci_bus *pcibios_find_pci_bus(struct device_node *dn);
210 210
211/** Remove all of the PCI devices under this bus */ 211/** Remove all of the PCI devices under this bus */
212extern void __pcibios_remove_pci_devices(struct pci_bus *bus, int purge_pe);
213extern void pcibios_remove_pci_devices(struct pci_bus *bus); 212extern void pcibios_remove_pci_devices(struct pci_bus *bus);
214 213
215/** Discover new pci devices under this bus, and add them */ 214/** Discover new pci devices under this bus, and add them */
diff --git a/arch/powerpc/include/asm/perf_event_server.h b/arch/powerpc/include/asm/perf_event_server.h
index 2dd7bfc459be..8b2492644754 100644
--- a/arch/powerpc/include/asm/perf_event_server.h
+++ b/arch/powerpc/include/asm/perf_event_server.h
@@ -12,6 +12,7 @@
12#include <linux/types.h> 12#include <linux/types.h>
13#include <asm/hw_irq.h> 13#include <asm/hw_irq.h>
14#include <linux/device.h> 14#include <linux/device.h>
15#include <uapi/asm/perf_event.h>
15 16
16#define MAX_HWEVENTS 8 17#define MAX_HWEVENTS 8
17#define MAX_EVENT_ALTERNATIVES 8 18#define MAX_EVENT_ALTERNATIVES 8
@@ -69,11 +70,6 @@ struct power_pmu {
69#define PPMU_LIMITED_PMC_REQD 2 /* have to put this on a limited PMC */ 70#define PPMU_LIMITED_PMC_REQD 2 /* have to put this on a limited PMC */
70#define PPMU_ONLY_COUNT_RUN 4 /* only counting in run state */ 71#define PPMU_ONLY_COUNT_RUN 4 /* only counting in run state */
71 72
72/*
73 * We use the event config bit 63 as a flag to request EBB.
74 */
75#define EVENT_CONFIG_EBB_SHIFT 63
76
77extern int register_power_pmu(struct power_pmu *); 73extern int register_power_pmu(struct power_pmu *);
78 74
79struct pt_regs; 75struct pt_regs;
diff --git a/arch/powerpc/include/asm/processor.h b/arch/powerpc/include/asm/processor.h
index 47a35b08b963..e378cccfca55 100644
--- a/arch/powerpc/include/asm/processor.h
+++ b/arch/powerpc/include/asm/processor.h
@@ -247,6 +247,10 @@ struct thread_struct {
247 unsigned long tm_orig_msr; /* Thread's MSR on ctx switch */ 247 unsigned long tm_orig_msr; /* Thread's MSR on ctx switch */
248 struct pt_regs ckpt_regs; /* Checkpointed registers */ 248 struct pt_regs ckpt_regs; /* Checkpointed registers */
249 249
250 unsigned long tm_tar;
251 unsigned long tm_ppr;
252 unsigned long tm_dscr;
253
250 /* 254 /*
251 * Transactional FP and VSX 0-31 register set. 255 * Transactional FP and VSX 0-31 register set.
252 * NOTE: the sense of these is the opposite of the integer ckpt_regs! 256 * NOTE: the sense of these is the opposite of the integer ckpt_regs!
diff --git a/arch/powerpc/include/asm/reg.h b/arch/powerpc/include/asm/reg.h
index 5d7d9c2a5473..99222e27f173 100644
--- a/arch/powerpc/include/asm/reg.h
+++ b/arch/powerpc/include/asm/reg.h
@@ -254,19 +254,28 @@
254#define SPRN_HRMOR 0x139 /* Real mode offset register */ 254#define SPRN_HRMOR 0x139 /* Real mode offset register */
255#define SPRN_HSRR0 0x13A /* Hypervisor Save/Restore 0 */ 255#define SPRN_HSRR0 0x13A /* Hypervisor Save/Restore 0 */
256#define SPRN_HSRR1 0x13B /* Hypervisor Save/Restore 1 */ 256#define SPRN_HSRR1 0x13B /* Hypervisor Save/Restore 1 */
257/* HFSCR and FSCR bit numbers are the same */
258#define FSCR_TAR_LG 8 /* Enable Target Address Register */
259#define FSCR_EBB_LG 7 /* Enable Event Based Branching */
260#define FSCR_TM_LG 5 /* Enable Transactional Memory */
261#define FSCR_PM_LG 4 /* Enable prob/priv access to PMU SPRs */
262#define FSCR_BHRB_LG 3 /* Enable Branch History Rolling Buffer*/
263#define FSCR_DSCR_LG 2 /* Enable Data Stream Control Register */
264#define FSCR_VECVSX_LG 1 /* Enable VMX/VSX */
265#define FSCR_FP_LG 0 /* Enable Floating Point */
257#define SPRN_FSCR 0x099 /* Facility Status & Control Register */ 266#define SPRN_FSCR 0x099 /* Facility Status & Control Register */
258#define FSCR_TAR (1 << (63-55)) /* Enable Target Address Register */ 267#define FSCR_TAR __MASK(FSCR_TAR_LG)
259#define FSCR_EBB (1 << (63-56)) /* Enable Event Based Branching */ 268#define FSCR_EBB __MASK(FSCR_EBB_LG)
260#define FSCR_DSCR (1 << (63-61)) /* Enable Data Stream Control Register */ 269#define FSCR_DSCR __MASK(FSCR_DSCR_LG)
261#define SPRN_HFSCR 0xbe /* HV=1 Facility Status & Control Register */ 270#define SPRN_HFSCR 0xbe /* HV=1 Facility Status & Control Register */
262#define HFSCR_TAR (1 << (63-55)) /* Enable Target Address Register */ 271#define HFSCR_TAR __MASK(FSCR_TAR_LG)
263#define HFSCR_EBB (1 << (63-56)) /* Enable Event Based Branching */ 272#define HFSCR_EBB __MASK(FSCR_EBB_LG)
264#define HFSCR_TM (1 << (63-58)) /* Enable Transactional Memory */ 273#define HFSCR_TM __MASK(FSCR_TM_LG)
265#define HFSCR_PM (1 << (63-60)) /* Enable prob/priv access to PMU SPRs */ 274#define HFSCR_PM __MASK(FSCR_PM_LG)
266#define HFSCR_BHRB (1 << (63-59)) /* Enable Branch History Rolling Buffer*/ 275#define HFSCR_BHRB __MASK(FSCR_BHRB_LG)
267#define HFSCR_DSCR (1 << (63-61)) /* Enable Data Stream Control Register */ 276#define HFSCR_DSCR __MASK(FSCR_DSCR_LG)
268#define HFSCR_VECVSX (1 << (63-62)) /* Enable VMX/VSX */ 277#define HFSCR_VECVSX __MASK(FSCR_VECVSX_LG)
269#define HFSCR_FP (1 << (63-63)) /* Enable Floating Point */ 278#define HFSCR_FP __MASK(FSCR_FP_LG)
270#define SPRN_TAR 0x32f /* Target Address Register */ 279#define SPRN_TAR 0x32f /* Target Address Register */
271#define SPRN_LPCR 0x13E /* LPAR Control Register */ 280#define SPRN_LPCR 0x13E /* LPAR Control Register */
272#define LPCR_VPM0 (1ul << (63-0)) 281#define LPCR_VPM0 (1ul << (63-0))
@@ -1088,7 +1097,8 @@
1088#define PVR_970MP 0x0044 1097#define PVR_970MP 0x0044
1089#define PVR_970GX 0x0045 1098#define PVR_970GX 0x0045
1090#define PVR_POWER7p 0x004A 1099#define PVR_POWER7p 0x004A
1091#define PVR_POWER8 0x004B 1100#define PVR_POWER8E 0x004B
1101#define PVR_POWER8 0x004D
1092#define PVR_BE 0x0070 1102#define PVR_BE 0x0070
1093#define PVR_PA6T 0x0090 1103#define PVR_PA6T 0x0090
1094 1104
diff --git a/arch/powerpc/include/asm/smp.h b/arch/powerpc/include/asm/smp.h
index ffbaabebcdca..48cfc858abd6 100644
--- a/arch/powerpc/include/asm/smp.h
+++ b/arch/powerpc/include/asm/smp.h
@@ -145,6 +145,10 @@ extern void __cpu_die(unsigned int cpu);
145#define smp_setup_cpu_maps() 145#define smp_setup_cpu_maps()
146static inline void inhibit_secondary_onlining(void) {} 146static inline void inhibit_secondary_onlining(void) {}
147static inline void uninhibit_secondary_onlining(void) {} 147static inline void uninhibit_secondary_onlining(void) {}
148static inline const struct cpumask *cpu_sibling_mask(int cpu)
149{
150 return cpumask_of(cpu);
151}
148 152
149#endif /* CONFIG_SMP */ 153#endif /* CONFIG_SMP */
150 154
diff --git a/arch/powerpc/include/asm/switch_to.h b/arch/powerpc/include/asm/switch_to.h
index 49a13e0ef234..294c2cedcf7a 100644
--- a/arch/powerpc/include/asm/switch_to.h
+++ b/arch/powerpc/include/asm/switch_to.h
@@ -15,6 +15,15 @@ extern struct task_struct *__switch_to(struct task_struct *,
15struct thread_struct; 15struct thread_struct;
16extern struct task_struct *_switch(struct thread_struct *prev, 16extern struct task_struct *_switch(struct thread_struct *prev,
17 struct thread_struct *next); 17 struct thread_struct *next);
18#ifdef CONFIG_PPC_BOOK3S_64
19static inline void save_tar(struct thread_struct *prev)
20{
21 if (cpu_has_feature(CPU_FTR_ARCH_207S))
22 prev->tar = mfspr(SPRN_TAR);
23}
24#else
25static inline void save_tar(struct thread_struct *prev) {}
26#endif
18 27
19extern void giveup_fpu(struct task_struct *); 28extern void giveup_fpu(struct task_struct *);
20extern void load_up_fpu(void); 29extern void load_up_fpu(void);
diff --git a/arch/powerpc/include/uapi/asm/Kbuild b/arch/powerpc/include/uapi/asm/Kbuild
index 5182c8622b54..48be855ef37b 100644
--- a/arch/powerpc/include/uapi/asm/Kbuild
+++ b/arch/powerpc/include/uapi/asm/Kbuild
@@ -20,6 +20,7 @@ header-y += mman.h
20header-y += msgbuf.h 20header-y += msgbuf.h
21header-y += nvram.h 21header-y += nvram.h
22header-y += param.h 22header-y += param.h
23header-y += perf_event.h
23header-y += poll.h 24header-y += poll.h
24header-y += posix_types.h 25header-y += posix_types.h
25header-y += ps3fb.h 26header-y += ps3fb.h
diff --git a/arch/powerpc/include/uapi/asm/perf_event.h b/arch/powerpc/include/uapi/asm/perf_event.h
new file mode 100644
index 000000000000..80a4d40cf5bc
--- /dev/null
+++ b/arch/powerpc/include/uapi/asm/perf_event.h
@@ -0,0 +1,18 @@
1/*
2 * Copyright 2013 Michael Ellerman, IBM Corp.
3 *
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public License
6 * as published by the Free Software Foundation; version 2 of the
7 * License.
8 */
9
10#ifndef _UAPI_ASM_POWERPC_PERF_EVENT_H
11#define _UAPI_ASM_POWERPC_PERF_EVENT_H
12
13/*
14 * We use bit 63 of perf_event_attr.config as a flag to request EBB.
15 */
16#define PERF_EVENT_CONFIG_EBB_SHIFT 63
17
18#endif /* _UAPI_ASM_POWERPC_PERF_EVENT_H */
diff --git a/arch/powerpc/kernel/asm-offsets.c b/arch/powerpc/kernel/asm-offsets.c
index c7e8afc2ead0..8207459efe56 100644
--- a/arch/powerpc/kernel/asm-offsets.c
+++ b/arch/powerpc/kernel/asm-offsets.c
@@ -138,6 +138,9 @@ int main(void)
138 DEFINE(THREAD_TM_TFHAR, offsetof(struct thread_struct, tm_tfhar)); 138 DEFINE(THREAD_TM_TFHAR, offsetof(struct thread_struct, tm_tfhar));
139 DEFINE(THREAD_TM_TEXASR, offsetof(struct thread_struct, tm_texasr)); 139 DEFINE(THREAD_TM_TEXASR, offsetof(struct thread_struct, tm_texasr));
140 DEFINE(THREAD_TM_TFIAR, offsetof(struct thread_struct, tm_tfiar)); 140 DEFINE(THREAD_TM_TFIAR, offsetof(struct thread_struct, tm_tfiar));
141 DEFINE(THREAD_TM_TAR, offsetof(struct thread_struct, tm_tar));
142 DEFINE(THREAD_TM_PPR, offsetof(struct thread_struct, tm_ppr));
143 DEFINE(THREAD_TM_DSCR, offsetof(struct thread_struct, tm_dscr));
141 DEFINE(PT_CKPT_REGS, offsetof(struct thread_struct, ckpt_regs)); 144 DEFINE(PT_CKPT_REGS, offsetof(struct thread_struct, ckpt_regs));
142 DEFINE(THREAD_TRANSACT_VR0, offsetof(struct thread_struct, 145 DEFINE(THREAD_TRANSACT_VR0, offsetof(struct thread_struct,
143 transact_vr[0])); 146 transact_vr[0]));
diff --git a/arch/powerpc/kernel/cputable.c b/arch/powerpc/kernel/cputable.c
index 2a45d0f04385..22973a74df73 100644
--- a/arch/powerpc/kernel/cputable.c
+++ b/arch/powerpc/kernel/cputable.c
@@ -494,9 +494,27 @@ static struct cpu_spec __initdata cpu_specs[] = {
494 .cpu_restore = __restore_cpu_power7, 494 .cpu_restore = __restore_cpu_power7,
495 .platform = "power7+", 495 .platform = "power7+",
496 }, 496 },
497 { /* Power8 */ 497 { /* Power8E */
498 .pvr_mask = 0xffff0000, 498 .pvr_mask = 0xffff0000,
499 .pvr_value = 0x004b0000, 499 .pvr_value = 0x004b0000,
500 .cpu_name = "POWER8E (raw)",
501 .cpu_features = CPU_FTRS_POWER8,
502 .cpu_user_features = COMMON_USER_POWER8,
503 .cpu_user_features2 = COMMON_USER2_POWER8,
504 .mmu_features = MMU_FTRS_POWER8,
505 .icache_bsize = 128,
506 .dcache_bsize = 128,
507 .num_pmcs = 6,
508 .pmc_type = PPC_PMC_IBM,
509 .oprofile_cpu_type = "ppc64/power8",
510 .oprofile_type = PPC_OPROFILE_INVALID,
511 .cpu_setup = __setup_cpu_power8,
512 .cpu_restore = __restore_cpu_power8,
513 .platform = "power8",
514 },
515 { /* Power8 */
516 .pvr_mask = 0xffff0000,
517 .pvr_value = 0x004d0000,
500 .cpu_name = "POWER8 (raw)", 518 .cpu_name = "POWER8 (raw)",
501 .cpu_features = CPU_FTRS_POWER8, 519 .cpu_features = CPU_FTRS_POWER8,
502 .cpu_user_features = COMMON_USER_POWER8, 520 .cpu_user_features = COMMON_USER_POWER8,
diff --git a/arch/powerpc/kernel/eeh.c b/arch/powerpc/kernel/eeh.c
index 39954fe941b8..55593ee2d5aa 100644
--- a/arch/powerpc/kernel/eeh.c
+++ b/arch/powerpc/kernel/eeh.c
@@ -231,7 +231,7 @@ static size_t eeh_gather_pci_data(struct eeh_dev *edev, char * buf, size_t len)
231void eeh_slot_error_detail(struct eeh_pe *pe, int severity) 231void eeh_slot_error_detail(struct eeh_pe *pe, int severity)
232{ 232{
233 size_t loglen = 0; 233 size_t loglen = 0;
234 struct eeh_dev *edev; 234 struct eeh_dev *edev, *tmp;
235 bool valid_cfg_log = true; 235 bool valid_cfg_log = true;
236 236
237 /* 237 /*
@@ -251,7 +251,7 @@ void eeh_slot_error_detail(struct eeh_pe *pe, int severity)
251 eeh_pe_restore_bars(pe); 251 eeh_pe_restore_bars(pe);
252 252
253 pci_regs_buf[0] = 0; 253 pci_regs_buf[0] = 0;
254 eeh_pe_for_each_dev(pe, edev) { 254 eeh_pe_for_each_dev(pe, edev, tmp) {
255 loglen += eeh_gather_pci_data(edev, pci_regs_buf + loglen, 255 loglen += eeh_gather_pci_data(edev, pci_regs_buf + loglen,
256 EEH_PCI_REGS_LOG_LEN - loglen); 256 EEH_PCI_REGS_LOG_LEN - loglen);
257 } 257 }
@@ -499,8 +499,6 @@ unsigned long eeh_check_failure(const volatile void __iomem *token, unsigned lon
499 } 499 }
500 500
501 eeh_dev_check_failure(edev); 501 eeh_dev_check_failure(edev);
502
503 pci_dev_put(eeh_dev_to_pci_dev(edev));
504 return val; 502 return val;
505} 503}
506 504
@@ -838,7 +836,7 @@ core_initcall_sync(eeh_init);
838 * on the CEC architecture, type of the device, on earlier boot 836 * on the CEC architecture, type of the device, on earlier boot
839 * command-line arguments & etc. 837 * command-line arguments & etc.
840 */ 838 */
841static void eeh_add_device_early(struct device_node *dn) 839void eeh_add_device_early(struct device_node *dn)
842{ 840{
843 struct pci_controller *phb; 841 struct pci_controller *phb;
844 842
@@ -886,7 +884,7 @@ EXPORT_SYMBOL_GPL(eeh_add_device_tree_early);
886 * This routine must be used to complete EEH initialization for PCI 884 * This routine must be used to complete EEH initialization for PCI
887 * devices that were added after system boot (e.g. hotplug, dlpar). 885 * devices that were added after system boot (e.g. hotplug, dlpar).
888 */ 886 */
889static void eeh_add_device_late(struct pci_dev *dev) 887void eeh_add_device_late(struct pci_dev *dev)
890{ 888{
891 struct device_node *dn; 889 struct device_node *dn;
892 struct eeh_dev *edev; 890 struct eeh_dev *edev;
@@ -902,9 +900,23 @@ static void eeh_add_device_late(struct pci_dev *dev)
902 pr_debug("EEH: Already referenced !\n"); 900 pr_debug("EEH: Already referenced !\n");
903 return; 901 return;
904 } 902 }
905 WARN_ON(edev->pdev);
906 903
907 pci_dev_get(dev); 904 /*
905 * The EEH cache might not be removed correctly because of
906 * unbalanced kref to the device during unplug time, which
907 * relies on pcibios_release_device(). So we have to remove
908 * that here explicitly.
909 */
910 if (edev->pdev) {
911 eeh_rmv_from_parent_pe(edev);
912 eeh_addr_cache_rmv_dev(edev->pdev);
913 eeh_sysfs_remove_device(edev->pdev);
914 edev->mode &= ~EEH_DEV_SYSFS;
915
916 edev->pdev = NULL;
917 dev->dev.archdata.edev = NULL;
918 }
919
908 edev->pdev = dev; 920 edev->pdev = dev;
909 dev->dev.archdata.edev = edev; 921 dev->dev.archdata.edev = edev;
910 922
@@ -967,7 +979,6 @@ EXPORT_SYMBOL_GPL(eeh_add_sysfs_files);
967/** 979/**
968 * eeh_remove_device - Undo EEH setup for the indicated pci device 980 * eeh_remove_device - Undo EEH setup for the indicated pci device
969 * @dev: pci device to be removed 981 * @dev: pci device to be removed
970 * @purge_pe: remove the PE or not
971 * 982 *
972 * This routine should be called when a device is removed from 983 * This routine should be called when a device is removed from
973 * a running system (e.g. by hotplug or dlpar). It unregisters 984 * a running system (e.g. by hotplug or dlpar). It unregisters
@@ -975,7 +986,7 @@ EXPORT_SYMBOL_GPL(eeh_add_sysfs_files);
975 * this device will no longer be detected after this call; thus, 986 * this device will no longer be detected after this call; thus,
976 * i/o errors affecting this slot may leave this device unusable. 987 * i/o errors affecting this slot may leave this device unusable.
977 */ 988 */
978static void eeh_remove_device(struct pci_dev *dev, int purge_pe) 989void eeh_remove_device(struct pci_dev *dev)
979{ 990{
980 struct eeh_dev *edev; 991 struct eeh_dev *edev;
981 992
@@ -986,42 +997,29 @@ static void eeh_remove_device(struct pci_dev *dev, int purge_pe)
986 /* Unregister the device with the EEH/PCI address search system */ 997 /* Unregister the device with the EEH/PCI address search system */
987 pr_debug("EEH: Removing device %s\n", pci_name(dev)); 998 pr_debug("EEH: Removing device %s\n", pci_name(dev));
988 999
989 if (!edev || !edev->pdev) { 1000 if (!edev || !edev->pdev || !edev->pe) {
990 pr_debug("EEH: Not referenced !\n"); 1001 pr_debug("EEH: Not referenced !\n");
991 return; 1002 return;
992 } 1003 }
1004
1005 /*
1006 * During the hotplug for EEH error recovery, we need the EEH
1007 * device attached to the parent PE in order for BAR restore
1008 * a bit later. So we keep it for BAR restore and remove it
1009 * from the parent PE during the BAR resotre.
1010 */
993 edev->pdev = NULL; 1011 edev->pdev = NULL;
994 dev->dev.archdata.edev = NULL; 1012 dev->dev.archdata.edev = NULL;
995 pci_dev_put(dev); 1013 if (!(edev->pe->state & EEH_PE_KEEP))
1014 eeh_rmv_from_parent_pe(edev);
1015 else
1016 edev->mode |= EEH_DEV_DISCONNECTED;
996 1017
997 eeh_rmv_from_parent_pe(edev, purge_pe);
998 eeh_addr_cache_rmv_dev(dev); 1018 eeh_addr_cache_rmv_dev(dev);
999 eeh_sysfs_remove_device(dev); 1019 eeh_sysfs_remove_device(dev);
1020 edev->mode &= ~EEH_DEV_SYSFS;
1000} 1021}
1001 1022
1002/**
1003 * eeh_remove_bus_device - Undo EEH setup for the indicated PCI device
1004 * @dev: PCI device
1005 * @purge_pe: remove the corresponding PE or not
1006 *
1007 * This routine must be called when a device is removed from the
1008 * running system through hotplug or dlpar. The corresponding
1009 * PCI address cache will be removed.
1010 */
1011void eeh_remove_bus_device(struct pci_dev *dev, int purge_pe)
1012{
1013 struct pci_bus *bus = dev->subordinate;
1014 struct pci_dev *child, *tmp;
1015
1016 eeh_remove_device(dev, purge_pe);
1017
1018 if (bus && dev->hdr_type == PCI_HEADER_TYPE_BRIDGE) {
1019 list_for_each_entry_safe(child, tmp, &bus->devices, bus_list)
1020 eeh_remove_bus_device(child, purge_pe);
1021 }
1022}
1023EXPORT_SYMBOL_GPL(eeh_remove_bus_device);
1024
1025static int proc_eeh_show(struct seq_file *m, void *v) 1023static int proc_eeh_show(struct seq_file *m, void *v)
1026{ 1024{
1027 if (0 == eeh_subsystem_enabled) { 1025 if (0 == eeh_subsystem_enabled) {
@@ -1063,7 +1061,7 @@ static const struct file_operations proc_eeh_operations = {
1063 1061
1064static int __init eeh_init_proc(void) 1062static int __init eeh_init_proc(void)
1065{ 1063{
1066 if (machine_is(pseries)) 1064 if (machine_is(pseries) || machine_is(powernv))
1067 proc_create("powerpc/eeh", 0, NULL, &proc_eeh_operations); 1065 proc_create("powerpc/eeh", 0, NULL, &proc_eeh_operations);
1068 return 0; 1066 return 0;
1069} 1067}
diff --git a/arch/powerpc/kernel/eeh_cache.c b/arch/powerpc/kernel/eeh_cache.c
index f9ac1232a746..e8c9fd546a5c 100644
--- a/arch/powerpc/kernel/eeh_cache.c
+++ b/arch/powerpc/kernel/eeh_cache.c
@@ -68,16 +68,12 @@ static inline struct eeh_dev *__eeh_addr_cache_get_device(unsigned long addr)
68 struct pci_io_addr_range *piar; 68 struct pci_io_addr_range *piar;
69 piar = rb_entry(n, struct pci_io_addr_range, rb_node); 69 piar = rb_entry(n, struct pci_io_addr_range, rb_node);
70 70
71 if (addr < piar->addr_lo) { 71 if (addr < piar->addr_lo)
72 n = n->rb_left; 72 n = n->rb_left;
73 } else { 73 else if (addr > piar->addr_hi)
74 if (addr > piar->addr_hi) { 74 n = n->rb_right;
75 n = n->rb_right; 75 else
76 } else { 76 return piar->edev;
77 pci_dev_get(piar->pcidev);
78 return piar->edev;
79 }
80 }
81 } 77 }
82 78
83 return NULL; 79 return NULL;
@@ -156,7 +152,6 @@ eeh_addr_cache_insert(struct pci_dev *dev, unsigned long alo,
156 if (!piar) 152 if (!piar)
157 return NULL; 153 return NULL;
158 154
159 pci_dev_get(dev);
160 piar->addr_lo = alo; 155 piar->addr_lo = alo;
161 piar->addr_hi = ahi; 156 piar->addr_hi = ahi;
162 piar->edev = pci_dev_to_eeh_dev(dev); 157 piar->edev = pci_dev_to_eeh_dev(dev);
@@ -250,7 +245,6 @@ restart:
250 245
251 if (piar->pcidev == dev) { 246 if (piar->pcidev == dev) {
252 rb_erase(n, &pci_io_addr_cache_root.rb_root); 247 rb_erase(n, &pci_io_addr_cache_root.rb_root);
253 pci_dev_put(piar->pcidev);
254 kfree(piar); 248 kfree(piar);
255 goto restart; 249 goto restart;
256 } 250 }
@@ -302,12 +296,10 @@ void eeh_addr_cache_build(void)
302 if (!edev) 296 if (!edev)
303 continue; 297 continue;
304 298
305 pci_dev_get(dev); /* matching put is in eeh_remove_device() */
306 dev->dev.archdata.edev = edev; 299 dev->dev.archdata.edev = edev;
307 edev->pdev = dev; 300 edev->pdev = dev;
308 301
309 eeh_addr_cache_insert_dev(dev); 302 eeh_addr_cache_insert_dev(dev);
310
311 eeh_sysfs_add_device(dev); 303 eeh_sysfs_add_device(dev);
312 } 304 }
313 305
diff --git a/arch/powerpc/kernel/eeh_driver.c b/arch/powerpc/kernel/eeh_driver.c
index 2b1ce17cae50..36bed5a12750 100644
--- a/arch/powerpc/kernel/eeh_driver.c
+++ b/arch/powerpc/kernel/eeh_driver.c
@@ -143,10 +143,14 @@ static void eeh_disable_irq(struct pci_dev *dev)
143static void eeh_enable_irq(struct pci_dev *dev) 143static void eeh_enable_irq(struct pci_dev *dev)
144{ 144{
145 struct eeh_dev *edev = pci_dev_to_eeh_dev(dev); 145 struct eeh_dev *edev = pci_dev_to_eeh_dev(dev);
146 struct irq_desc *desc;
146 147
147 if ((edev->mode) & EEH_DEV_IRQ_DISABLED) { 148 if ((edev->mode) & EEH_DEV_IRQ_DISABLED) {
148 edev->mode &= ~EEH_DEV_IRQ_DISABLED; 149 edev->mode &= ~EEH_DEV_IRQ_DISABLED;
149 enable_irq(dev->irq); 150
151 desc = irq_to_desc(dev->irq);
152 if (desc && desc->depth > 0)
153 enable_irq(dev->irq);
150 } 154 }
151} 155}
152 156
@@ -338,6 +342,54 @@ static void *eeh_report_failure(void *data, void *userdata)
338 return NULL; 342 return NULL;
339} 343}
340 344
345static void *eeh_rmv_device(void *data, void *userdata)
346{
347 struct pci_driver *driver;
348 struct eeh_dev *edev = (struct eeh_dev *)data;
349 struct pci_dev *dev = eeh_dev_to_pci_dev(edev);
350 int *removed = (int *)userdata;
351
352 /*
353 * Actually, we should remove the PCI bridges as well.
354 * However, that's lots of complexity to do that,
355 * particularly some of devices under the bridge might
356 * support EEH. So we just care about PCI devices for
357 * simplicity here.
358 */
359 if (!dev || (dev->hdr_type & PCI_HEADER_TYPE_BRIDGE))
360 return NULL;
361 driver = eeh_pcid_get(dev);
362 if (driver && driver->err_handler)
363 return NULL;
364
365 /* Remove it from PCI subsystem */
366 pr_debug("EEH: Removing %s without EEH sensitive driver\n",
367 pci_name(dev));
368 edev->bus = dev->bus;
369 edev->mode |= EEH_DEV_DISCONNECTED;
370 (*removed)++;
371
372 pci_stop_and_remove_bus_device(dev);
373
374 return NULL;
375}
376
377static void *eeh_pe_detach_dev(void *data, void *userdata)
378{
379 struct eeh_pe *pe = (struct eeh_pe *)data;
380 struct eeh_dev *edev, *tmp;
381
382 eeh_pe_for_each_dev(pe, edev, tmp) {
383 if (!(edev->mode & EEH_DEV_DISCONNECTED))
384 continue;
385
386 edev->mode &= ~(EEH_DEV_DISCONNECTED | EEH_DEV_IRQ_DISABLED);
387 eeh_rmv_from_parent_pe(edev);
388 }
389
390 return NULL;
391}
392
341/** 393/**
342 * eeh_reset_device - Perform actual reset of a pci slot 394 * eeh_reset_device - Perform actual reset of a pci slot
343 * @pe: EEH PE 395 * @pe: EEH PE
@@ -349,8 +401,9 @@ static void *eeh_report_failure(void *data, void *userdata)
349 */ 401 */
350static int eeh_reset_device(struct eeh_pe *pe, struct pci_bus *bus) 402static int eeh_reset_device(struct eeh_pe *pe, struct pci_bus *bus)
351{ 403{
404 struct pci_bus *frozen_bus = eeh_pe_bus_get(pe);
352 struct timeval tstamp; 405 struct timeval tstamp;
353 int cnt, rc; 406 int cnt, rc, removed = 0;
354 407
355 /* pcibios will clear the counter; save the value */ 408 /* pcibios will clear the counter; save the value */
356 cnt = pe->freeze_count; 409 cnt = pe->freeze_count;
@@ -362,8 +415,11 @@ static int eeh_reset_device(struct eeh_pe *pe, struct pci_bus *bus)
362 * devices are expected to be attached soon when calling 415 * devices are expected to be attached soon when calling
363 * into pcibios_add_pci_devices(). 416 * into pcibios_add_pci_devices().
364 */ 417 */
418 eeh_pe_state_mark(pe, EEH_PE_KEEP);
365 if (bus) 419 if (bus)
366 __pcibios_remove_pci_devices(bus, 0); 420 pcibios_remove_pci_devices(bus);
421 else if (frozen_bus)
422 eeh_pe_dev_traverse(pe, eeh_rmv_device, &removed);
367 423
368 /* Reset the pci controller. (Asserts RST#; resets config space). 424 /* Reset the pci controller. (Asserts RST#; resets config space).
369 * Reconfigure bridges and devices. Don't try to bring the system 425 * Reconfigure bridges and devices. Don't try to bring the system
@@ -384,9 +440,24 @@ static int eeh_reset_device(struct eeh_pe *pe, struct pci_bus *bus)
384 * potentially weird things happen. 440 * potentially weird things happen.
385 */ 441 */
386 if (bus) { 442 if (bus) {
443 pr_info("EEH: Sleep 5s ahead of complete hotplug\n");
387 ssleep(5); 444 ssleep(5);
445
446 /*
447 * The EEH device is still connected with its parent
448 * PE. We should disconnect it so the binding can be
449 * rebuilt when adding PCI devices.
450 */
451 eeh_pe_traverse(pe, eeh_pe_detach_dev, NULL);
388 pcibios_add_pci_devices(bus); 452 pcibios_add_pci_devices(bus);
453 } else if (frozen_bus && removed) {
454 pr_info("EEH: Sleep 5s ahead of partial hotplug\n");
455 ssleep(5);
456
457 eeh_pe_traverse(pe, eeh_pe_detach_dev, NULL);
458 pcibios_add_pci_devices(frozen_bus);
389 } 459 }
460 eeh_pe_state_clear(pe, EEH_PE_KEEP);
390 461
391 pe->tstamp = tstamp; 462 pe->tstamp = tstamp;
392 pe->freeze_count = cnt; 463 pe->freeze_count = cnt;
diff --git a/arch/powerpc/kernel/eeh_pe.c b/arch/powerpc/kernel/eeh_pe.c
index 016588a6f5ed..f9450537e335 100644
--- a/arch/powerpc/kernel/eeh_pe.c
+++ b/arch/powerpc/kernel/eeh_pe.c
@@ -149,8 +149,8 @@ static struct eeh_pe *eeh_pe_next(struct eeh_pe *pe,
149 * callback returns something other than NULL, or no more PEs 149 * callback returns something other than NULL, or no more PEs
150 * to be traversed. 150 * to be traversed.
151 */ 151 */
152static void *eeh_pe_traverse(struct eeh_pe *root, 152void *eeh_pe_traverse(struct eeh_pe *root,
153 eeh_traverse_func fn, void *flag) 153 eeh_traverse_func fn, void *flag)
154{ 154{
155 struct eeh_pe *pe; 155 struct eeh_pe *pe;
156 void *ret; 156 void *ret;
@@ -176,7 +176,7 @@ void *eeh_pe_dev_traverse(struct eeh_pe *root,
176 eeh_traverse_func fn, void *flag) 176 eeh_traverse_func fn, void *flag)
177{ 177{
178 struct eeh_pe *pe; 178 struct eeh_pe *pe;
179 struct eeh_dev *edev; 179 struct eeh_dev *edev, *tmp;
180 void *ret; 180 void *ret;
181 181
182 if (!root) { 182 if (!root) {
@@ -186,7 +186,7 @@ void *eeh_pe_dev_traverse(struct eeh_pe *root,
186 186
187 /* Traverse root PE */ 187 /* Traverse root PE */
188 for (pe = root; pe; pe = eeh_pe_next(pe, root)) { 188 for (pe = root; pe; pe = eeh_pe_next(pe, root)) {
189 eeh_pe_for_each_dev(pe, edev) { 189 eeh_pe_for_each_dev(pe, edev, tmp) {
190 ret = fn(edev, flag); 190 ret = fn(edev, flag);
191 if (ret) 191 if (ret)
192 return ret; 192 return ret;
@@ -333,7 +333,7 @@ int eeh_add_to_parent_pe(struct eeh_dev *edev)
333 while (parent) { 333 while (parent) {
334 if (!(parent->type & EEH_PE_INVALID)) 334 if (!(parent->type & EEH_PE_INVALID))
335 break; 335 break;
336 parent->type &= ~EEH_PE_INVALID; 336 parent->type &= ~(EEH_PE_INVALID | EEH_PE_KEEP);
337 parent = parent->parent; 337 parent = parent->parent;
338 } 338 }
339 pr_debug("EEH: Add %s to Device PE#%x, Parent PE#%x\n", 339 pr_debug("EEH: Add %s to Device PE#%x, Parent PE#%x\n",
@@ -397,21 +397,20 @@ int eeh_add_to_parent_pe(struct eeh_dev *edev)
397/** 397/**
398 * eeh_rmv_from_parent_pe - Remove one EEH device from the associated PE 398 * eeh_rmv_from_parent_pe - Remove one EEH device from the associated PE
399 * @edev: EEH device 399 * @edev: EEH device
400 * @purge_pe: remove PE or not
401 * 400 *
402 * The PE hierarchy tree might be changed when doing PCI hotplug. 401 * The PE hierarchy tree might be changed when doing PCI hotplug.
403 * Also, the PCI devices or buses could be removed from the system 402 * Also, the PCI devices or buses could be removed from the system
404 * during EEH recovery. So we have to call the function remove the 403 * during EEH recovery. So we have to call the function remove the
405 * corresponding PE accordingly if necessary. 404 * corresponding PE accordingly if necessary.
406 */ 405 */
407int eeh_rmv_from_parent_pe(struct eeh_dev *edev, int purge_pe) 406int eeh_rmv_from_parent_pe(struct eeh_dev *edev)
408{ 407{
409 struct eeh_pe *pe, *parent, *child; 408 struct eeh_pe *pe, *parent, *child;
410 int cnt; 409 int cnt;
411 410
412 if (!edev->pe) { 411 if (!edev->pe) {
413 pr_warning("%s: No PE found for EEH device %s\n", 412 pr_debug("%s: No PE found for EEH device %s\n",
414 __func__, edev->dn->full_name); 413 __func__, edev->dn->full_name);
415 return -EEXIST; 414 return -EEXIST;
416 } 415 }
417 416
@@ -431,7 +430,7 @@ int eeh_rmv_from_parent_pe(struct eeh_dev *edev, int purge_pe)
431 if (pe->type & EEH_PE_PHB) 430 if (pe->type & EEH_PE_PHB)
432 break; 431 break;
433 432
434 if (purge_pe) { 433 if (!(pe->state & EEH_PE_KEEP)) {
435 if (list_empty(&pe->edevs) && 434 if (list_empty(&pe->edevs) &&
436 list_empty(&pe->child_list)) { 435 list_empty(&pe->child_list)) {
437 list_del(&pe->child); 436 list_del(&pe->child);
@@ -502,7 +501,7 @@ static void *__eeh_pe_state_mark(void *data, void *flag)
502{ 501{
503 struct eeh_pe *pe = (struct eeh_pe *)data; 502 struct eeh_pe *pe = (struct eeh_pe *)data;
504 int state = *((int *)flag); 503 int state = *((int *)flag);
505 struct eeh_dev *tmp; 504 struct eeh_dev *edev, *tmp;
506 struct pci_dev *pdev; 505 struct pci_dev *pdev;
507 506
508 /* 507 /*
@@ -512,8 +511,8 @@ static void *__eeh_pe_state_mark(void *data, void *flag)
512 * the PCI device driver. 511 * the PCI device driver.
513 */ 512 */
514 pe->state |= state; 513 pe->state |= state;
515 eeh_pe_for_each_dev(pe, tmp) { 514 eeh_pe_for_each_dev(pe, edev, tmp) {
516 pdev = eeh_dev_to_pci_dev(tmp); 515 pdev = eeh_dev_to_pci_dev(edev);
517 if (pdev) 516 if (pdev)
518 pdev->error_state = pci_channel_io_frozen; 517 pdev->error_state = pci_channel_io_frozen;
519 } 518 }
@@ -579,7 +578,7 @@ void eeh_pe_state_clear(struct eeh_pe *pe, int state)
579 * blocked on normal path during the stage. So we need utilize 578 * blocked on normal path during the stage. So we need utilize
580 * eeh operations, which is always permitted. 579 * eeh operations, which is always permitted.
581 */ 580 */
582static void eeh_bridge_check_link(struct pci_dev *pdev, 581static void eeh_bridge_check_link(struct eeh_dev *edev,
583 struct device_node *dn) 582 struct device_node *dn)
584{ 583{
585 int cap; 584 int cap;
@@ -590,16 +589,17 @@ static void eeh_bridge_check_link(struct pci_dev *pdev,
590 * We only check root port and downstream ports of 589 * We only check root port and downstream ports of
591 * PCIe switches 590 * PCIe switches
592 */ 591 */
593 if (!pci_is_pcie(pdev) || 592 if (!(edev->mode & (EEH_DEV_ROOT_PORT | EEH_DEV_DS_PORT)))
594 (pci_pcie_type(pdev) != PCI_EXP_TYPE_ROOT_PORT &&
595 pci_pcie_type(pdev) != PCI_EXP_TYPE_DOWNSTREAM))
596 return; 593 return;
597 594
598 pr_debug("%s: Check PCIe link for %s ...\n", 595 pr_debug("%s: Check PCIe link for %04x:%02x:%02x.%01x ...\n",
599 __func__, pci_name(pdev)); 596 __func__, edev->phb->global_number,
597 edev->config_addr >> 8,
598 PCI_SLOT(edev->config_addr & 0xFF),
599 PCI_FUNC(edev->config_addr & 0xFF));
600 600
601 /* Check slot status */ 601 /* Check slot status */
602 cap = pdev->pcie_cap; 602 cap = edev->pcie_cap;
603 eeh_ops->read_config(dn, cap + PCI_EXP_SLTSTA, 2, &val); 603 eeh_ops->read_config(dn, cap + PCI_EXP_SLTSTA, 2, &val);
604 if (!(val & PCI_EXP_SLTSTA_PDS)) { 604 if (!(val & PCI_EXP_SLTSTA_PDS)) {
605 pr_debug(" No card in the slot (0x%04x) !\n", val); 605 pr_debug(" No card in the slot (0x%04x) !\n", val);
@@ -653,8 +653,7 @@ static void eeh_bridge_check_link(struct pci_dev *pdev,
653#define BYTE_SWAP(OFF) (8*((OFF)/4)+3-(OFF)) 653#define BYTE_SWAP(OFF) (8*((OFF)/4)+3-(OFF))
654#define SAVED_BYTE(OFF) (((u8 *)(edev->config_space))[BYTE_SWAP(OFF)]) 654#define SAVED_BYTE(OFF) (((u8 *)(edev->config_space))[BYTE_SWAP(OFF)])
655 655
656static void eeh_restore_bridge_bars(struct pci_dev *pdev, 656static void eeh_restore_bridge_bars(struct eeh_dev *edev,
657 struct eeh_dev *edev,
658 struct device_node *dn) 657 struct device_node *dn)
659{ 658{
660 int i; 659 int i;
@@ -680,7 +679,7 @@ static void eeh_restore_bridge_bars(struct pci_dev *pdev,
680 eeh_ops->write_config(dn, PCI_COMMAND, 4, edev->config_space[1]); 679 eeh_ops->write_config(dn, PCI_COMMAND, 4, edev->config_space[1]);
681 680
682 /* Check the PCIe link is ready */ 681 /* Check the PCIe link is ready */
683 eeh_bridge_check_link(pdev, dn); 682 eeh_bridge_check_link(edev, dn);
684} 683}
685 684
686static void eeh_restore_device_bars(struct eeh_dev *edev, 685static void eeh_restore_device_bars(struct eeh_dev *edev,
@@ -729,19 +728,12 @@ static void eeh_restore_device_bars(struct eeh_dev *edev,
729 */ 728 */
730static void *eeh_restore_one_device_bars(void *data, void *flag) 729static void *eeh_restore_one_device_bars(void *data, void *flag)
731{ 730{
732 struct pci_dev *pdev = NULL;
733 struct eeh_dev *edev = (struct eeh_dev *)data; 731 struct eeh_dev *edev = (struct eeh_dev *)data;
734 struct device_node *dn = eeh_dev_to_of_node(edev); 732 struct device_node *dn = eeh_dev_to_of_node(edev);
735 733
736 /* Trace the PCI bridge */ 734 /* Do special restore for bridges */
737 if (eeh_probe_mode_dev()) { 735 if (edev->mode & EEH_DEV_BRIDGE)
738 pdev = eeh_dev_to_pci_dev(edev); 736 eeh_restore_bridge_bars(edev, dn);
739 if (pdev->hdr_type != PCI_HEADER_TYPE_BRIDGE)
740 pdev = NULL;
741 }
742
743 if (pdev)
744 eeh_restore_bridge_bars(pdev, edev, dn);
745 else 737 else
746 eeh_restore_device_bars(edev, dn); 738 eeh_restore_device_bars(edev, dn);
747 739
diff --git a/arch/powerpc/kernel/eeh_sysfs.c b/arch/powerpc/kernel/eeh_sysfs.c
index e7ae3484918c..5d753d4f2c75 100644
--- a/arch/powerpc/kernel/eeh_sysfs.c
+++ b/arch/powerpc/kernel/eeh_sysfs.c
@@ -56,19 +56,40 @@ EEH_SHOW_ATTR(eeh_pe_config_addr, pe_config_addr, "0x%x");
56 56
57void eeh_sysfs_add_device(struct pci_dev *pdev) 57void eeh_sysfs_add_device(struct pci_dev *pdev)
58{ 58{
59 struct eeh_dev *edev = pci_dev_to_eeh_dev(pdev);
59 int rc=0; 60 int rc=0;
60 61
62 if (edev && (edev->mode & EEH_DEV_SYSFS))
63 return;
64
61 rc += device_create_file(&pdev->dev, &dev_attr_eeh_mode); 65 rc += device_create_file(&pdev->dev, &dev_attr_eeh_mode);
62 rc += device_create_file(&pdev->dev, &dev_attr_eeh_config_addr); 66 rc += device_create_file(&pdev->dev, &dev_attr_eeh_config_addr);
63 rc += device_create_file(&pdev->dev, &dev_attr_eeh_pe_config_addr); 67 rc += device_create_file(&pdev->dev, &dev_attr_eeh_pe_config_addr);
64 68
65 if (rc) 69 if (rc)
66 printk(KERN_WARNING "EEH: Unable to create sysfs entries\n"); 70 printk(KERN_WARNING "EEH: Unable to create sysfs entries\n");
71 else if (edev)
72 edev->mode |= EEH_DEV_SYSFS;
67} 73}
68 74
69void eeh_sysfs_remove_device(struct pci_dev *pdev) 75void eeh_sysfs_remove_device(struct pci_dev *pdev)
70{ 76{
77 struct eeh_dev *edev = pci_dev_to_eeh_dev(pdev);
78
79 /*
80 * The parent directory might have been removed. We needn't
81 * continue for that case.
82 */
83 if (!pdev->dev.kobj.sd) {
84 if (edev)
85 edev->mode &= ~EEH_DEV_SYSFS;
86 return;
87 }
88
71 device_remove_file(&pdev->dev, &dev_attr_eeh_mode); 89 device_remove_file(&pdev->dev, &dev_attr_eeh_mode);
72 device_remove_file(&pdev->dev, &dev_attr_eeh_config_addr); 90 device_remove_file(&pdev->dev, &dev_attr_eeh_config_addr);
73 device_remove_file(&pdev->dev, &dev_attr_eeh_pe_config_addr); 91 device_remove_file(&pdev->dev, &dev_attr_eeh_pe_config_addr);
92
93 if (edev)
94 edev->mode &= ~EEH_DEV_SYSFS;
74} 95}
diff --git a/arch/powerpc/kernel/entry_64.S b/arch/powerpc/kernel/entry_64.S
index ab15b8d057ad..2bd0b885b0fe 100644
--- a/arch/powerpc/kernel/entry_64.S
+++ b/arch/powerpc/kernel/entry_64.S
@@ -449,15 +449,6 @@ END_FTR_SECTION_IFSET(CPU_FTR_DSCR)
449 449
450#ifdef CONFIG_PPC_BOOK3S_64 450#ifdef CONFIG_PPC_BOOK3S_64
451BEGIN_FTR_SECTION 451BEGIN_FTR_SECTION
452 /*
453 * Back up the TAR across context switches. Note that the TAR is not
454 * available for use in the kernel. (To provide this, the TAR should
455 * be backed up/restored on exception entry/exit instead, and be in
456 * pt_regs. FIXME, this should be in pt_regs anyway (for debug).)
457 */
458 mfspr r0,SPRN_TAR
459 std r0,THREAD_TAR(r3)
460
461 /* Event based branch registers */ 452 /* Event based branch registers */
462 mfspr r0, SPRN_BESCR 453 mfspr r0, SPRN_BESCR
463 std r0, THREAD_BESCR(r3) 454 std r0, THREAD_BESCR(r3)
@@ -584,9 +575,34 @@ BEGIN_FTR_SECTION
584 ld r7,DSCR_DEFAULT@toc(2) 575 ld r7,DSCR_DEFAULT@toc(2)
585 ld r0,THREAD_DSCR(r4) 576 ld r0,THREAD_DSCR(r4)
586 cmpwi r6,0 577 cmpwi r6,0
578 li r8, FSCR_DSCR
587 bne 1f 579 bne 1f
588 ld r0,0(r7) 580 ld r0,0(r7)
5891: cmpd r0,r25 581 b 3f
5821:
583 BEGIN_FTR_SECTION_NESTED(70)
584 mfspr r6, SPRN_FSCR
585 or r6, r6, r8
586 mtspr SPRN_FSCR, r6
587 BEGIN_FTR_SECTION_NESTED(69)
588 mfspr r6, SPRN_HFSCR
589 or r6, r6, r8
590 mtspr SPRN_HFSCR, r6
591 END_FTR_SECTION_NESTED(CPU_FTR_HVMODE, CPU_FTR_HVMODE, 69)
592 b 4f
593 END_FTR_SECTION_NESTED(CPU_FTR_ARCH_207S, CPU_FTR_ARCH_207S, 70)
5943:
595 BEGIN_FTR_SECTION_NESTED(70)
596 mfspr r6, SPRN_FSCR
597 andc r6, r6, r8
598 mtspr SPRN_FSCR, r6
599 BEGIN_FTR_SECTION_NESTED(69)
600 mfspr r6, SPRN_HFSCR
601 andc r6, r6, r8
602 mtspr SPRN_HFSCR, r6
603 END_FTR_SECTION_NESTED(CPU_FTR_HVMODE, CPU_FTR_HVMODE, 69)
604 END_FTR_SECTION_NESTED(CPU_FTR_ARCH_207S, CPU_FTR_ARCH_207S, 70)
6054: cmpd r0,r25
590 beq 2f 606 beq 2f
591 mtspr SPRN_DSCR,r0 607 mtspr SPRN_DSCR,r0
5922: 6082:
diff --git a/arch/powerpc/kernel/exceptions-64s.S b/arch/powerpc/kernel/exceptions-64s.S
index 4e00d223b2e3..902ca3c6b4b6 100644
--- a/arch/powerpc/kernel/exceptions-64s.S
+++ b/arch/powerpc/kernel/exceptions-64s.S
@@ -848,7 +848,7 @@ hv_facility_unavailable_relon_trampoline:
848 . = 0x4f80 848 . = 0x4f80
849 SET_SCRATCH0(r13) 849 SET_SCRATCH0(r13)
850 EXCEPTION_PROLOG_0(PACA_EXGEN) 850 EXCEPTION_PROLOG_0(PACA_EXGEN)
851 b facility_unavailable_relon_hv 851 b hv_facility_unavailable_relon_hv
852 852
853 STD_RELON_EXCEPTION_PSERIES(0x5300, 0x1300, instruction_breakpoint) 853 STD_RELON_EXCEPTION_PSERIES(0x5300, 0x1300, instruction_breakpoint)
854#ifdef CONFIG_PPC_DENORMALISATION 854#ifdef CONFIG_PPC_DENORMALISATION
@@ -1175,6 +1175,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_VSX)
1175 b .ret_from_except 1175 b .ret_from_except
1176 1176
1177 STD_EXCEPTION_COMMON(0xf60, facility_unavailable, .facility_unavailable_exception) 1177 STD_EXCEPTION_COMMON(0xf60, facility_unavailable, .facility_unavailable_exception)
1178 STD_EXCEPTION_COMMON(0xf80, hv_facility_unavailable, .facility_unavailable_exception)
1178 1179
1179 .align 7 1180 .align 7
1180 .globl __end_handlers 1181 .globl __end_handlers
@@ -1188,7 +1189,7 @@ __end_handlers:
1188 STD_RELON_EXCEPTION_PSERIES_OOL(0xf20, altivec_unavailable) 1189 STD_RELON_EXCEPTION_PSERIES_OOL(0xf20, altivec_unavailable)
1189 STD_RELON_EXCEPTION_PSERIES_OOL(0xf40, vsx_unavailable) 1190 STD_RELON_EXCEPTION_PSERIES_OOL(0xf40, vsx_unavailable)
1190 STD_RELON_EXCEPTION_PSERIES_OOL(0xf60, facility_unavailable) 1191 STD_RELON_EXCEPTION_PSERIES_OOL(0xf60, facility_unavailable)
1191 STD_RELON_EXCEPTION_HV_OOL(0xf80, facility_unavailable) 1192 STD_RELON_EXCEPTION_HV_OOL(0xf80, hv_facility_unavailable)
1192 1193
1193#if defined(CONFIG_PPC_PSERIES) || defined(CONFIG_PPC_POWERNV) 1194#if defined(CONFIG_PPC_PSERIES) || defined(CONFIG_PPC_POWERNV)
1194/* 1195/*
diff --git a/arch/powerpc/kernel/irq.c b/arch/powerpc/kernel/irq.c
index 2e51cde616d2..c69440cef7af 100644
--- a/arch/powerpc/kernel/irq.c
+++ b/arch/powerpc/kernel/irq.c
@@ -362,7 +362,7 @@ int arch_show_interrupts(struct seq_file *p, int prec)
362 seq_printf(p, "%10u ", per_cpu(irq_stat, j).spurious_irqs); 362 seq_printf(p, "%10u ", per_cpu(irq_stat, j).spurious_irqs);
363 seq_printf(p, " Spurious interrupts\n"); 363 seq_printf(p, " Spurious interrupts\n");
364 364
365 seq_printf(p, "%*s: ", prec, "CNT"); 365 seq_printf(p, "%*s: ", prec, "PMI");
366 for_each_online_cpu(j) 366 for_each_online_cpu(j)
367 seq_printf(p, "%10u ", per_cpu(irq_stat, j).pmu_irqs); 367 seq_printf(p, "%10u ", per_cpu(irq_stat, j).pmu_irqs);
368 seq_printf(p, " Performance monitoring interrupts\n"); 368 seq_printf(p, " Performance monitoring interrupts\n");
diff --git a/arch/powerpc/kernel/pci-common.c b/arch/powerpc/kernel/pci-common.c
index f46914a0f33e..7d22a675fe1a 100644
--- a/arch/powerpc/kernel/pci-common.c
+++ b/arch/powerpc/kernel/pci-common.c
@@ -1462,6 +1462,8 @@ void pcibios_finish_adding_to_bus(struct pci_bus *bus)
1462 /* Allocate bus and devices resources */ 1462 /* Allocate bus and devices resources */
1463 pcibios_allocate_bus_resources(bus); 1463 pcibios_allocate_bus_resources(bus);
1464 pcibios_claim_one_bus(bus); 1464 pcibios_claim_one_bus(bus);
1465 if (!pci_has_flag(PCI_PROBE_ONLY))
1466 pci_assign_unassigned_bus_resources(bus);
1465 1467
1466 /* Fixup EEH */ 1468 /* Fixup EEH */
1467 eeh_add_device_tree_late(bus); 1469 eeh_add_device_tree_late(bus);
diff --git a/arch/powerpc/kernel/pci-hotplug.c b/arch/powerpc/kernel/pci-hotplug.c
index 3f608800c06b..c1e17ae68a08 100644
--- a/arch/powerpc/kernel/pci-hotplug.c
+++ b/arch/powerpc/kernel/pci-hotplug.c
@@ -22,45 +22,40 @@
22#include <asm/eeh.h> 22#include <asm/eeh.h>
23 23
24/** 24/**
25 * __pcibios_remove_pci_devices - remove all devices under this bus 25 * pcibios_release_device - release PCI device
26 * @dev: PCI device
27 *
28 * The function is called before releasing the indicated PCI device.
29 */
30void pcibios_release_device(struct pci_dev *dev)
31{
32 eeh_remove_device(dev);
33}
34
35/**
36 * pcibios_remove_pci_devices - remove all devices under this bus
26 * @bus: the indicated PCI bus 37 * @bus: the indicated PCI bus
27 * @purge_pe: destroy the PE on removal of PCI devices
28 * 38 *
29 * Remove all of the PCI devices under this bus both from the 39 * Remove all of the PCI devices under this bus both from the
30 * linux pci device tree, and from the powerpc EEH address cache. 40 * linux pci device tree, and from the powerpc EEH address cache.
31 * By default, the corresponding PE will be destroied during the
32 * normal PCI hotplug path. For PCI hotplug during EEH recovery,
33 * the corresponding PE won't be destroied and deallocated.
34 */ 41 */
35void __pcibios_remove_pci_devices(struct pci_bus *bus, int purge_pe) 42void pcibios_remove_pci_devices(struct pci_bus *bus)
36{ 43{
37 struct pci_dev *dev, *tmp; 44 struct pci_dev *dev, *tmp;
38 struct pci_bus *child_bus; 45 struct pci_bus *child_bus;
39 46
40 /* First go down child busses */ 47 /* First go down child busses */
41 list_for_each_entry(child_bus, &bus->children, node) 48 list_for_each_entry(child_bus, &bus->children, node)
42 __pcibios_remove_pci_devices(child_bus, purge_pe); 49 pcibios_remove_pci_devices(child_bus);
43 50
44 pr_debug("PCI: Removing devices on bus %04x:%02x\n", 51 pr_debug("PCI: Removing devices on bus %04x:%02x\n",
45 pci_domain_nr(bus), bus->number); 52 pci_domain_nr(bus), bus->number);
46 list_for_each_entry_safe(dev, tmp, &bus->devices, bus_list) { 53 list_for_each_entry_safe(dev, tmp, &bus->devices, bus_list) {
47 pr_debug(" * Removing %s...\n", pci_name(dev)); 54 pr_debug(" Removing %s...\n", pci_name(dev));
48 eeh_remove_bus_device(dev, purge_pe);
49 pci_stop_and_remove_bus_device(dev); 55 pci_stop_and_remove_bus_device(dev);
50 } 56 }
51} 57}
52 58
53/**
54 * pcibios_remove_pci_devices - remove all devices under this bus
55 * @bus: the indicated PCI bus
56 *
57 * Remove all of the PCI devices under this bus both from the
58 * linux pci device tree, and from the powerpc EEH address cache.
59 */
60void pcibios_remove_pci_devices(struct pci_bus *bus)
61{
62 __pcibios_remove_pci_devices(bus, 1);
63}
64EXPORT_SYMBOL_GPL(pcibios_remove_pci_devices); 59EXPORT_SYMBOL_GPL(pcibios_remove_pci_devices);
65 60
66/** 61/**
@@ -76,7 +71,7 @@ EXPORT_SYMBOL_GPL(pcibios_remove_pci_devices);
76 */ 71 */
77void pcibios_add_pci_devices(struct pci_bus * bus) 72void pcibios_add_pci_devices(struct pci_bus * bus)
78{ 73{
79 int slotno, num, mode, pass, max; 74 int slotno, mode, pass, max;
80 struct pci_dev *dev; 75 struct pci_dev *dev;
81 struct device_node *dn = pci_bus_to_OF_node(bus); 76 struct device_node *dn = pci_bus_to_OF_node(bus);
82 77
@@ -90,11 +85,15 @@ void pcibios_add_pci_devices(struct pci_bus * bus)
90 /* use ofdt-based probe */ 85 /* use ofdt-based probe */
91 of_rescan_bus(dn, bus); 86 of_rescan_bus(dn, bus);
92 } else if (mode == PCI_PROBE_NORMAL) { 87 } else if (mode == PCI_PROBE_NORMAL) {
93 /* use legacy probe */ 88 /*
89 * Use legacy probe. In the partial hotplug case, we
90 * probably have grandchildren devices unplugged. So
91 * we don't check the return value from pci_scan_slot() in
92 * order for fully rescan all the way down to pick them up.
93 * They can have been removed during partial hotplug.
94 */
94 slotno = PCI_SLOT(PCI_DN(dn->child)->devfn); 95 slotno = PCI_SLOT(PCI_DN(dn->child)->devfn);
95 num = pci_scan_slot(bus, PCI_DEVFN(slotno, 0)); 96 pci_scan_slot(bus, PCI_DEVFN(slotno, 0));
96 if (!num)
97 return;
98 pcibios_setup_bus_devices(bus); 97 pcibios_setup_bus_devices(bus);
99 max = bus->busn_res.start; 98 max = bus->busn_res.start;
100 for (pass = 0; pass < 2; pass++) { 99 for (pass = 0; pass < 2; pass++) {
diff --git a/arch/powerpc/kernel/pci_of_scan.c b/arch/powerpc/kernel/pci_of_scan.c
index 6b0ba5854d99..15d9105323bf 100644
--- a/arch/powerpc/kernel/pci_of_scan.c
+++ b/arch/powerpc/kernel/pci_of_scan.c
@@ -230,11 +230,14 @@ void of_scan_pci_bridge(struct pci_dev *dev)
230 return; 230 return;
231 } 231 }
232 232
233 bus = pci_add_new_bus(dev->bus, dev, busrange[0]); 233 bus = pci_find_bus(pci_domain_nr(dev->bus), busrange[0]);
234 if (!bus) { 234 if (!bus) {
235 printk(KERN_ERR "Failed to create pci bus for %s\n", 235 bus = pci_add_new_bus(dev->bus, dev, busrange[0]);
236 node->full_name); 236 if (!bus) {
237 return; 237 printk(KERN_ERR "Failed to create pci bus for %s\n",
238 node->full_name);
239 return;
240 }
238 } 241 }
239 242
240 bus->primary = dev->bus->number; 243 bus->primary = dev->bus->number;
@@ -292,6 +295,38 @@ void of_scan_pci_bridge(struct pci_dev *dev)
292} 295}
293EXPORT_SYMBOL(of_scan_pci_bridge); 296EXPORT_SYMBOL(of_scan_pci_bridge);
294 297
298static struct pci_dev *of_scan_pci_dev(struct pci_bus *bus,
299 struct device_node *dn)
300{
301 struct pci_dev *dev = NULL;
302 const u32 *reg;
303 int reglen, devfn;
304
305 pr_debug(" * %s\n", dn->full_name);
306 if (!of_device_is_available(dn))
307 return NULL;
308
309 reg = of_get_property(dn, "reg", &reglen);
310 if (reg == NULL || reglen < 20)
311 return NULL;
312 devfn = (reg[0] >> 8) & 0xff;
313
314 /* Check if the PCI device is already there */
315 dev = pci_get_slot(bus, devfn);
316 if (dev) {
317 pci_dev_put(dev);
318 return dev;
319 }
320
321 /* create a new pci_dev for this device */
322 dev = of_create_pci_dev(dn, bus, devfn);
323 if (!dev)
324 return NULL;
325
326 pr_debug(" dev header type: %x\n", dev->hdr_type);
327 return dev;
328}
329
295/** 330/**
296 * __of_scan_bus - given a PCI bus node, setup bus and scan for child devices 331 * __of_scan_bus - given a PCI bus node, setup bus and scan for child devices
297 * @node: device tree node for the PCI bus 332 * @node: device tree node for the PCI bus
@@ -302,8 +337,6 @@ static void __of_scan_bus(struct device_node *node, struct pci_bus *bus,
302 int rescan_existing) 337 int rescan_existing)
303{ 338{
304 struct device_node *child; 339 struct device_node *child;
305 const u32 *reg;
306 int reglen, devfn;
307 struct pci_dev *dev; 340 struct pci_dev *dev;
308 341
309 pr_debug("of_scan_bus(%s) bus no %d...\n", 342 pr_debug("of_scan_bus(%s) bus no %d...\n",
@@ -311,16 +344,7 @@ static void __of_scan_bus(struct device_node *node, struct pci_bus *bus,
311 344
312 /* Scan direct children */ 345 /* Scan direct children */
313 for_each_child_of_node(node, child) { 346 for_each_child_of_node(node, child) {
314 pr_debug(" * %s\n", child->full_name); 347 dev = of_scan_pci_dev(bus, child);
315 if (!of_device_is_available(child))
316 continue;
317 reg = of_get_property(child, "reg", &reglen);
318 if (reg == NULL || reglen < 20)
319 continue;
320 devfn = (reg[0] >> 8) & 0xff;
321
322 /* create a new pci_dev for this device */
323 dev = of_create_pci_dev(child, bus, devfn);
324 if (!dev) 348 if (!dev)
325 continue; 349 continue;
326 pr_debug(" dev header type: %x\n", dev->hdr_type); 350 pr_debug(" dev header type: %x\n", dev->hdr_type);
diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
index c517dbe705fd..8083be20fe5e 100644
--- a/arch/powerpc/kernel/process.c
+++ b/arch/powerpc/kernel/process.c
@@ -600,6 +600,16 @@ struct task_struct *__switch_to(struct task_struct *prev,
600 struct ppc64_tlb_batch *batch; 600 struct ppc64_tlb_batch *batch;
601#endif 601#endif
602 602
603 /* Back up the TAR across context switches.
604 * Note that the TAR is not available for use in the kernel. (To
605 * provide this, the TAR should be backed up/restored on exception
606 * entry/exit instead, and be in pt_regs. FIXME, this should be in
607 * pt_regs anyway (for debug).)
608 * Save the TAR here before we do treclaim/trecheckpoint as these
609 * will change the TAR.
610 */
611 save_tar(&prev->thread);
612
603 __switch_to_tm(prev); 613 __switch_to_tm(prev);
604 614
605#ifdef CONFIG_SMP 615#ifdef CONFIG_SMP
diff --git a/arch/powerpc/kernel/prom_init.c b/arch/powerpc/kernel/prom_init.c
index 5eccda9fd33f..607902424e73 100644
--- a/arch/powerpc/kernel/prom_init.c
+++ b/arch/powerpc/kernel/prom_init.c
@@ -644,7 +644,8 @@ unsigned char ibm_architecture_vec[] = {
644 W(0xfffe0000), W(0x003a0000), /* POWER5/POWER5+ */ 644 W(0xfffe0000), W(0x003a0000), /* POWER5/POWER5+ */
645 W(0xffff0000), W(0x003e0000), /* POWER6 */ 645 W(0xffff0000), W(0x003e0000), /* POWER6 */
646 W(0xffff0000), W(0x003f0000), /* POWER7 */ 646 W(0xffff0000), W(0x003f0000), /* POWER7 */
647 W(0xffff0000), W(0x004b0000), /* POWER8 */ 647 W(0xffff0000), W(0x004b0000), /* POWER8E */
648 W(0xffff0000), W(0x004d0000), /* POWER8 */
648 W(0xffffffff), W(0x0f000004), /* all 2.07-compliant */ 649 W(0xffffffff), W(0x0f000004), /* all 2.07-compliant */
649 W(0xffffffff), W(0x0f000003), /* all 2.06-compliant */ 650 W(0xffffffff), W(0x0f000003), /* all 2.06-compliant */
650 W(0xffffffff), W(0x0f000002), /* all 2.05-compliant */ 651 W(0xffffffff), W(0x0f000002), /* all 2.05-compliant */
@@ -706,7 +707,7 @@ unsigned char ibm_architecture_vec[] = {
706 * must match by the macro below. Update the definition if 707 * must match by the macro below. Update the definition if
707 * the structure layout changes. 708 * the structure layout changes.
708 */ 709 */
709#define IBM_ARCH_VEC_NRCORES_OFFSET 117 710#define IBM_ARCH_VEC_NRCORES_OFFSET 125
710 W(NR_CPUS), /* number of cores supported */ 711 W(NR_CPUS), /* number of cores supported */
711 0, 712 0,
712 0, 713 0,
diff --git a/arch/powerpc/kernel/tm.S b/arch/powerpc/kernel/tm.S
index 51be8fb24803..0554d1f6d70d 100644
--- a/arch/powerpc/kernel/tm.S
+++ b/arch/powerpc/kernel/tm.S
@@ -233,6 +233,16 @@ dont_backup_fp:
233 std r5, _CCR(r7) 233 std r5, _CCR(r7)
234 std r6, _XER(r7) 234 std r6, _XER(r7)
235 235
236
237 /* ******************** TAR, PPR, DSCR ********** */
238 mfspr r3, SPRN_TAR
239 mfspr r4, SPRN_PPR
240 mfspr r5, SPRN_DSCR
241
242 std r3, THREAD_TM_TAR(r12)
243 std r4, THREAD_TM_PPR(r12)
244 std r5, THREAD_TM_DSCR(r12)
245
236 /* MSR and flags: We don't change CRs, and we don't need to alter 246 /* MSR and flags: We don't change CRs, and we don't need to alter
237 * MSR. 247 * MSR.
238 */ 248 */
@@ -347,6 +357,16 @@ dont_restore_fp:
347 mtmsr r6 /* FP/Vec off again! */ 357 mtmsr r6 /* FP/Vec off again! */
348 358
349restore_gprs: 359restore_gprs:
360
361 /* ******************** TAR, PPR, DSCR ********** */
362 ld r4, THREAD_TM_TAR(r3)
363 ld r5, THREAD_TM_PPR(r3)
364 ld r6, THREAD_TM_DSCR(r3)
365
366 mtspr SPRN_TAR, r4
367 mtspr SPRN_PPR, r5
368 mtspr SPRN_DSCR, r6
369
350 /* ******************** CR,LR,CCR,MSR ********** */ 370 /* ******************** CR,LR,CCR,MSR ********** */
351 ld r3, _CTR(r7) 371 ld r3, _CTR(r7)
352 ld r4, _LINK(r7) 372 ld r4, _LINK(r7)
diff --git a/arch/powerpc/kernel/traps.c b/arch/powerpc/kernel/traps.c
index bf33c22e38a4..e435bc089ea3 100644
--- a/arch/powerpc/kernel/traps.c
+++ b/arch/powerpc/kernel/traps.c
@@ -44,9 +44,7 @@
44#include <asm/machdep.h> 44#include <asm/machdep.h>
45#include <asm/rtas.h> 45#include <asm/rtas.h>
46#include <asm/pmc.h> 46#include <asm/pmc.h>
47#ifdef CONFIG_PPC32
48#include <asm/reg.h> 47#include <asm/reg.h>
49#endif
50#ifdef CONFIG_PMAC_BACKLIGHT 48#ifdef CONFIG_PMAC_BACKLIGHT
51#include <asm/backlight.h> 49#include <asm/backlight.h>
52#endif 50#endif
@@ -1296,43 +1294,54 @@ void vsx_unavailable_exception(struct pt_regs *regs)
1296 die("Unrecoverable VSX Unavailable Exception", regs, SIGABRT); 1294 die("Unrecoverable VSX Unavailable Exception", regs, SIGABRT);
1297} 1295}
1298 1296
1297#ifdef CONFIG_PPC64
1299void facility_unavailable_exception(struct pt_regs *regs) 1298void facility_unavailable_exception(struct pt_regs *regs)
1300{ 1299{
1301 static char *facility_strings[] = { 1300 static char *facility_strings[] = {
1302 "FPU", 1301 [FSCR_FP_LG] = "FPU",
1303 "VMX/VSX", 1302 [FSCR_VECVSX_LG] = "VMX/VSX",
1304 "DSCR", 1303 [FSCR_DSCR_LG] = "DSCR",
1305 "PMU SPRs", 1304 [FSCR_PM_LG] = "PMU SPRs",
1306 "BHRB", 1305 [FSCR_BHRB_LG] = "BHRB",
1307 "TM", 1306 [FSCR_TM_LG] = "TM",
1308 "AT", 1307 [FSCR_EBB_LG] = "EBB",
1309 "EBB", 1308 [FSCR_TAR_LG] = "TAR",
1310 "TAR",
1311 }; 1309 };
1312 char *facility, *prefix; 1310 char *facility = "unknown";
1313 u64 value; 1311 u64 value;
1312 u8 status;
1313 bool hv;
1314 1314
1315 if (regs->trap == 0xf60) { 1315 hv = (regs->trap == 0xf80);
1316 value = mfspr(SPRN_FSCR); 1316 if (hv)
1317 prefix = "";
1318 } else {
1319 value = mfspr(SPRN_HFSCR); 1317 value = mfspr(SPRN_HFSCR);
1320 prefix = "Hypervisor "; 1318 else
1319 value = mfspr(SPRN_FSCR);
1320
1321 status = value >> 56;
1322 if (status == FSCR_DSCR_LG) {
1323 /* User is acessing the DSCR. Set the inherit bit and allow
1324 * the user to set it directly in future by setting via the
1325 * H/FSCR DSCR bit.
1326 */
1327 current->thread.dscr_inherit = 1;
1328 if (hv)
1329 mtspr(SPRN_HFSCR, value | HFSCR_DSCR);
1330 else
1331 mtspr(SPRN_FSCR, value | FSCR_DSCR);
1332 return;
1321 } 1333 }
1322 1334
1323 value = value >> 56; 1335 if ((status < ARRAY_SIZE(facility_strings)) &&
1336 facility_strings[status])
1337 facility = facility_strings[status];
1324 1338
1325 /* We restore the interrupt state now */ 1339 /* We restore the interrupt state now */
1326 if (!arch_irq_disabled_regs(regs)) 1340 if (!arch_irq_disabled_regs(regs))
1327 local_irq_enable(); 1341 local_irq_enable();
1328 1342
1329 if (value < ARRAY_SIZE(facility_strings))
1330 facility = facility_strings[value];
1331 else
1332 facility = "unknown";
1333
1334 pr_err("%sFacility '%s' unavailable, exception at 0x%lx, MSR=%lx\n", 1343 pr_err("%sFacility '%s' unavailable, exception at 0x%lx, MSR=%lx\n",
1335 prefix, facility, regs->nip, regs->msr); 1344 hv ? "Hypervisor " : "", facility, regs->nip, regs->msr);
1336 1345
1337 if (user_mode(regs)) { 1346 if (user_mode(regs)) {
1338 _exception(SIGILL, regs, ILL_ILLOPC, regs->nip); 1347 _exception(SIGILL, regs, ILL_ILLOPC, regs->nip);
@@ -1341,6 +1350,7 @@ void facility_unavailable_exception(struct pt_regs *regs)
1341 1350
1342 die("Unexpected facility unavailable exception", regs, SIGABRT); 1351 die("Unexpected facility unavailable exception", regs, SIGABRT);
1343} 1352}
1353#endif
1344 1354
1345#ifdef CONFIG_PPC_TRANSACTIONAL_MEM 1355#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
1346 1356
diff --git a/arch/powerpc/kernel/vmlinux.lds.S b/arch/powerpc/kernel/vmlinux.lds.S
index 654e479802f2..f096e72262f4 100644
--- a/arch/powerpc/kernel/vmlinux.lds.S
+++ b/arch/powerpc/kernel/vmlinux.lds.S
@@ -38,9 +38,6 @@ jiffies = jiffies_64 + 4;
38#endif 38#endif
39SECTIONS 39SECTIONS
40{ 40{
41 . = 0;
42 reloc_start = .;
43
44 . = KERNELBASE; 41 . = KERNELBASE;
45 42
46/* 43/*
diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c
index 2efa9dde741a..7629cd3eb91a 100644
--- a/arch/powerpc/kvm/book3s_hv.c
+++ b/arch/powerpc/kvm/book3s_hv.c
@@ -1809,7 +1809,7 @@ static int kvmppc_hv_setup_htab_rma(struct kvm_vcpu *vcpu)
1809 rma_size <<= PAGE_SHIFT; 1809 rma_size <<= PAGE_SHIFT;
1810 rmls = lpcr_rmls(rma_size); 1810 rmls = lpcr_rmls(rma_size);
1811 err = -EINVAL; 1811 err = -EINVAL;
1812 if (rmls < 0) { 1812 if ((long)rmls < 0) {
1813 pr_err("KVM: Can't use RMA of 0x%lx bytes\n", rma_size); 1813 pr_err("KVM: Can't use RMA of 0x%lx bytes\n", rma_size);
1814 goto out_srcu; 1814 goto out_srcu;
1815 } 1815 }
@@ -1874,7 +1874,7 @@ int kvmppc_core_init_vm(struct kvm *kvm)
1874 /* Allocate the guest's logical partition ID */ 1874 /* Allocate the guest's logical partition ID */
1875 1875
1876 lpid = kvmppc_alloc_lpid(); 1876 lpid = kvmppc_alloc_lpid();
1877 if (lpid < 0) 1877 if ((long)lpid < 0)
1878 return -ENOMEM; 1878 return -ENOMEM;
1879 kvm->arch.lpid = lpid; 1879 kvm->arch.lpid = lpid;
1880 1880
diff --git a/arch/powerpc/kvm/book3s_pr.c b/arch/powerpc/kvm/book3s_pr.c
index 19498a567a81..c6e13d9a9e15 100644
--- a/arch/powerpc/kvm/book3s_pr.c
+++ b/arch/powerpc/kvm/book3s_pr.c
@@ -1047,11 +1047,12 @@ struct kvm_vcpu *kvmppc_core_vcpu_create(struct kvm *kvm, unsigned int id)
1047 if (err) 1047 if (err)
1048 goto free_shadow_vcpu; 1048 goto free_shadow_vcpu;
1049 1049
1050 err = -ENOMEM;
1050 p = __get_free_page(GFP_KERNEL|__GFP_ZERO); 1051 p = __get_free_page(GFP_KERNEL|__GFP_ZERO);
1051 /* the real shared page fills the last 4k of our page */
1052 vcpu->arch.shared = (void*)(p + PAGE_SIZE - 4096);
1053 if (!p) 1052 if (!p)
1054 goto uninit_vcpu; 1053 goto uninit_vcpu;
1054 /* the real shared page fills the last 4k of our page */
1055 vcpu->arch.shared = (void *)(p + PAGE_SIZE - 4096);
1055 1056
1056#ifdef CONFIG_PPC_BOOK3S_64 1057#ifdef CONFIG_PPC_BOOK3S_64
1057 /* default to book3s_64 (970fx) */ 1058 /* default to book3s_64 (970fx) */
diff --git a/arch/powerpc/mm/hash_native_64.c b/arch/powerpc/mm/hash_native_64.c
index 3f0c30ae4791..c33d939120c9 100644
--- a/arch/powerpc/mm/hash_native_64.c
+++ b/arch/powerpc/mm/hash_native_64.c
@@ -43,6 +43,7 @@ static inline void __tlbie(unsigned long vpn, int psize, int apsize, int ssize)
43{ 43{
44 unsigned long va; 44 unsigned long va;
45 unsigned int penc; 45 unsigned int penc;
46 unsigned long sllp;
46 47
47 /* 48 /*
48 * We need 14 to 65 bits of va for a tlibe of 4K page 49 * We need 14 to 65 bits of va for a tlibe of 4K page
@@ -64,7 +65,9 @@ static inline void __tlbie(unsigned long vpn, int psize, int apsize, int ssize)
64 /* clear out bits after (52) [0....52.....63] */ 65 /* clear out bits after (52) [0....52.....63] */
65 va &= ~((1ul << (64 - 52)) - 1); 66 va &= ~((1ul << (64 - 52)) - 1);
66 va |= ssize << 8; 67 va |= ssize << 8;
67 va |= mmu_psize_defs[apsize].sllp << 6; 68 sllp = ((mmu_psize_defs[apsize].sllp & SLB_VSID_L) >> 6) |
69 ((mmu_psize_defs[apsize].sllp & SLB_VSID_LP) >> 4);
70 va |= sllp << 5;
68 asm volatile(ASM_FTR_IFCLR("tlbie %0,0", PPC_TLBIE(%1,%0), %2) 71 asm volatile(ASM_FTR_IFCLR("tlbie %0,0", PPC_TLBIE(%1,%0), %2)
69 : : "r" (va), "r"(0), "i" (CPU_FTR_ARCH_206) 72 : : "r" (va), "r"(0), "i" (CPU_FTR_ARCH_206)
70 : "memory"); 73 : "memory");
@@ -98,6 +101,7 @@ static inline void __tlbiel(unsigned long vpn, int psize, int apsize, int ssize)
98{ 101{
99 unsigned long va; 102 unsigned long va;
100 unsigned int penc; 103 unsigned int penc;
104 unsigned long sllp;
101 105
102 /* VPN_SHIFT can be atmost 12 */ 106 /* VPN_SHIFT can be atmost 12 */
103 va = vpn << VPN_SHIFT; 107 va = vpn << VPN_SHIFT;
@@ -113,7 +117,9 @@ static inline void __tlbiel(unsigned long vpn, int psize, int apsize, int ssize)
113 /* clear out bits after(52) [0....52.....63] */ 117 /* clear out bits after(52) [0....52.....63] */
114 va &= ~((1ul << (64 - 52)) - 1); 118 va &= ~((1ul << (64 - 52)) - 1);
115 va |= ssize << 8; 119 va |= ssize << 8;
116 va |= mmu_psize_defs[apsize].sllp << 6; 120 sllp = ((mmu_psize_defs[apsize].sllp & SLB_VSID_L) >> 6) |
121 ((mmu_psize_defs[apsize].sllp & SLB_VSID_LP) >> 4);
122 va |= sllp << 5;
117 asm volatile(".long 0x7c000224 | (%0 << 11) | (0 << 21)" 123 asm volatile(".long 0x7c000224 | (%0 << 11) | (0 << 21)"
118 : : "r"(va) : "memory"); 124 : : "r"(va) : "memory");
119 break; 125 break;
@@ -554,6 +560,7 @@ static void hpte_decode(struct hash_pte *hpte, unsigned long slot,
554 seg_off |= vpi << shift; 560 seg_off |= vpi << shift;
555 } 561 }
556 *vpn = vsid << (SID_SHIFT - VPN_SHIFT) | seg_off >> VPN_SHIFT; 562 *vpn = vsid << (SID_SHIFT - VPN_SHIFT) | seg_off >> VPN_SHIFT;
563 break;
557 case MMU_SEGSIZE_1T: 564 case MMU_SEGSIZE_1T:
558 /* We only have 40 - 23 bits of seg_off in avpn */ 565 /* We only have 40 - 23 bits of seg_off in avpn */
559 seg_off = (avpn & 0x1ffff) << 23; 566 seg_off = (avpn & 0x1ffff) << 23;
@@ -563,6 +570,7 @@ static void hpte_decode(struct hash_pte *hpte, unsigned long slot,
563 seg_off |= vpi << shift; 570 seg_off |= vpi << shift;
564 } 571 }
565 *vpn = vsid << (SID_SHIFT_1T - VPN_SHIFT) | seg_off >> VPN_SHIFT; 572 *vpn = vsid << (SID_SHIFT_1T - VPN_SHIFT) | seg_off >> VPN_SHIFT;
573 break;
566 default: 574 default:
567 *vpn = size = 0; 575 *vpn = size = 0;
568 } 576 }
diff --git a/arch/powerpc/mm/numa.c b/arch/powerpc/mm/numa.c
index 08397217e8ac..5850798826cd 100644
--- a/arch/powerpc/mm/numa.c
+++ b/arch/powerpc/mm/numa.c
@@ -27,6 +27,7 @@
27#include <linux/seq_file.h> 27#include <linux/seq_file.h>
28#include <linux/uaccess.h> 28#include <linux/uaccess.h>
29#include <linux/slab.h> 29#include <linux/slab.h>
30#include <asm/cputhreads.h>
30#include <asm/sparsemem.h> 31#include <asm/sparsemem.h>
31#include <asm/prom.h> 32#include <asm/prom.h>
32#include <asm/smp.h> 33#include <asm/smp.h>
@@ -1318,7 +1319,8 @@ static int update_cpu_associativity_changes_mask(void)
1318 } 1319 }
1319 } 1320 }
1320 if (changed) { 1321 if (changed) {
1321 cpumask_set_cpu(cpu, changes); 1322 cpumask_or(changes, changes, cpu_sibling_mask(cpu));
1323 cpu = cpu_last_thread_sibling(cpu);
1322 } 1324 }
1323 } 1325 }
1324 1326
@@ -1426,7 +1428,7 @@ static int update_cpu_topology(void *data)
1426 if (!data) 1428 if (!data)
1427 return -EINVAL; 1429 return -EINVAL;
1428 1430
1429 cpu = get_cpu(); 1431 cpu = smp_processor_id();
1430 1432
1431 for (update = data; update; update = update->next) { 1433 for (update = data; update; update = update->next) {
1432 if (cpu != update->cpu) 1434 if (cpu != update->cpu)
@@ -1446,12 +1448,12 @@ static int update_cpu_topology(void *data)
1446 */ 1448 */
1447int arch_update_cpu_topology(void) 1449int arch_update_cpu_topology(void)
1448{ 1450{
1449 unsigned int cpu, changed = 0; 1451 unsigned int cpu, sibling, changed = 0;
1450 struct topology_update_data *updates, *ud; 1452 struct topology_update_data *updates, *ud;
1451 unsigned int associativity[VPHN_ASSOC_BUFSIZE] = {0}; 1453 unsigned int associativity[VPHN_ASSOC_BUFSIZE] = {0};
1452 cpumask_t updated_cpus; 1454 cpumask_t updated_cpus;
1453 struct device *dev; 1455 struct device *dev;
1454 int weight, i = 0; 1456 int weight, new_nid, i = 0;
1455 1457
1456 weight = cpumask_weight(&cpu_associativity_changes_mask); 1458 weight = cpumask_weight(&cpu_associativity_changes_mask);
1457 if (!weight) 1459 if (!weight)
@@ -1464,19 +1466,46 @@ int arch_update_cpu_topology(void)
1464 cpumask_clear(&updated_cpus); 1466 cpumask_clear(&updated_cpus);
1465 1467
1466 for_each_cpu(cpu, &cpu_associativity_changes_mask) { 1468 for_each_cpu(cpu, &cpu_associativity_changes_mask) {
1467 ud = &updates[i++]; 1469 /*
1468 ud->cpu = cpu; 1470 * If siblings aren't flagged for changes, updates list
1469 vphn_get_associativity(cpu, associativity); 1471 * will be too short. Skip on this update and set for next
1470 ud->new_nid = associativity_to_nid(associativity); 1472 * update.
1471 1473 */
1472 if (ud->new_nid < 0 || !node_online(ud->new_nid)) 1474 if (!cpumask_subset(cpu_sibling_mask(cpu),
1473 ud->new_nid = first_online_node; 1475 &cpu_associativity_changes_mask)) {
1476 pr_info("Sibling bits not set for associativity "
1477 "change, cpu%d\n", cpu);
1478 cpumask_or(&cpu_associativity_changes_mask,
1479 &cpu_associativity_changes_mask,
1480 cpu_sibling_mask(cpu));
1481 cpu = cpu_last_thread_sibling(cpu);
1482 continue;
1483 }
1474 1484
1475 ud->old_nid = numa_cpu_lookup_table[cpu]; 1485 /* Use associativity from first thread for all siblings */
1476 cpumask_set_cpu(cpu, &updated_cpus); 1486 vphn_get_associativity(cpu, associativity);
1487 new_nid = associativity_to_nid(associativity);
1488 if (new_nid < 0 || !node_online(new_nid))
1489 new_nid = first_online_node;
1490
1491 if (new_nid == numa_cpu_lookup_table[cpu]) {
1492 cpumask_andnot(&cpu_associativity_changes_mask,
1493 &cpu_associativity_changes_mask,
1494 cpu_sibling_mask(cpu));
1495 cpu = cpu_last_thread_sibling(cpu);
1496 continue;
1497 }
1477 1498
1478 if (i < weight) 1499 for_each_cpu(sibling, cpu_sibling_mask(cpu)) {
1479 ud->next = &updates[i]; 1500 ud = &updates[i++];
1501 ud->cpu = sibling;
1502 ud->new_nid = new_nid;
1503 ud->old_nid = numa_cpu_lookup_table[sibling];
1504 cpumask_set_cpu(sibling, &updated_cpus);
1505 if (i < weight)
1506 ud->next = &updates[i];
1507 }
1508 cpu = cpu_last_thread_sibling(cpu);
1480 } 1509 }
1481 1510
1482 stop_machine(update_cpu_topology, &updates[0], &updated_cpus); 1511 stop_machine(update_cpu_topology, &updates[0], &updated_cpus);
diff --git a/arch/powerpc/perf/core-book3s.c b/arch/powerpc/perf/core-book3s.c
index a3985aee77fe..eeae308cf982 100644
--- a/arch/powerpc/perf/core-book3s.c
+++ b/arch/powerpc/perf/core-book3s.c
@@ -484,7 +484,7 @@ static bool is_ebb_event(struct perf_event *event)
484 * use bit 63 of the event code for something else if they wish. 484 * use bit 63 of the event code for something else if they wish.
485 */ 485 */
486 return (ppmu->flags & PPMU_EBB) && 486 return (ppmu->flags & PPMU_EBB) &&
487 ((event->attr.config >> EVENT_CONFIG_EBB_SHIFT) & 1); 487 ((event->attr.config >> PERF_EVENT_CONFIG_EBB_SHIFT) & 1);
488} 488}
489 489
490static int ebb_event_check(struct perf_event *event) 490static int ebb_event_check(struct perf_event *event)
@@ -1252,8 +1252,11 @@ nocheck:
1252 1252
1253 ret = 0; 1253 ret = 0;
1254 out: 1254 out:
1255 if (has_branch_stack(event)) 1255 if (has_branch_stack(event)) {
1256 power_pmu_bhrb_enable(event); 1256 power_pmu_bhrb_enable(event);
1257 cpuhw->bhrb_filter = ppmu->bhrb_filter_map(
1258 event->attr.branch_sample_type);
1259 }
1257 1260
1258 perf_pmu_enable(event->pmu); 1261 perf_pmu_enable(event->pmu);
1259 local_irq_restore(flags); 1262 local_irq_restore(flags);
diff --git a/arch/powerpc/perf/power8-pmu.c b/arch/powerpc/perf/power8-pmu.c
index 96a64d6a8bdf..2ee4a707f0df 100644
--- a/arch/powerpc/perf/power8-pmu.c
+++ b/arch/powerpc/perf/power8-pmu.c
@@ -118,7 +118,7 @@
118 (EVENT_UNIT_MASK << EVENT_UNIT_SHIFT) | \ 118 (EVENT_UNIT_MASK << EVENT_UNIT_SHIFT) | \
119 (EVENT_COMBINE_MASK << EVENT_COMBINE_SHIFT) | \ 119 (EVENT_COMBINE_MASK << EVENT_COMBINE_SHIFT) | \
120 (EVENT_MARKED_MASK << EVENT_MARKED_SHIFT) | \ 120 (EVENT_MARKED_MASK << EVENT_MARKED_SHIFT) | \
121 (EVENT_EBB_MASK << EVENT_CONFIG_EBB_SHIFT) | \ 121 (EVENT_EBB_MASK << PERF_EVENT_CONFIG_EBB_SHIFT) | \
122 EVENT_PSEL_MASK) 122 EVENT_PSEL_MASK)
123 123
124/* MMCRA IFM bits - POWER8 */ 124/* MMCRA IFM bits - POWER8 */
@@ -233,10 +233,10 @@ static int power8_get_constraint(u64 event, unsigned long *maskp, unsigned long
233 pmc = (event >> EVENT_PMC_SHIFT) & EVENT_PMC_MASK; 233 pmc = (event >> EVENT_PMC_SHIFT) & EVENT_PMC_MASK;
234 unit = (event >> EVENT_UNIT_SHIFT) & EVENT_UNIT_MASK; 234 unit = (event >> EVENT_UNIT_SHIFT) & EVENT_UNIT_MASK;
235 cache = (event >> EVENT_CACHE_SEL_SHIFT) & EVENT_CACHE_SEL_MASK; 235 cache = (event >> EVENT_CACHE_SEL_SHIFT) & EVENT_CACHE_SEL_MASK;
236 ebb = (event >> EVENT_CONFIG_EBB_SHIFT) & EVENT_EBB_MASK; 236 ebb = (event >> PERF_EVENT_CONFIG_EBB_SHIFT) & EVENT_EBB_MASK;
237 237
238 /* Clear the EBB bit in the event, so event checks work below */ 238 /* Clear the EBB bit in the event, so event checks work below */
239 event &= ~(EVENT_EBB_MASK << EVENT_CONFIG_EBB_SHIFT); 239 event &= ~(EVENT_EBB_MASK << PERF_EVENT_CONFIG_EBB_SHIFT);
240 240
241 if (pmc) { 241 if (pmc) {
242 if (pmc > 6) 242 if (pmc > 6)
@@ -561,18 +561,13 @@ static int power8_generic_events[] = {
561static u64 power8_bhrb_filter_map(u64 branch_sample_type) 561static u64 power8_bhrb_filter_map(u64 branch_sample_type)
562{ 562{
563 u64 pmu_bhrb_filter = 0; 563 u64 pmu_bhrb_filter = 0;
564 u64 br_privilege = branch_sample_type & ONLY_PLM;
565 564
566 /* BHRB and regular PMU events share the same prvillege state 565 /* BHRB and regular PMU events share the same privilege state
567 * filter configuration. BHRB is always recorded along with a 566 * filter configuration. BHRB is always recorded along with a
568 * regular PMU event. So privilege state filter criteria for BHRB 567 * regular PMU event. As the privilege state filter is handled
569 * and the companion PMU events has to be the same. As a default 568 * in the basic PMC configuration of the accompanying regular
570 * "perf record" tool sets all privillege bits ON when no filter 569 * PMU event, we ignore any separate BHRB specific request.
571 * criteria is provided in the command line. So as along as all
572 * privillege bits are ON or they are OFF, we are good to go.
573 */ 570 */
574 if ((br_privilege != 7) && (br_privilege != 0))
575 return -1;
576 571
577 /* No branch filter requested */ 572 /* No branch filter requested */
578 if (branch_sample_type & PERF_SAMPLE_BRANCH_ANY) 573 if (branch_sample_type & PERF_SAMPLE_BRANCH_ANY)
@@ -621,10 +616,19 @@ static struct power_pmu power8_pmu = {
621 616
622static int __init init_power8_pmu(void) 617static int __init init_power8_pmu(void)
623{ 618{
619 int rc;
620
624 if (!cur_cpu_spec->oprofile_cpu_type || 621 if (!cur_cpu_spec->oprofile_cpu_type ||
625 strcmp(cur_cpu_spec->oprofile_cpu_type, "ppc64/power8")) 622 strcmp(cur_cpu_spec->oprofile_cpu_type, "ppc64/power8"))
626 return -ENODEV; 623 return -ENODEV;
627 624
628 return register_power_pmu(&power8_pmu); 625 rc = register_power_pmu(&power8_pmu);
626 if (rc)
627 return rc;
628
629 /* Tell userspace that EBB is supported */
630 cur_cpu_spec->cpu_user_features2 |= PPC_FEATURE2_EBB;
631
632 return 0;
629} 633}
630early_initcall(init_power8_pmu); 634early_initcall(init_power8_pmu);
diff --git a/arch/powerpc/platforms/powernv/eeh-powernv.c b/arch/powerpc/platforms/powernv/eeh-powernv.c
index 969cce73055a..79663d26e6ea 100644
--- a/arch/powerpc/platforms/powernv/eeh-powernv.c
+++ b/arch/powerpc/platforms/powernv/eeh-powernv.c
@@ -114,7 +114,7 @@ static int powernv_eeh_dev_probe(struct pci_dev *dev, void *flag)
114 * the root bridge. So it's not reasonable to continue 114 * the root bridge. So it's not reasonable to continue
115 * the probing. 115 * the probing.
116 */ 116 */
117 if (!dn || !edev) 117 if (!dn || !edev || edev->pe)
118 return 0; 118 return 0;
119 119
120 /* Skip for PCI-ISA bridge */ 120 /* Skip for PCI-ISA bridge */
@@ -122,8 +122,19 @@ static int powernv_eeh_dev_probe(struct pci_dev *dev, void *flag)
122 return 0; 122 return 0;
123 123
124 /* Initialize eeh device */ 124 /* Initialize eeh device */
125 edev->class_code = dev->class; 125 edev->class_code = dev->class;
126 edev->mode = 0; 126 edev->mode &= 0xFFFFFF00;
127 if (dev->hdr_type == PCI_HEADER_TYPE_BRIDGE)
128 edev->mode |= EEH_DEV_BRIDGE;
129 if (pci_is_pcie(dev)) {
130 edev->pcie_cap = pci_pcie_cap(dev);
131
132 if (pci_pcie_type(dev) == PCI_EXP_TYPE_ROOT_PORT)
133 edev->mode |= EEH_DEV_ROOT_PORT;
134 else if (pci_pcie_type(dev) == PCI_EXP_TYPE_DOWNSTREAM)
135 edev->mode |= EEH_DEV_DS_PORT;
136 }
137
127 edev->config_addr = ((dev->bus->number << 8) | dev->devfn); 138 edev->config_addr = ((dev->bus->number << 8) | dev->devfn);
128 edev->pe_config_addr = phb->bdfn_to_pe(phb, dev->bus, dev->devfn & 0xff); 139 edev->pe_config_addr = phb->bdfn_to_pe(phb, dev->bus, dev->devfn & 0xff);
129 140
diff --git a/arch/powerpc/platforms/powernv/pci-ioda.c b/arch/powerpc/platforms/powernv/pci-ioda.c
index 49b57b9f835d..d8140b125e62 100644
--- a/arch/powerpc/platforms/powernv/pci-ioda.c
+++ b/arch/powerpc/platforms/powernv/pci-ioda.c
@@ -1266,7 +1266,7 @@ void __init pnv_pci_init_ioda_phb(struct device_node *np,
1266 opal_pci_set_pe(phb_id, 0, 0, 7, 1, 1 , OPAL_MAP_PE); 1266 opal_pci_set_pe(phb_id, 0, 0, 7, 1, 1 , OPAL_MAP_PE);
1267} 1267}
1268 1268
1269void pnv_pci_init_ioda2_phb(struct device_node *np) 1269void __init pnv_pci_init_ioda2_phb(struct device_node *np)
1270{ 1270{
1271 pnv_pci_init_ioda_phb(np, 0, PNV_PHB_IODA2); 1271 pnv_pci_init_ioda_phb(np, 0, PNV_PHB_IODA2);
1272} 1272}
diff --git a/arch/powerpc/platforms/pseries/Kconfig b/arch/powerpc/platforms/pseries/Kconfig
index 1bd3399146ed..62b4f8025de0 100644
--- a/arch/powerpc/platforms/pseries/Kconfig
+++ b/arch/powerpc/platforms/pseries/Kconfig
@@ -19,7 +19,6 @@ config PPC_PSERIES
19 select ZLIB_DEFLATE 19 select ZLIB_DEFLATE
20 select PPC_DOORBELL 20 select PPC_DOORBELL
21 select HAVE_CONTEXT_TRACKING 21 select HAVE_CONTEXT_TRACKING
22 select HOTPLUG if SMP
23 select HOTPLUG_CPU if SMP 22 select HOTPLUG_CPU if SMP
24 default y 23 default y
25 24
diff --git a/arch/powerpc/platforms/pseries/eeh_pseries.c b/arch/powerpc/platforms/pseries/eeh_pseries.c
index b456b157d33d..7fbc25b1813f 100644
--- a/arch/powerpc/platforms/pseries/eeh_pseries.c
+++ b/arch/powerpc/platforms/pseries/eeh_pseries.c
@@ -133,6 +133,48 @@ static int pseries_eeh_init(void)
133 return 0; 133 return 0;
134} 134}
135 135
136static int pseries_eeh_cap_start(struct device_node *dn)
137{
138 struct pci_dn *pdn = PCI_DN(dn);
139 u32 status;
140
141 if (!pdn)
142 return 0;
143
144 rtas_read_config(pdn, PCI_STATUS, 2, &status);
145 if (!(status & PCI_STATUS_CAP_LIST))
146 return 0;
147
148 return PCI_CAPABILITY_LIST;
149}
150
151
152static int pseries_eeh_find_cap(struct device_node *dn, int cap)
153{
154 struct pci_dn *pdn = PCI_DN(dn);
155 int pos = pseries_eeh_cap_start(dn);
156 int cnt = 48; /* Maximal number of capabilities */
157 u32 id;
158
159 if (!pos)
160 return 0;
161
162 while (cnt--) {
163 rtas_read_config(pdn, pos, 1, &pos);
164 if (pos < 0x40)
165 break;
166 pos &= ~3;
167 rtas_read_config(pdn, pos + PCI_CAP_LIST_ID, 1, &id);
168 if (id == 0xff)
169 break;
170 if (id == cap)
171 return pos;
172 pos += PCI_CAP_LIST_NEXT;
173 }
174
175 return 0;
176}
177
136/** 178/**
137 * pseries_eeh_of_probe - EEH probe on the given device 179 * pseries_eeh_of_probe - EEH probe on the given device
138 * @dn: OF node 180 * @dn: OF node
@@ -146,14 +188,16 @@ static void *pseries_eeh_of_probe(struct device_node *dn, void *flag)
146{ 188{
147 struct eeh_dev *edev; 189 struct eeh_dev *edev;
148 struct eeh_pe pe; 190 struct eeh_pe pe;
191 struct pci_dn *pdn = PCI_DN(dn);
149 const u32 *class_code, *vendor_id, *device_id; 192 const u32 *class_code, *vendor_id, *device_id;
150 const u32 *regs; 193 const u32 *regs;
194 u32 pcie_flags;
151 int enable = 0; 195 int enable = 0;
152 int ret; 196 int ret;
153 197
154 /* Retrieve OF node and eeh device */ 198 /* Retrieve OF node and eeh device */
155 edev = of_node_to_eeh_dev(dn); 199 edev = of_node_to_eeh_dev(dn);
156 if (!of_device_is_available(dn)) 200 if (edev->pe || !of_device_is_available(dn))
157 return NULL; 201 return NULL;
158 202
159 /* Retrieve class/vendor/device IDs */ 203 /* Retrieve class/vendor/device IDs */
@@ -167,9 +211,26 @@ static void *pseries_eeh_of_probe(struct device_node *dn, void *flag)
167 if (dn->type && !strcmp(dn->type, "isa")) 211 if (dn->type && !strcmp(dn->type, "isa"))
168 return NULL; 212 return NULL;
169 213
170 /* Update class code and mode of eeh device */ 214 /*
215 * Update class code and mode of eeh device. We need
216 * correctly reflects that current device is root port
217 * or PCIe switch downstream port.
218 */
171 edev->class_code = *class_code; 219 edev->class_code = *class_code;
172 edev->mode = 0; 220 edev->pcie_cap = pseries_eeh_find_cap(dn, PCI_CAP_ID_EXP);
221 edev->mode &= 0xFFFFFF00;
222 if ((edev->class_code >> 8) == PCI_CLASS_BRIDGE_PCI) {
223 edev->mode |= EEH_DEV_BRIDGE;
224 if (edev->pcie_cap) {
225 rtas_read_config(pdn, edev->pcie_cap + PCI_EXP_FLAGS,
226 2, &pcie_flags);
227 pcie_flags = (pcie_flags & PCI_EXP_FLAGS_TYPE) >> 4;
228 if (pcie_flags == PCI_EXP_TYPE_ROOT_PORT)
229 edev->mode |= EEH_DEV_ROOT_PORT;
230 else if (pcie_flags == PCI_EXP_TYPE_DOWNSTREAM)
231 edev->mode |= EEH_DEV_DS_PORT;
232 }
233 }
173 234
174 /* Retrieve the device address */ 235 /* Retrieve the device address */
175 regs = of_get_property(dn, "reg", NULL); 236 regs = of_get_property(dn, "reg", NULL);
diff --git a/arch/powerpc/platforms/pseries/lpar.c b/arch/powerpc/platforms/pseries/lpar.c
index 02d6e21619bb..8bad880bd177 100644
--- a/arch/powerpc/platforms/pseries/lpar.c
+++ b/arch/powerpc/platforms/pseries/lpar.c
@@ -146,7 +146,7 @@ static long pSeries_lpar_hpte_insert(unsigned long hpte_group,
146 flags = 0; 146 flags = 0;
147 147
148 /* Make pHyp happy */ 148 /* Make pHyp happy */
149 if ((rflags & _PAGE_NO_CACHE) & !(rflags & _PAGE_WRITETHRU)) 149 if ((rflags & _PAGE_NO_CACHE) && !(rflags & _PAGE_WRITETHRU))
150 hpte_r &= ~_PAGE_COHERENT; 150 hpte_r &= ~_PAGE_COHERENT;
151 if (firmware_has_feature(FW_FEATURE_XCMO) && !(hpte_r & HPTE_R_N)) 151 if (firmware_has_feature(FW_FEATURE_XCMO) && !(hpte_r & HPTE_R_N))
152 flags |= H_COALESCE_CAND; 152 flags |= H_COALESCE_CAND;
diff --git a/arch/powerpc/platforms/pseries/nvram.c b/arch/powerpc/platforms/pseries/nvram.c
index 9f8671a44551..6a5f2b1f32ca 100644
--- a/arch/powerpc/platforms/pseries/nvram.c
+++ b/arch/powerpc/platforms/pseries/nvram.c
@@ -569,35 +569,6 @@ error:
569 return ret; 569 return ret;
570} 570}
571 571
572static int unzip_oops(char *oops_buf, char *big_buf)
573{
574 struct oops_log_info *oops_hdr = (struct oops_log_info *)oops_buf;
575 u64 timestamp = oops_hdr->timestamp;
576 char *big_oops_data = NULL;
577 char *oops_data_buf = NULL;
578 size_t big_oops_data_sz;
579 int unzipped_len;
580
581 big_oops_data = big_buf + sizeof(struct oops_log_info);
582 big_oops_data_sz = big_oops_buf_sz - sizeof(struct oops_log_info);
583 oops_data_buf = oops_buf + sizeof(struct oops_log_info);
584
585 unzipped_len = nvram_decompress(oops_data_buf, big_oops_data,
586 oops_hdr->report_length,
587 big_oops_data_sz);
588
589 if (unzipped_len < 0) {
590 pr_err("nvram: decompression failed; returned %d\n",
591 unzipped_len);
592 return -1;
593 }
594 oops_hdr = (struct oops_log_info *)big_buf;
595 oops_hdr->version = OOPS_HDR_VERSION;
596 oops_hdr->report_length = (u16) unzipped_len;
597 oops_hdr->timestamp = timestamp;
598 return 0;
599}
600
601static int nvram_pstore_open(struct pstore_info *psi) 572static int nvram_pstore_open(struct pstore_info *psi)
602{ 573{
603 /* Reset the iterator to start reading partitions again */ 574 /* Reset the iterator to start reading partitions again */
@@ -685,10 +656,9 @@ static ssize_t nvram_pstore_read(u64 *id, enum pstore_type_id *type,
685 unsigned int err_type, id_no, size = 0; 656 unsigned int err_type, id_no, size = 0;
686 struct nvram_os_partition *part = NULL; 657 struct nvram_os_partition *part = NULL;
687 char *buff = NULL, *big_buff = NULL; 658 char *buff = NULL, *big_buff = NULL;
688 int rc, sig = 0; 659 int sig = 0;
689 loff_t p; 660 loff_t p;
690 661
691read_partition:
692 read_type++; 662 read_type++;
693 663
694 switch (nvram_type_ids[read_type]) { 664 switch (nvram_type_ids[read_type]) {
@@ -749,30 +719,46 @@ read_partition:
749 *id = id_no; 719 *id = id_no;
750 720
751 if (nvram_type_ids[read_type] == PSTORE_TYPE_DMESG) { 721 if (nvram_type_ids[read_type] == PSTORE_TYPE_DMESG) {
722 int length, unzipped_len;
723 size_t hdr_size;
724
752 oops_hdr = (struct oops_log_info *)buff; 725 oops_hdr = (struct oops_log_info *)buff;
753 *buf = buff + sizeof(*oops_hdr); 726 if (oops_hdr->version < OOPS_HDR_VERSION) {
727 /* Old format oops header had 2-byte record size */
728 hdr_size = sizeof(u16);
729 length = oops_hdr->version;
730 time->tv_sec = 0;
731 time->tv_nsec = 0;
732 } else {
733 hdr_size = sizeof(*oops_hdr);
734 length = oops_hdr->report_length;
735 time->tv_sec = oops_hdr->timestamp;
736 time->tv_nsec = 0;
737 }
738 *buf = kmalloc(length, GFP_KERNEL);
739 if (*buf == NULL)
740 return -ENOMEM;
741 memcpy(*buf, buff + hdr_size, length);
742 kfree(buff);
754 743
755 if (err_type == ERR_TYPE_KERNEL_PANIC_GZ) { 744 if (err_type == ERR_TYPE_KERNEL_PANIC_GZ) {
756 big_buff = kmalloc(big_oops_buf_sz, GFP_KERNEL); 745 big_buff = kmalloc(big_oops_buf_sz, GFP_KERNEL);
757 if (!big_buff) 746 if (!big_buff)
758 return -ENOMEM; 747 return -ENOMEM;
759 748
760 rc = unzip_oops(buff, big_buff); 749 unzipped_len = nvram_decompress(*buf, big_buff,
750 length, big_oops_buf_sz);
761 751
762 if (rc != 0) { 752 if (unzipped_len < 0) {
763 kfree(buff); 753 pr_err("nvram: decompression failed, returned "
754 "rc %d\n", unzipped_len);
764 kfree(big_buff); 755 kfree(big_buff);
765 goto read_partition; 756 } else {
757 *buf = big_buff;
758 length = unzipped_len;
766 } 759 }
767
768 oops_hdr = (struct oops_log_info *)big_buff;
769 *buf = big_buff + sizeof(*oops_hdr);
770 kfree(buff);
771 } 760 }
772 761 return length;
773 time->tv_sec = oops_hdr->timestamp;
774 time->tv_nsec = 0;
775 return oops_hdr->report_length;
776 } 762 }
777 763
778 *buf = buff; 764 *buf = buff;
@@ -816,6 +802,7 @@ static int nvram_pstore_init(void)
816static void __init nvram_init_oops_partition(int rtas_partition_exists) 802static void __init nvram_init_oops_partition(int rtas_partition_exists)
817{ 803{
818 int rc; 804 int rc;
805 size_t size;
819 806
820 rc = pseries_nvram_init_os_partition(&oops_log_partition); 807 rc = pseries_nvram_init_os_partition(&oops_log_partition);
821 if (rc != 0) { 808 if (rc != 0) {
@@ -844,8 +831,9 @@ static void __init nvram_init_oops_partition(int rtas_partition_exists)
844 big_oops_buf_sz = (oops_data_sz * 100) / 45; 831 big_oops_buf_sz = (oops_data_sz * 100) / 45;
845 big_oops_buf = kmalloc(big_oops_buf_sz, GFP_KERNEL); 832 big_oops_buf = kmalloc(big_oops_buf_sz, GFP_KERNEL);
846 if (big_oops_buf) { 833 if (big_oops_buf) {
847 stream.workspace = kmalloc(zlib_deflate_workspacesize( 834 size = max(zlib_deflate_workspacesize(WINDOW_BITS, MEM_LEVEL),
848 WINDOW_BITS, MEM_LEVEL), GFP_KERNEL); 835 zlib_inflate_workspacesize());
836 stream.workspace = kmalloc(size, GFP_KERNEL);
849 if (!stream.workspace) { 837 if (!stream.workspace) {
850 pr_err("nvram: No memory for compression workspace; " 838 pr_err("nvram: No memory for compression workspace; "
851 "skipping compression of %s partition data\n", 839 "skipping compression of %s partition data\n",
diff --git a/arch/powerpc/platforms/pseries/ras.c b/arch/powerpc/platforms/pseries/ras.c
index 7b3cbde8c783..721c0586b284 100644
--- a/arch/powerpc/platforms/pseries/ras.c
+++ b/arch/powerpc/platforms/pseries/ras.c
@@ -287,6 +287,9 @@ static struct rtas_error_log *fwnmi_get_errinfo(struct pt_regs *regs)
287 unsigned long *savep; 287 unsigned long *savep;
288 struct rtas_error_log *h, *errhdr = NULL; 288 struct rtas_error_log *h, *errhdr = NULL;
289 289
290 /* Mask top two bits */
291 regs->gpr[3] &= ~(0x3UL << 62);
292
290 if (!VALID_FWNMI_BUFFER(regs->gpr[3])) { 293 if (!VALID_FWNMI_BUFFER(regs->gpr[3])) {
291 printk(KERN_ERR "FWNMI: corrupt r3 0x%016lx\n", regs->gpr[3]); 294 printk(KERN_ERR "FWNMI: corrupt r3 0x%016lx\n", regs->gpr[3]);
292 return NULL; 295 return NULL;
diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig
index 22f75b504f7f..8a4cae78f03c 100644
--- a/arch/s390/Kconfig
+++ b/arch/s390/Kconfig
@@ -118,6 +118,7 @@ config S390
118 select HAVE_FUNCTION_TRACE_MCOUNT_TEST 118 select HAVE_FUNCTION_TRACE_MCOUNT_TEST
119 select HAVE_KERNEL_BZIP2 119 select HAVE_KERNEL_BZIP2
120 select HAVE_KERNEL_GZIP 120 select HAVE_KERNEL_GZIP
121 select HAVE_KERNEL_LZ4
121 select HAVE_KERNEL_LZMA 122 select HAVE_KERNEL_LZMA
122 select HAVE_KERNEL_LZO 123 select HAVE_KERNEL_LZO
123 select HAVE_KERNEL_XZ 124 select HAVE_KERNEL_XZ
@@ -227,11 +228,12 @@ config MARCH_Z196
227 not work on older machines. 228 not work on older machines.
228 229
229config MARCH_ZEC12 230config MARCH_ZEC12
230 bool "IBM zEC12" 231 bool "IBM zBC12 and zEC12"
231 select HAVE_MARCH_ZEC12_FEATURES if 64BIT 232 select HAVE_MARCH_ZEC12_FEATURES if 64BIT
232 help 233 help
233 Select this to enable optimizations for IBM zEC12 (2827 series). The 234 Select this to enable optimizations for IBM zBC12 and zEC12 (2828 and
234 kernel will be slightly faster but will not work on older machines. 235 2827 series). The kernel will be slightly faster but will not work on
236 older machines.
235 237
236endchoice 238endchoice
237 239
@@ -709,6 +711,7 @@ config S390_GUEST
709 def_bool y 711 def_bool y
710 prompt "s390 support for virtio devices" 712 prompt "s390 support for virtio devices"
711 depends on 64BIT 713 depends on 64BIT
714 select TTY
712 select VIRTUALIZATION 715 select VIRTUALIZATION
713 select VIRTIO 716 select VIRTIO
714 select VIRTIO_CONSOLE 717 select VIRTIO_CONSOLE
diff --git a/arch/s390/boot/compressed/Makefile b/arch/s390/boot/compressed/Makefile
index 3ad8f61c9985..866ecbe670e4 100644
--- a/arch/s390/boot/compressed/Makefile
+++ b/arch/s390/boot/compressed/Makefile
@@ -6,9 +6,9 @@
6 6
7BITS := $(if $(CONFIG_64BIT),64,31) 7BITS := $(if $(CONFIG_64BIT),64,31)
8 8
9targets := vmlinux.lds vmlinux vmlinux.bin vmlinux.bin.gz vmlinux.bin.bz2 \ 9targets := vmlinux.lds vmlinux vmlinux.bin vmlinux.bin.gz vmlinux.bin.bz2
10 vmlinux.bin.xz vmlinux.bin.lzma vmlinux.bin.lzo misc.o piggy.o \ 10targets += vmlinux.bin.xz vmlinux.bin.lzma vmlinux.bin.lzo vmlinux.bin.lz4
11 sizes.h head$(BITS).o 11targets += misc.o piggy.o sizes.h head$(BITS).o
12 12
13KBUILD_CFLAGS := -m$(BITS) -D__KERNEL__ $(LINUX_INCLUDE) -O2 13KBUILD_CFLAGS := -m$(BITS) -D__KERNEL__ $(LINUX_INCLUDE) -O2
14KBUILD_CFLAGS += -DDISABLE_BRANCH_PROFILING 14KBUILD_CFLAGS += -DDISABLE_BRANCH_PROFILING
@@ -48,6 +48,7 @@ vmlinux.bin.all-y := $(obj)/vmlinux.bin
48 48
49suffix-$(CONFIG_KERNEL_GZIP) := gz 49suffix-$(CONFIG_KERNEL_GZIP) := gz
50suffix-$(CONFIG_KERNEL_BZIP2) := bz2 50suffix-$(CONFIG_KERNEL_BZIP2) := bz2
51suffix-$(CONFIG_KERNEL_LZ4) := lz4
51suffix-$(CONFIG_KERNEL_LZMA) := lzma 52suffix-$(CONFIG_KERNEL_LZMA) := lzma
52suffix-$(CONFIG_KERNEL_LZO) := lzo 53suffix-$(CONFIG_KERNEL_LZO) := lzo
53suffix-$(CONFIG_KERNEL_XZ) := xz 54suffix-$(CONFIG_KERNEL_XZ) := xz
@@ -56,6 +57,8 @@ $(obj)/vmlinux.bin.gz: $(vmlinux.bin.all-y)
56 $(call if_changed,gzip) 57 $(call if_changed,gzip)
57$(obj)/vmlinux.bin.bz2: $(vmlinux.bin.all-y) 58$(obj)/vmlinux.bin.bz2: $(vmlinux.bin.all-y)
58 $(call if_changed,bzip2) 59 $(call if_changed,bzip2)
60$(obj)/vmlinux.bin.lz4: $(vmlinux.bin.all-y)
61 $(call if_changed,lz4)
59$(obj)/vmlinux.bin.lzma: $(vmlinux.bin.all-y) 62$(obj)/vmlinux.bin.lzma: $(vmlinux.bin.all-y)
60 $(call if_changed,lzma) 63 $(call if_changed,lzma)
61$(obj)/vmlinux.bin.lzo: $(vmlinux.bin.all-y) 64$(obj)/vmlinux.bin.lzo: $(vmlinux.bin.all-y)
diff --git a/arch/s390/boot/compressed/misc.c b/arch/s390/boot/compressed/misc.c
index c4c6a1cf221b..57cbaff1f397 100644
--- a/arch/s390/boot/compressed/misc.c
+++ b/arch/s390/boot/compressed/misc.c
@@ -47,6 +47,10 @@ static unsigned long free_mem_end_ptr;
47#include "../../../../lib/decompress_bunzip2.c" 47#include "../../../../lib/decompress_bunzip2.c"
48#endif 48#endif
49 49
50#ifdef CONFIG_KERNEL_LZ4
51#include "../../../../lib/decompress_unlz4.c"
52#endif
53
50#ifdef CONFIG_KERNEL_LZMA 54#ifdef CONFIG_KERNEL_LZMA
51#include "../../../../lib/decompress_unlzma.c" 55#include "../../../../lib/decompress_unlzma.c"
52#endif 56#endif
diff --git a/arch/s390/include/asm/bitops.h b/arch/s390/include/asm/bitops.h
index 4d8604e311f3..7d4676758733 100644
--- a/arch/s390/include/asm/bitops.h
+++ b/arch/s390/include/asm/bitops.h
@@ -693,7 +693,7 @@ static inline int find_next_bit_left(const unsigned long *addr,
693 size -= offset; 693 size -= offset;
694 p = addr + offset / BITS_PER_LONG; 694 p = addr + offset / BITS_PER_LONG;
695 if (bit) { 695 if (bit) {
696 set = __flo_word(0, *p & (~0UL << bit)); 696 set = __flo_word(0, *p & (~0UL >> bit));
697 if (set >= size) 697 if (set >= size)
698 return size + offset; 698 return size + offset;
699 if (set < BITS_PER_LONG) 699 if (set < BITS_PER_LONG)
diff --git a/arch/s390/include/asm/processor.h b/arch/s390/include/asm/processor.h
index 6b499870662f..b0e6435b2f02 100644
--- a/arch/s390/include/asm/processor.h
+++ b/arch/s390/include/asm/processor.h
@@ -91,7 +91,15 @@ struct thread_struct {
91#endif 91#endif
92}; 92};
93 93
94#define PER_FLAG_NO_TE 1UL /* Flag to disable transactions. */ 94/* Flag to disable transactions. */
95#define PER_FLAG_NO_TE 1UL
96/* Flag to enable random transaction aborts. */
97#define PER_FLAG_TE_ABORT_RAND 2UL
98/* Flag to specify random transaction abort mode:
99 * - abort each transaction at a random instruction before TEND if set.
100 * - abort random transactions at a random instruction if cleared.
101 */
102#define PER_FLAG_TE_ABORT_RAND_TEND 4UL
95 103
96typedef struct thread_struct thread_struct; 104typedef struct thread_struct thread_struct;
97 105
diff --git a/arch/s390/include/asm/switch_to.h b/arch/s390/include/asm/switch_to.h
index f3a9e0f92704..80b6f11263c4 100644
--- a/arch/s390/include/asm/switch_to.h
+++ b/arch/s390/include/asm/switch_to.h
@@ -10,7 +10,7 @@
10#include <linux/thread_info.h> 10#include <linux/thread_info.h>
11 11
12extern struct task_struct *__switch_to(void *, void *); 12extern struct task_struct *__switch_to(void *, void *);
13extern void update_per_regs(struct task_struct *task); 13extern void update_cr_regs(struct task_struct *task);
14 14
15static inline void save_fp_regs(s390_fp_regs *fpregs) 15static inline void save_fp_regs(s390_fp_regs *fpregs)
16{ 16{
@@ -86,7 +86,7 @@ static inline void restore_access_regs(unsigned int *acrs)
86 restore_fp_regs(&next->thread.fp_regs); \ 86 restore_fp_regs(&next->thread.fp_regs); \
87 restore_access_regs(&next->thread.acrs[0]); \ 87 restore_access_regs(&next->thread.acrs[0]); \
88 restore_ri_cb(next->thread.ri_cb, prev->thread.ri_cb); \ 88 restore_ri_cb(next->thread.ri_cb, prev->thread.ri_cb); \
89 update_per_regs(next); \ 89 update_cr_regs(next); \
90 } \ 90 } \
91 prev = __switch_to(prev,next); \ 91 prev = __switch_to(prev,next); \
92} while (0) 92} while (0)
diff --git a/arch/s390/include/uapi/asm/ptrace.h b/arch/s390/include/uapi/asm/ptrace.h
index 3aa9f1ec5b29..7a84619e315e 100644
--- a/arch/s390/include/uapi/asm/ptrace.h
+++ b/arch/s390/include/uapi/asm/ptrace.h
@@ -400,6 +400,7 @@ typedef struct
400#define PTRACE_POKE_SYSTEM_CALL 0x5008 400#define PTRACE_POKE_SYSTEM_CALL 0x5008
401#define PTRACE_ENABLE_TE 0x5009 401#define PTRACE_ENABLE_TE 0x5009
402#define PTRACE_DISABLE_TE 0x5010 402#define PTRACE_DISABLE_TE 0x5010
403#define PTRACE_TE_ABORT_RAND 0x5011
403 404
404/* 405/*
405 * PT_PROT definition is loosely based on hppa bsd definition in 406 * PT_PROT definition is loosely based on hppa bsd definition in
diff --git a/arch/s390/kernel/cache.c b/arch/s390/kernel/cache.c
index 64b24650e4f8..dd62071624be 100644
--- a/arch/s390/kernel/cache.c
+++ b/arch/s390/kernel/cache.c
@@ -173,7 +173,7 @@ error:
173 } 173 }
174} 174}
175 175
176static struct cache_dir *__cpuinit cache_create_cache_dir(int cpu) 176static struct cache_dir *cache_create_cache_dir(int cpu)
177{ 177{
178 struct cache_dir *cache_dir; 178 struct cache_dir *cache_dir;
179 struct kobject *kobj = NULL; 179 struct kobject *kobj = NULL;
@@ -289,9 +289,8 @@ static struct kobj_type cache_index_type = {
289 .default_attrs = cache_index_default_attrs, 289 .default_attrs = cache_index_default_attrs,
290}; 290};
291 291
292static int __cpuinit cache_create_index_dir(struct cache_dir *cache_dir, 292static int cache_create_index_dir(struct cache_dir *cache_dir,
293 struct cache *cache, int index, 293 struct cache *cache, int index, int cpu)
294 int cpu)
295{ 294{
296 struct cache_index_dir *index_dir; 295 struct cache_index_dir *index_dir;
297 int rc; 296 int rc;
@@ -313,7 +312,7 @@ out:
313 return rc; 312 return rc;
314} 313}
315 314
316static int __cpuinit cache_add_cpu(int cpu) 315static int cache_add_cpu(int cpu)
317{ 316{
318 struct cache_dir *cache_dir; 317 struct cache_dir *cache_dir;
319 struct cache *cache; 318 struct cache *cache;
@@ -335,7 +334,7 @@ static int __cpuinit cache_add_cpu(int cpu)
335 return 0; 334 return 0;
336} 335}
337 336
338static void __cpuinit cache_remove_cpu(int cpu) 337static void cache_remove_cpu(int cpu)
339{ 338{
340 struct cache_index_dir *index, *next; 339 struct cache_index_dir *index, *next;
341 struct cache_dir *cache_dir; 340 struct cache_dir *cache_dir;
@@ -354,8 +353,8 @@ static void __cpuinit cache_remove_cpu(int cpu)
354 cache_dir_cpu[cpu] = NULL; 353 cache_dir_cpu[cpu] = NULL;
355} 354}
356 355
357static int __cpuinit cache_hotplug(struct notifier_block *nfb, 356static int cache_hotplug(struct notifier_block *nfb, unsigned long action,
358 unsigned long action, void *hcpu) 357 void *hcpu)
359{ 358{
360 int cpu = (long)hcpu; 359 int cpu = (long)hcpu;
361 int rc = 0; 360 int rc = 0;
diff --git a/arch/s390/kernel/crash_dump.c b/arch/s390/kernel/crash_dump.c
index f703d91bf720..d8f355657171 100644
--- a/arch/s390/kernel/crash_dump.c
+++ b/arch/s390/kernel/crash_dump.c
@@ -21,6 +21,48 @@
21#define PTR_SUB(x, y) (((char *) (x)) - ((unsigned long) (y))) 21#define PTR_SUB(x, y) (((char *) (x)) - ((unsigned long) (y)))
22#define PTR_DIFF(x, y) ((unsigned long)(((char *) (x)) - ((unsigned long) (y)))) 22#define PTR_DIFF(x, y) ((unsigned long)(((char *) (x)) - ((unsigned long) (y))))
23 23
24
25/*
26 * Return physical address for virtual address
27 */
28static inline void *load_real_addr(void *addr)
29{
30 unsigned long real_addr;
31
32 asm volatile(
33 " lra %0,0(%1)\n"
34 " jz 0f\n"
35 " la %0,0\n"
36 "0:"
37 : "=a" (real_addr) : "a" (addr) : "cc");
38 return (void *)real_addr;
39}
40
41/*
42 * Copy up to one page to vmalloc or real memory
43 */
44static ssize_t copy_page_real(void *buf, void *src, size_t csize)
45{
46 size_t size;
47
48 if (is_vmalloc_addr(buf)) {
49 BUG_ON(csize >= PAGE_SIZE);
50 /* If buf is not page aligned, copy first part */
51 size = min(roundup(__pa(buf), PAGE_SIZE) - __pa(buf), csize);
52 if (size) {
53 if (memcpy_real(load_real_addr(buf), src, size))
54 return -EFAULT;
55 buf += size;
56 src += size;
57 }
58 /* Copy second part */
59 size = csize - size;
60 return (size) ? memcpy_real(load_real_addr(buf), src, size) : 0;
61 } else {
62 return memcpy_real(buf, src, csize);
63 }
64}
65
24/* 66/*
25 * Copy one page from "oldmem" 67 * Copy one page from "oldmem"
26 * 68 *
@@ -32,6 +74,7 @@ ssize_t copy_oldmem_page(unsigned long pfn, char *buf,
32 size_t csize, unsigned long offset, int userbuf) 74 size_t csize, unsigned long offset, int userbuf)
33{ 75{
34 unsigned long src; 76 unsigned long src;
77 int rc;
35 78
36 if (!csize) 79 if (!csize)
37 return 0; 80 return 0;
@@ -43,11 +86,11 @@ ssize_t copy_oldmem_page(unsigned long pfn, char *buf,
43 src < OLDMEM_BASE + OLDMEM_SIZE) 86 src < OLDMEM_BASE + OLDMEM_SIZE)
44 src -= OLDMEM_BASE; 87 src -= OLDMEM_BASE;
45 if (userbuf) 88 if (userbuf)
46 copy_to_user_real((void __force __user *) buf, (void *) src, 89 rc = copy_to_user_real((void __force __user *) buf,
47 csize); 90 (void *) src, csize);
48 else 91 else
49 memcpy_real(buf, (void *) src, csize); 92 rc = copy_page_real(buf, (void *) src, csize);
50 return csize; 93 return (rc == 0) ? csize : rc;
51} 94}
52 95
53/* 96/*
diff --git a/arch/s390/kernel/perf_cpum_cf.c b/arch/s390/kernel/perf_cpum_cf.c
index 390d9ae57bb2..fb99c2057b85 100644
--- a/arch/s390/kernel/perf_cpum_cf.c
+++ b/arch/s390/kernel/perf_cpum_cf.c
@@ -639,8 +639,8 @@ static struct pmu cpumf_pmu = {
639 .cancel_txn = cpumf_pmu_cancel_txn, 639 .cancel_txn = cpumf_pmu_cancel_txn,
640}; 640};
641 641
642static int __cpuinit cpumf_pmu_notifier(struct notifier_block *self, 642static int cpumf_pmu_notifier(struct notifier_block *self, unsigned long action,
643 unsigned long action, void *hcpu) 643 void *hcpu)
644{ 644{
645 unsigned int cpu = (long) hcpu; 645 unsigned int cpu = (long) hcpu;
646 int flags; 646 int flags;
diff --git a/arch/s390/kernel/perf_event.c b/arch/s390/kernel/perf_event.c
index a6fc037671b1..500aa1029bcb 100644
--- a/arch/s390/kernel/perf_event.c
+++ b/arch/s390/kernel/perf_event.c
@@ -52,12 +52,13 @@ static struct kvm_s390_sie_block *sie_block(struct pt_regs *regs)
52 52
53static bool is_in_guest(struct pt_regs *regs) 53static bool is_in_guest(struct pt_regs *regs)
54{ 54{
55 unsigned long ip = instruction_pointer(regs);
56
57 if (user_mode(regs)) 55 if (user_mode(regs))
58 return false; 56 return false;
59 57#if defined(CONFIG_KVM) || defined(CONFIG_KVM_MODULE)
60 return ip == (unsigned long) &sie_exit; 58 return instruction_pointer(regs) == (unsigned long) &sie_exit;
59#else
60 return false;
61#endif
61} 62}
62 63
63static unsigned long guest_is_user_mode(struct pt_regs *regs) 64static unsigned long guest_is_user_mode(struct pt_regs *regs)
diff --git a/arch/s390/kernel/processor.c b/arch/s390/kernel/processor.c
index 753c41d0ffd3..24612029f450 100644
--- a/arch/s390/kernel/processor.c
+++ b/arch/s390/kernel/processor.c
@@ -21,7 +21,7 @@ static DEFINE_PER_CPU(struct cpuid, cpu_id);
21/* 21/*
22 * cpu_init - initializes state that is per-CPU. 22 * cpu_init - initializes state that is per-CPU.
23 */ 23 */
24void __cpuinit cpu_init(void) 24void cpu_init(void)
25{ 25{
26 struct s390_idle_data *idle = &__get_cpu_var(s390_idle); 26 struct s390_idle_data *idle = &__get_cpu_var(s390_idle);
27 struct cpuid *id = &__get_cpu_var(cpu_id); 27 struct cpuid *id = &__get_cpu_var(cpu_id);
diff --git a/arch/s390/kernel/ptrace.c b/arch/s390/kernel/ptrace.c
index a314c57f4e94..e9fadb04e3c6 100644
--- a/arch/s390/kernel/ptrace.c
+++ b/arch/s390/kernel/ptrace.c
@@ -47,7 +47,7 @@ enum s390_regset {
47 REGSET_GENERAL_EXTENDED, 47 REGSET_GENERAL_EXTENDED,
48}; 48};
49 49
50void update_per_regs(struct task_struct *task) 50void update_cr_regs(struct task_struct *task)
51{ 51{
52 struct pt_regs *regs = task_pt_regs(task); 52 struct pt_regs *regs = task_pt_regs(task);
53 struct thread_struct *thread = &task->thread; 53 struct thread_struct *thread = &task->thread;
@@ -56,17 +56,25 @@ void update_per_regs(struct task_struct *task)
56#ifdef CONFIG_64BIT 56#ifdef CONFIG_64BIT
57 /* Take care of the enable/disable of transactional execution. */ 57 /* Take care of the enable/disable of transactional execution. */
58 if (MACHINE_HAS_TE) { 58 if (MACHINE_HAS_TE) {
59 unsigned long cr0, cr0_new; 59 unsigned long cr[3], cr_new[3];
60 60
61 __ctl_store(cr0, 0, 0); 61 __ctl_store(cr, 0, 2);
62 /* set or clear transaction execution bits 8 and 9. */ 62 cr_new[1] = cr[1];
63 /* Set or clear transaction execution TXC/PIFO bits 8 and 9. */
63 if (task->thread.per_flags & PER_FLAG_NO_TE) 64 if (task->thread.per_flags & PER_FLAG_NO_TE)
64 cr0_new = cr0 & ~(3UL << 54); 65 cr_new[0] = cr[0] & ~(3UL << 54);
65 else 66 else
66 cr0_new = cr0 | (3UL << 54); 67 cr_new[0] = cr[0] | (3UL << 54);
67 /* Only load control register 0 if necessary. */ 68 /* Set or clear transaction execution TDC bits 62 and 63. */
68 if (cr0 != cr0_new) 69 cr_new[2] = cr[2] & ~3UL;
69 __ctl_load(cr0_new, 0, 0); 70 if (task->thread.per_flags & PER_FLAG_TE_ABORT_RAND) {
71 if (task->thread.per_flags & PER_FLAG_TE_ABORT_RAND_TEND)
72 cr_new[2] |= 1UL;
73 else
74 cr_new[2] |= 2UL;
75 }
76 if (memcmp(&cr_new, &cr, sizeof(cr)))
77 __ctl_load(cr_new, 0, 2);
70 } 78 }
71#endif 79#endif
72 /* Copy user specified PER registers */ 80 /* Copy user specified PER registers */
@@ -100,14 +108,14 @@ void user_enable_single_step(struct task_struct *task)
100{ 108{
101 set_tsk_thread_flag(task, TIF_SINGLE_STEP); 109 set_tsk_thread_flag(task, TIF_SINGLE_STEP);
102 if (task == current) 110 if (task == current)
103 update_per_regs(task); 111 update_cr_regs(task);
104} 112}
105 113
106void user_disable_single_step(struct task_struct *task) 114void user_disable_single_step(struct task_struct *task)
107{ 115{
108 clear_tsk_thread_flag(task, TIF_SINGLE_STEP); 116 clear_tsk_thread_flag(task, TIF_SINGLE_STEP);
109 if (task == current) 117 if (task == current)
110 update_per_regs(task); 118 update_cr_regs(task);
111} 119}
112 120
113/* 121/*
@@ -447,6 +455,26 @@ long arch_ptrace(struct task_struct *child, long request,
447 if (!MACHINE_HAS_TE) 455 if (!MACHINE_HAS_TE)
448 return -EIO; 456 return -EIO;
449 child->thread.per_flags |= PER_FLAG_NO_TE; 457 child->thread.per_flags |= PER_FLAG_NO_TE;
458 child->thread.per_flags &= ~PER_FLAG_TE_ABORT_RAND;
459 return 0;
460 case PTRACE_TE_ABORT_RAND:
461 if (!MACHINE_HAS_TE || (child->thread.per_flags & PER_FLAG_NO_TE))
462 return -EIO;
463 switch (data) {
464 case 0UL:
465 child->thread.per_flags &= ~PER_FLAG_TE_ABORT_RAND;
466 break;
467 case 1UL:
468 child->thread.per_flags |= PER_FLAG_TE_ABORT_RAND;
469 child->thread.per_flags |= PER_FLAG_TE_ABORT_RAND_TEND;
470 break;
471 case 2UL:
472 child->thread.per_flags |= PER_FLAG_TE_ABORT_RAND;
473 child->thread.per_flags &= ~PER_FLAG_TE_ABORT_RAND_TEND;
474 break;
475 default:
476 return -EINVAL;
477 }
450 return 0; 478 return 0;
451 default: 479 default:
452 /* Removing high order bit from addr (only for 31 bit). */ 480 /* Removing high order bit from addr (only for 31 bit). */
diff --git a/arch/s390/kernel/setup.c b/arch/s390/kernel/setup.c
index 497451ec5e26..aeed8a61fa0d 100644
--- a/arch/s390/kernel/setup.c
+++ b/arch/s390/kernel/setup.c
@@ -994,6 +994,7 @@ static void __init setup_hwcaps(void)
994 strcpy(elf_platform, "z196"); 994 strcpy(elf_platform, "z196");
995 break; 995 break;
996 case 0x2827: 996 case 0x2827:
997 case 0x2828:
997 strcpy(elf_platform, "zEC12"); 998 strcpy(elf_platform, "zEC12");
998 break; 999 break;
999 } 1000 }
diff --git a/arch/s390/kernel/smp.c b/arch/s390/kernel/smp.c
index 15a016c10563..d386c4e9d2e5 100644
--- a/arch/s390/kernel/smp.c
+++ b/arch/s390/kernel/smp.c
@@ -165,7 +165,7 @@ static void pcpu_ec_call(struct pcpu *pcpu, int ec_bit)
165 pcpu_sigp_retry(pcpu, order, 0); 165 pcpu_sigp_retry(pcpu, order, 0);
166} 166}
167 167
168static int __cpuinit pcpu_alloc_lowcore(struct pcpu *pcpu, int cpu) 168static int pcpu_alloc_lowcore(struct pcpu *pcpu, int cpu)
169{ 169{
170 struct _lowcore *lc; 170 struct _lowcore *lc;
171 171
@@ -616,10 +616,9 @@ static struct sclp_cpu_info *smp_get_cpu_info(void)
616 return info; 616 return info;
617} 617}
618 618
619static int __cpuinit smp_add_present_cpu(int cpu); 619static int smp_add_present_cpu(int cpu);
620 620
621static int __cpuinit __smp_rescan_cpus(struct sclp_cpu_info *info, 621static int __smp_rescan_cpus(struct sclp_cpu_info *info, int sysfs_add)
622 int sysfs_add)
623{ 622{
624 struct pcpu *pcpu; 623 struct pcpu *pcpu;
625 cpumask_t avail; 624 cpumask_t avail;
@@ -685,7 +684,7 @@ static void __init smp_detect_cpus(void)
685/* 684/*
686 * Activate a secondary processor. 685 * Activate a secondary processor.
687 */ 686 */
688static void __cpuinit smp_start_secondary(void *cpuvoid) 687static void smp_start_secondary(void *cpuvoid)
689{ 688{
690 S390_lowcore.last_update_clock = get_tod_clock(); 689 S390_lowcore.last_update_clock = get_tod_clock();
691 S390_lowcore.restart_stack = (unsigned long) restart_stack; 690 S390_lowcore.restart_stack = (unsigned long) restart_stack;
@@ -708,7 +707,7 @@ static void __cpuinit smp_start_secondary(void *cpuvoid)
708} 707}
709 708
710/* Upping and downing of CPUs */ 709/* Upping and downing of CPUs */
711int __cpuinit __cpu_up(unsigned int cpu, struct task_struct *tidle) 710int __cpu_up(unsigned int cpu, struct task_struct *tidle)
712{ 711{
713 struct pcpu *pcpu; 712 struct pcpu *pcpu;
714 int rc; 713 int rc;
@@ -964,8 +963,8 @@ static struct attribute_group cpu_online_attr_group = {
964 .attrs = cpu_online_attrs, 963 .attrs = cpu_online_attrs,
965}; 964};
966 965
967static int __cpuinit smp_cpu_notify(struct notifier_block *self, 966static int smp_cpu_notify(struct notifier_block *self, unsigned long action,
968 unsigned long action, void *hcpu) 967 void *hcpu)
969{ 968{
970 unsigned int cpu = (unsigned int)(long)hcpu; 969 unsigned int cpu = (unsigned int)(long)hcpu;
971 struct cpu *c = &pcpu_devices[cpu].cpu; 970 struct cpu *c = &pcpu_devices[cpu].cpu;
@@ -983,7 +982,7 @@ static int __cpuinit smp_cpu_notify(struct notifier_block *self,
983 return notifier_from_errno(err); 982 return notifier_from_errno(err);
984} 983}
985 984
986static int __cpuinit smp_add_present_cpu(int cpu) 985static int smp_add_present_cpu(int cpu)
987{ 986{
988 struct cpu *c = &pcpu_devices[cpu].cpu; 987 struct cpu *c = &pcpu_devices[cpu].cpu;
989 struct device *s = &c->dev; 988 struct device *s = &c->dev;
diff --git a/arch/s390/kernel/sysinfo.c b/arch/s390/kernel/sysinfo.c
index 62f89d98e880..811f542b8ed4 100644
--- a/arch/s390/kernel/sysinfo.c
+++ b/arch/s390/kernel/sysinfo.c
@@ -418,7 +418,7 @@ void s390_adjust_jiffies(void)
418/* 418/*
419 * calibrate the delay loop 419 * calibrate the delay loop
420 */ 420 */
421void __cpuinit calibrate_delay(void) 421void calibrate_delay(void)
422{ 422{
423 s390_adjust_jiffies(); 423 s390_adjust_jiffies();
424 /* Print the good old Bogomips line .. */ 424 /* Print the good old Bogomips line .. */
diff --git a/arch/s390/kernel/vtime.c b/arch/s390/kernel/vtime.c
index 3fb09359eda6..9b9c1b78ec67 100644
--- a/arch/s390/kernel/vtime.c
+++ b/arch/s390/kernel/vtime.c
@@ -371,14 +371,14 @@ EXPORT_SYMBOL(del_virt_timer);
371/* 371/*
372 * Start the virtual CPU timer on the current CPU. 372 * Start the virtual CPU timer on the current CPU.
373 */ 373 */
374void __cpuinit init_cpu_vtimer(void) 374void init_cpu_vtimer(void)
375{ 375{
376 /* set initial cpu timer */ 376 /* set initial cpu timer */
377 set_vtimer(VTIMER_MAX_SLICE); 377 set_vtimer(VTIMER_MAX_SLICE);
378} 378}
379 379
380static int __cpuinit s390_nohz_notify(struct notifier_block *self, 380static int s390_nohz_notify(struct notifier_block *self, unsigned long action,
381 unsigned long action, void *hcpu) 381 void *hcpu)
382{ 382{
383 struct s390_idle_data *idle; 383 struct s390_idle_data *idle;
384 long cpu = (long) hcpu; 384 long cpu = (long) hcpu;
diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c
index ba694d2ba51e..34c1c9a90be2 100644
--- a/arch/s390/kvm/kvm-s390.c
+++ b/arch/s390/kvm/kvm-s390.c
@@ -702,14 +702,25 @@ static int __vcpu_run(struct kvm_vcpu *vcpu)
702 return rc; 702 return rc;
703 703
704 vcpu->arch.sie_block->icptcode = 0; 704 vcpu->arch.sie_block->icptcode = 0;
705 preempt_disable();
706 kvm_guest_enter();
707 preempt_enable();
708 VCPU_EVENT(vcpu, 6, "entering sie flags %x", 705 VCPU_EVENT(vcpu, 6, "entering sie flags %x",
709 atomic_read(&vcpu->arch.sie_block->cpuflags)); 706 atomic_read(&vcpu->arch.sie_block->cpuflags));
710 trace_kvm_s390_sie_enter(vcpu, 707 trace_kvm_s390_sie_enter(vcpu,
711 atomic_read(&vcpu->arch.sie_block->cpuflags)); 708 atomic_read(&vcpu->arch.sie_block->cpuflags));
709
710 /*
711 * As PF_VCPU will be used in fault handler, between guest_enter
712 * and guest_exit should be no uaccess.
713 */
714 preempt_disable();
715 kvm_guest_enter();
716 preempt_enable();
712 rc = sie64a(vcpu->arch.sie_block, vcpu->run->s.regs.gprs); 717 rc = sie64a(vcpu->arch.sie_block, vcpu->run->s.regs.gprs);
718 kvm_guest_exit();
719
720 VCPU_EVENT(vcpu, 6, "exit sie icptcode %d",
721 vcpu->arch.sie_block->icptcode);
722 trace_kvm_s390_sie_exit(vcpu, vcpu->arch.sie_block->icptcode);
723
713 if (rc > 0) 724 if (rc > 0)
714 rc = 0; 725 rc = 0;
715 if (rc < 0) { 726 if (rc < 0) {
@@ -721,10 +732,6 @@ static int __vcpu_run(struct kvm_vcpu *vcpu)
721 rc = kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING); 732 rc = kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
722 } 733 }
723 } 734 }
724 VCPU_EVENT(vcpu, 6, "exit sie icptcode %d",
725 vcpu->arch.sie_block->icptcode);
726 trace_kvm_s390_sie_exit(vcpu, vcpu->arch.sie_block->icptcode);
727 kvm_guest_exit();
728 735
729 memcpy(&vcpu->run->s.regs.gprs[14], &vcpu->arch.sie_block->gg14, 16); 736 memcpy(&vcpu->run->s.regs.gprs[14], &vcpu->arch.sie_block->gg14, 16);
730 return rc; 737 return rc;
diff --git a/arch/s390/kvm/priv.c b/arch/s390/kvm/priv.c
index 0da3e6eb6be6..4cdc54e63ebc 100644
--- a/arch/s390/kvm/priv.c
+++ b/arch/s390/kvm/priv.c
@@ -16,6 +16,7 @@
16#include <linux/errno.h> 16#include <linux/errno.h>
17#include <linux/compat.h> 17#include <linux/compat.h>
18#include <asm/asm-offsets.h> 18#include <asm/asm-offsets.h>
19#include <asm/facility.h>
19#include <asm/current.h> 20#include <asm/current.h>
20#include <asm/debug.h> 21#include <asm/debug.h>
21#include <asm/ebcdic.h> 22#include <asm/ebcdic.h>
@@ -532,8 +533,7 @@ static int handle_pfmf(struct kvm_vcpu *vcpu)
532 return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION); 533 return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
533 534
534 /* Only provide non-quiescing support if the host supports it */ 535 /* Only provide non-quiescing support if the host supports it */
535 if (vcpu->run->s.regs.gprs[reg1] & PFMF_NQ && 536 if (vcpu->run->s.regs.gprs[reg1] & PFMF_NQ && !test_facility(14))
536 S390_lowcore.stfl_fac_list & 0x00020000)
537 return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION); 537 return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
538 538
539 /* No support for conditional-SSKE */ 539 /* No support for conditional-SSKE */
diff --git a/arch/s390/mm/fault.c b/arch/s390/mm/fault.c
index 047c3e4c59a2..f00aefb66a4e 100644
--- a/arch/s390/mm/fault.c
+++ b/arch/s390/mm/fault.c
@@ -639,8 +639,8 @@ out:
639 put_task_struct(tsk); 639 put_task_struct(tsk);
640} 640}
641 641
642static int __cpuinit pfault_cpu_notify(struct notifier_block *self, 642static int pfault_cpu_notify(struct notifier_block *self, unsigned long action,
643 unsigned long action, void *hcpu) 643 void *hcpu)
644{ 644{
645 struct thread_struct *thread, *next; 645 struct thread_struct *thread, *next;
646 struct task_struct *tsk; 646 struct task_struct *tsk;
diff --git a/arch/s390/mm/init.c b/arch/s390/mm/init.c
index ce36ea80e4f9..ad446b0c55b6 100644
--- a/arch/s390/mm/init.c
+++ b/arch/s390/mm/init.c
@@ -69,6 +69,7 @@ static void __init setup_zero_pages(void)
69 order = 2; 69 order = 2;
70 break; 70 break;
71 case 0x2827: /* zEC12 */ 71 case 0x2827: /* zEC12 */
72 case 0x2828: /* zEC12 */
72 default: 73 default:
73 order = 5; 74 order = 5;
74 break; 75 break;
diff --git a/arch/s390/net/bpf_jit_comp.c b/arch/s390/net/bpf_jit_comp.c
index 82f165f8078c..d5f10a43a58f 100644
--- a/arch/s390/net/bpf_jit_comp.c
+++ b/arch/s390/net/bpf_jit_comp.c
@@ -9,6 +9,8 @@
9#include <linux/netdevice.h> 9#include <linux/netdevice.h>
10#include <linux/if_vlan.h> 10#include <linux/if_vlan.h>
11#include <linux/filter.h> 11#include <linux/filter.h>
12#include <linux/random.h>
13#include <linux/init.h>
12#include <asm/cacheflush.h> 14#include <asm/cacheflush.h>
13#include <asm/processor.h> 15#include <asm/processor.h>
14#include <asm/facility.h> 16#include <asm/facility.h>
@@ -221,6 +223,37 @@ static void bpf_jit_epilogue(struct bpf_jit *jit)
221 EMIT2(0x07fe); 223 EMIT2(0x07fe);
222} 224}
223 225
226/* Helper to find the offset of pkt_type in sk_buff
227 * Make sure its still a 3bit field starting at the MSBs within a byte.
228 */
229#define PKT_TYPE_MAX 0xe0
230static int pkt_type_offset;
231
232static int __init bpf_pkt_type_offset_init(void)
233{
234 struct sk_buff skb_probe = {
235 .pkt_type = ~0,
236 };
237 char *ct = (char *)&skb_probe;
238 int off;
239
240 pkt_type_offset = -1;
241 for (off = 0; off < sizeof(struct sk_buff); off++) {
242 if (!ct[off])
243 continue;
244 if (ct[off] == PKT_TYPE_MAX)
245 pkt_type_offset = off;
246 else {
247 /* Found non matching bit pattern, fix needed. */
248 WARN_ON_ONCE(1);
249 pkt_type_offset = -1;
250 return -1;
251 }
252 }
253 return 0;
254}
255device_initcall(bpf_pkt_type_offset_init);
256
224/* 257/*
225 * make sure we dont leak kernel information to user 258 * make sure we dont leak kernel information to user
226 */ 259 */
@@ -720,6 +753,16 @@ call_fn: /* lg %r1,<d(function)>(%r13) */
720 EMIT4_DISP(0x88500000, 12); 753 EMIT4_DISP(0x88500000, 12);
721 } 754 }
722 break; 755 break;
756 case BPF_S_ANC_PKTTYPE:
757 if (pkt_type_offset < 0)
758 goto out;
759 /* lhi %r5,0 */
760 EMIT4(0xa7580000);
761 /* ic %r5,<d(pkt_type_offset)>(%r2) */
762 EMIT4_DISP(0x43502000, pkt_type_offset);
763 /* srl %r5,5 */
764 EMIT4_DISP(0x88500000, 5);
765 break;
723 case BPF_S_ANC_CPU: /* A = smp_processor_id() */ 766 case BPF_S_ANC_CPU: /* A = smp_processor_id() */
724#ifdef CONFIG_SMP 767#ifdef CONFIG_SMP
725 /* l %r5,<d(cpu_nr)> */ 768 /* l %r5,<d(cpu_nr)> */
@@ -738,8 +781,41 @@ out:
738 return -1; 781 return -1;
739} 782}
740 783
784/*
785 * Note: for security reasons, bpf code will follow a randomly
786 * sized amount of illegal instructions.
787 */
788struct bpf_binary_header {
789 unsigned int pages;
790 u8 image[];
791};
792
793static struct bpf_binary_header *bpf_alloc_binary(unsigned int bpfsize,
794 u8 **image_ptr)
795{
796 struct bpf_binary_header *header;
797 unsigned int sz, hole;
798
799 /* Most BPF filters are really small, but if some of them fill a page,
800 * allow at least 128 extra bytes for illegal instructions.
801 */
802 sz = round_up(bpfsize + sizeof(*header) + 128, PAGE_SIZE);
803 header = module_alloc(sz);
804 if (!header)
805 return NULL;
806 memset(header, 0, sz);
807 header->pages = sz / PAGE_SIZE;
808 hole = sz - bpfsize + sizeof(*header);
809 /* Insert random number of illegal instructions before BPF code
810 * and make sure the first instruction starts at an even address.
811 */
812 *image_ptr = &header->image[(prandom_u32() % hole) & -2];
813 return header;
814}
815
741void bpf_jit_compile(struct sk_filter *fp) 816void bpf_jit_compile(struct sk_filter *fp)
742{ 817{
818 struct bpf_binary_header *header = NULL;
743 unsigned long size, prg_len, lit_len; 819 unsigned long size, prg_len, lit_len;
744 struct bpf_jit jit, cjit; 820 struct bpf_jit jit, cjit;
745 unsigned int *addrs; 821 unsigned int *addrs;
@@ -772,12 +848,11 @@ void bpf_jit_compile(struct sk_filter *fp)
772 } else if (jit.prg == cjit.prg && jit.lit == cjit.lit) { 848 } else if (jit.prg == cjit.prg && jit.lit == cjit.lit) {
773 prg_len = jit.prg - jit.start; 849 prg_len = jit.prg - jit.start;
774 lit_len = jit.lit - jit.mid; 850 lit_len = jit.lit - jit.mid;
775 size = max_t(unsigned long, prg_len + lit_len, 851 size = prg_len + lit_len;
776 sizeof(struct work_struct));
777 if (size >= BPF_SIZE_MAX) 852 if (size >= BPF_SIZE_MAX)
778 goto out; 853 goto out;
779 jit.start = module_alloc(size); 854 header = bpf_alloc_binary(size, &jit.start);
780 if (!jit.start) 855 if (!header)
781 goto out; 856 goto out;
782 jit.prg = jit.mid = jit.start + prg_len; 857 jit.prg = jit.mid = jit.start + prg_len;
783 jit.lit = jit.end = jit.start + prg_len + lit_len; 858 jit.lit = jit.end = jit.start + prg_len + lit_len;
@@ -788,37 +863,25 @@ void bpf_jit_compile(struct sk_filter *fp)
788 cjit = jit; 863 cjit = jit;
789 } 864 }
790 if (bpf_jit_enable > 1) { 865 if (bpf_jit_enable > 1) {
791 pr_err("flen=%d proglen=%lu pass=%d image=%p\n", 866 bpf_jit_dump(fp->len, jit.end - jit.start, pass, jit.start);
792 fp->len, jit.end - jit.start, pass, jit.start); 867 if (jit.start)
793 if (jit.start) {
794 printk(KERN_ERR "JIT code:\n");
795 print_fn_code(jit.start, jit.mid - jit.start); 868 print_fn_code(jit.start, jit.mid - jit.start);
796 print_hex_dump(KERN_ERR, "JIT literals:\n",
797 DUMP_PREFIX_ADDRESS, 16, 1,
798 jit.mid, jit.end - jit.mid, false);
799 }
800 } 869 }
801 if (jit.start) 870 if (jit.start) {
871 set_memory_ro((unsigned long)header, header->pages);
802 fp->bpf_func = (void *) jit.start; 872 fp->bpf_func = (void *) jit.start;
873 }
803out: 874out:
804 kfree(addrs); 875 kfree(addrs);
805} 876}
806 877
807static void jit_free_defer(struct work_struct *arg)
808{
809 module_free(NULL, arg);
810}
811
812/* run from softirq, we must use a work_struct to call
813 * module_free() from process context
814 */
815void bpf_jit_free(struct sk_filter *fp) 878void bpf_jit_free(struct sk_filter *fp)
816{ 879{
817 struct work_struct *work; 880 unsigned long addr = (unsigned long)fp->bpf_func & PAGE_MASK;
881 struct bpf_binary_header *header = (void *)addr;
818 882
819 if (fp->bpf_func == sk_run_filter) 883 if (fp->bpf_func == sk_run_filter)
820 return; 884 return;
821 work = (struct work_struct *)fp->bpf_func; 885 set_memory_rw(addr, header->pages);
822 INIT_WORK(work, jit_free_defer); 886 module_free(NULL, header);
823 schedule_work(work);
824} 887}
diff --git a/arch/s390/oprofile/init.c b/arch/s390/oprofile/init.c
index ffeb17ce7f31..930783d2c99b 100644
--- a/arch/s390/oprofile/init.c
+++ b/arch/s390/oprofile/init.c
@@ -440,7 +440,7 @@ static int oprofile_hwsampler_init(struct oprofile_operations *ops)
440 switch (id.machine) { 440 switch (id.machine) {
441 case 0x2097: case 0x2098: ops->cpu_type = "s390/z10"; break; 441 case 0x2097: case 0x2098: ops->cpu_type = "s390/z10"; break;
442 case 0x2817: case 0x2818: ops->cpu_type = "s390/z196"; break; 442 case 0x2817: case 0x2818: ops->cpu_type = "s390/z196"; break;
443 case 0x2827: ops->cpu_type = "s390/zEC12"; break; 443 case 0x2827: case 0x2828: ops->cpu_type = "s390/zEC12"; break;
444 default: return -ENODEV; 444 default: return -ENODEV;
445 } 445 }
446 } 446 }
diff --git a/arch/score/mm/tlb-score.c b/arch/score/mm/tlb-score.c
index 6fdb100244c8..004073717de0 100644
--- a/arch/score/mm/tlb-score.c
+++ b/arch/score/mm/tlb-score.c
@@ -240,7 +240,7 @@ void __update_tlb(struct vm_area_struct *vma, unsigned long address, pte_t pte)
240 local_irq_restore(flags); 240 local_irq_restore(flags);
241} 241}
242 242
243void __cpuinit tlb_init(void) 243void tlb_init(void)
244{ 244{
245 tlblock_set(0); 245 tlblock_set(0);
246 local_flush_tlb_all(); 246 local_flush_tlb_all();
diff --git a/arch/sh/configs/sh03_defconfig b/arch/sh/configs/sh03_defconfig
index 2051821724c6..0cf4097b71e8 100644
--- a/arch/sh/configs/sh03_defconfig
+++ b/arch/sh/configs/sh03_defconfig
@@ -22,7 +22,7 @@ CONFIG_PREEMPT=y
22CONFIG_CMDLINE_OVERWRITE=y 22CONFIG_CMDLINE_OVERWRITE=y
23CONFIG_CMDLINE="console=ttySC1,115200 mem=64M root=/dev/nfs" 23CONFIG_CMDLINE="console=ttySC1,115200 mem=64M root=/dev/nfs"
24CONFIG_PCI=y 24CONFIG_PCI=y
25CONFIG_HOTPLUG_PCI=m 25CONFIG_HOTPLUG_PCI=y
26CONFIG_BINFMT_MISC=y 26CONFIG_BINFMT_MISC=y
27CONFIG_NET=y 27CONFIG_NET=y
28CONFIG_PACKET=y 28CONFIG_PACKET=y
diff --git a/arch/sh/kernel/cpu/init.c b/arch/sh/kernel/cpu/init.c
index 61a07dafcd46..ecf83cd158dc 100644
--- a/arch/sh/kernel/cpu/init.c
+++ b/arch/sh/kernel/cpu/init.c
@@ -43,9 +43,9 @@
43 * peripherals (nofpu, nodsp, and so forth). 43 * peripherals (nofpu, nodsp, and so forth).
44 */ 44 */
45#define onchip_setup(x) \ 45#define onchip_setup(x) \
46static int x##_disabled __cpuinitdata = !cpu_has_##x; \ 46static int x##_disabled = !cpu_has_##x; \
47 \ 47 \
48static int __cpuinit x##_setup(char *opts) \ 48static int x##_setup(char *opts) \
49{ \ 49{ \
50 x##_disabled = 1; \ 50 x##_disabled = 1; \
51 return 1; \ 51 return 1; \
@@ -59,7 +59,7 @@ onchip_setup(dsp);
59#define CPUOPM 0xff2f0000 59#define CPUOPM 0xff2f0000
60#define CPUOPM_RABD (1 << 5) 60#define CPUOPM_RABD (1 << 5)
61 61
62static void __cpuinit speculative_execution_init(void) 62static void speculative_execution_init(void)
63{ 63{
64 /* Clear RABD */ 64 /* Clear RABD */
65 __raw_writel(__raw_readl(CPUOPM) & ~CPUOPM_RABD, CPUOPM); 65 __raw_writel(__raw_readl(CPUOPM) & ~CPUOPM_RABD, CPUOPM);
@@ -78,7 +78,7 @@ static void __cpuinit speculative_execution_init(void)
78#define EXPMASK_BRDSSLP (1 << 1) 78#define EXPMASK_BRDSSLP (1 << 1)
79#define EXPMASK_MMCAW (1 << 4) 79#define EXPMASK_MMCAW (1 << 4)
80 80
81static void __cpuinit expmask_init(void) 81static void expmask_init(void)
82{ 82{
83 unsigned long expmask = __raw_readl(EXPMASK); 83 unsigned long expmask = __raw_readl(EXPMASK);
84 84
@@ -217,7 +217,7 @@ static void detect_cache_shape(void)
217 l2_cache_shape = -1; /* No S-cache */ 217 l2_cache_shape = -1; /* No S-cache */
218} 218}
219 219
220static void __cpuinit fpu_init(void) 220static void fpu_init(void)
221{ 221{
222 /* Disable the FPU */ 222 /* Disable the FPU */
223 if (fpu_disabled && (current_cpu_data.flags & CPU_HAS_FPU)) { 223 if (fpu_disabled && (current_cpu_data.flags & CPU_HAS_FPU)) {
@@ -230,7 +230,7 @@ static void __cpuinit fpu_init(void)
230} 230}
231 231
232#ifdef CONFIG_SH_DSP 232#ifdef CONFIG_SH_DSP
233static void __cpuinit release_dsp(void) 233static void release_dsp(void)
234{ 234{
235 unsigned long sr; 235 unsigned long sr;
236 236
@@ -244,7 +244,7 @@ static void __cpuinit release_dsp(void)
244 ); 244 );
245} 245}
246 246
247static void __cpuinit dsp_init(void) 247static void dsp_init(void)
248{ 248{
249 unsigned long sr; 249 unsigned long sr;
250 250
@@ -276,7 +276,7 @@ static void __cpuinit dsp_init(void)
276 release_dsp(); 276 release_dsp();
277} 277}
278#else 278#else
279static inline void __cpuinit dsp_init(void) { } 279static inline void dsp_init(void) { }
280#endif /* CONFIG_SH_DSP */ 280#endif /* CONFIG_SH_DSP */
281 281
282/** 282/**
@@ -295,7 +295,7 @@ static inline void __cpuinit dsp_init(void) { }
295 * Each processor family is still responsible for doing its own probing 295 * Each processor family is still responsible for doing its own probing
296 * and cache configuration in cpu_probe(). 296 * and cache configuration in cpu_probe().
297 */ 297 */
298asmlinkage void __cpuinit cpu_init(void) 298asmlinkage void cpu_init(void)
299{ 299{
300 current_thread_info()->cpu = hard_smp_processor_id(); 300 current_thread_info()->cpu = hard_smp_processor_id();
301 301
diff --git a/arch/sh/kernel/cpu/sh2/probe.c b/arch/sh/kernel/cpu/sh2/probe.c
index bab8e75958ae..6c687ae812ef 100644
--- a/arch/sh/kernel/cpu/sh2/probe.c
+++ b/arch/sh/kernel/cpu/sh2/probe.c
@@ -13,7 +13,7 @@
13#include <asm/processor.h> 13#include <asm/processor.h>
14#include <asm/cache.h> 14#include <asm/cache.h>
15 15
16void __cpuinit cpu_probe(void) 16void cpu_probe(void)
17{ 17{
18#if defined(CONFIG_CPU_SUBTYPE_SH7619) 18#if defined(CONFIG_CPU_SUBTYPE_SH7619)
19 boot_cpu_data.type = CPU_SH7619; 19 boot_cpu_data.type = CPU_SH7619;
diff --git a/arch/sh/kernel/cpu/sh2a/probe.c b/arch/sh/kernel/cpu/sh2a/probe.c
index 5170b6aa4129..3f87971082f1 100644
--- a/arch/sh/kernel/cpu/sh2a/probe.c
+++ b/arch/sh/kernel/cpu/sh2a/probe.c
@@ -13,7 +13,7 @@
13#include <asm/processor.h> 13#include <asm/processor.h>
14#include <asm/cache.h> 14#include <asm/cache.h>
15 15
16void __cpuinit cpu_probe(void) 16void cpu_probe(void)
17{ 17{
18 boot_cpu_data.family = CPU_FAMILY_SH2A; 18 boot_cpu_data.family = CPU_FAMILY_SH2A;
19 19
diff --git a/arch/sh/kernel/cpu/sh3/probe.c b/arch/sh/kernel/cpu/sh3/probe.c
index bf23c322e164..426e1e1dcedc 100644
--- a/arch/sh/kernel/cpu/sh3/probe.c
+++ b/arch/sh/kernel/cpu/sh3/probe.c
@@ -16,7 +16,7 @@
16#include <asm/cache.h> 16#include <asm/cache.h>
17#include <asm/io.h> 17#include <asm/io.h>
18 18
19void __cpuinit cpu_probe(void) 19void cpu_probe(void)
20{ 20{
21 unsigned long addr0, addr1, data0, data1, data2, data3; 21 unsigned long addr0, addr1, data0, data1, data2, data3;
22 22
diff --git a/arch/sh/kernel/cpu/sh4/probe.c b/arch/sh/kernel/cpu/sh4/probe.c
index 0fbbd50bc8ad..a521bcf50695 100644
--- a/arch/sh/kernel/cpu/sh4/probe.c
+++ b/arch/sh/kernel/cpu/sh4/probe.c
@@ -15,7 +15,7 @@
15#include <asm/processor.h> 15#include <asm/processor.h>
16#include <asm/cache.h> 16#include <asm/cache.h>
17 17
18void __cpuinit cpu_probe(void) 18void cpu_probe(void)
19{ 19{
20 unsigned long pvr, prr, cvr; 20 unsigned long pvr, prr, cvr;
21 unsigned long size; 21 unsigned long size;
diff --git a/arch/sh/kernel/cpu/sh4a/smp-shx3.c b/arch/sh/kernel/cpu/sh4a/smp-shx3.c
index 03f2b55757cf..4a298808789c 100644
--- a/arch/sh/kernel/cpu/sh4a/smp-shx3.c
+++ b/arch/sh/kernel/cpu/sh4a/smp-shx3.c
@@ -124,7 +124,7 @@ static void shx3_update_boot_vector(unsigned int cpu)
124 __raw_writel(STBCR_RESET, STBCR_REG(cpu)); 124 __raw_writel(STBCR_RESET, STBCR_REG(cpu));
125} 125}
126 126
127static int __cpuinit 127static int
128shx3_cpu_callback(struct notifier_block *nfb, unsigned long action, void *hcpu) 128shx3_cpu_callback(struct notifier_block *nfb, unsigned long action, void *hcpu)
129{ 129{
130 unsigned int cpu = (unsigned int)hcpu; 130 unsigned int cpu = (unsigned int)hcpu;
@@ -143,11 +143,11 @@ shx3_cpu_callback(struct notifier_block *nfb, unsigned long action, void *hcpu)
143 return NOTIFY_OK; 143 return NOTIFY_OK;
144} 144}
145 145
146static struct notifier_block __cpuinitdata shx3_cpu_notifier = { 146static struct notifier_block shx3_cpu_notifier = {
147 .notifier_call = shx3_cpu_callback, 147 .notifier_call = shx3_cpu_callback,
148}; 148};
149 149
150static int __cpuinit register_shx3_cpu_notifier(void) 150static int register_shx3_cpu_notifier(void)
151{ 151{
152 register_hotcpu_notifier(&shx3_cpu_notifier); 152 register_hotcpu_notifier(&shx3_cpu_notifier);
153 return 0; 153 return 0;
diff --git a/arch/sh/kernel/cpu/sh5/probe.c b/arch/sh/kernel/cpu/sh5/probe.c
index 9e882409e4e9..eca427c2f2f3 100644
--- a/arch/sh/kernel/cpu/sh5/probe.c
+++ b/arch/sh/kernel/cpu/sh5/probe.c
@@ -17,7 +17,7 @@
17#include <asm/cache.h> 17#include <asm/cache.h>
18#include <asm/tlb.h> 18#include <asm/tlb.h>
19 19
20void __cpuinit cpu_probe(void) 20void cpu_probe(void)
21{ 21{
22 unsigned long long cir; 22 unsigned long long cir;
23 23
diff --git a/arch/sh/kernel/perf_event.c b/arch/sh/kernel/perf_event.c
index 068b8a2759b5..b9cefebda55c 100644
--- a/arch/sh/kernel/perf_event.c
+++ b/arch/sh/kernel/perf_event.c
@@ -367,7 +367,7 @@ static void sh_pmu_setup(int cpu)
367 memset(cpuhw, 0, sizeof(struct cpu_hw_events)); 367 memset(cpuhw, 0, sizeof(struct cpu_hw_events));
368} 368}
369 369
370static int __cpuinit 370static int
371sh_pmu_notifier(struct notifier_block *self, unsigned long action, void *hcpu) 371sh_pmu_notifier(struct notifier_block *self, unsigned long action, void *hcpu)
372{ 372{
373 unsigned int cpu = (long)hcpu; 373 unsigned int cpu = (long)hcpu;
@@ -384,7 +384,7 @@ sh_pmu_notifier(struct notifier_block *self, unsigned long action, void *hcpu)
384 return NOTIFY_OK; 384 return NOTIFY_OK;
385} 385}
386 386
387int __cpuinit register_sh_pmu(struct sh_pmu *_pmu) 387int register_sh_pmu(struct sh_pmu *_pmu)
388{ 388{
389 if (sh_pmu) 389 if (sh_pmu)
390 return -EBUSY; 390 return -EBUSY;
diff --git a/arch/sh/kernel/process.c b/arch/sh/kernel/process.c
index 055d91b70305..53bc6c4c84ec 100644
--- a/arch/sh/kernel/process.c
+++ b/arch/sh/kernel/process.c
@@ -65,7 +65,7 @@ void arch_task_cache_init(void)
65# define HAVE_SOFTFP 0 65# define HAVE_SOFTFP 0
66#endif 66#endif
67 67
68void __cpuinit init_thread_xstate(void) 68void init_thread_xstate(void)
69{ 69{
70 if (boot_cpu_data.flags & CPU_HAS_FPU) 70 if (boot_cpu_data.flags & CPU_HAS_FPU)
71 xstate_size = sizeof(struct sh_fpu_hard_struct); 71 xstate_size = sizeof(struct sh_fpu_hard_struct);
diff --git a/arch/sh/kernel/setup.c b/arch/sh/kernel/setup.c
index ebe7a7d97215..1cf90e947dbf 100644
--- a/arch/sh/kernel/setup.c
+++ b/arch/sh/kernel/setup.c
@@ -172,7 +172,7 @@ disable:
172#endif 172#endif
173} 173}
174 174
175void __cpuinit calibrate_delay(void) 175void calibrate_delay(void)
176{ 176{
177 struct clk *clk = clk_get(NULL, "cpu_clk"); 177 struct clk *clk = clk_get(NULL, "cpu_clk");
178 178
diff --git a/arch/sh/kernel/smp.c b/arch/sh/kernel/smp.c
index 45696451f0ea..86a7936a980b 100644
--- a/arch/sh/kernel/smp.c
+++ b/arch/sh/kernel/smp.c
@@ -37,7 +37,7 @@ struct plat_smp_ops *mp_ops = NULL;
37/* State of each CPU */ 37/* State of each CPU */
38DEFINE_PER_CPU(int, cpu_state) = { 0 }; 38DEFINE_PER_CPU(int, cpu_state) = { 0 };
39 39
40void __cpuinit register_smp_ops(struct plat_smp_ops *ops) 40void register_smp_ops(struct plat_smp_ops *ops)
41{ 41{
42 if (mp_ops) 42 if (mp_ops)
43 printk(KERN_WARNING "Overriding previously set SMP ops\n"); 43 printk(KERN_WARNING "Overriding previously set SMP ops\n");
@@ -45,7 +45,7 @@ void __cpuinit register_smp_ops(struct plat_smp_ops *ops)
45 mp_ops = ops; 45 mp_ops = ops;
46} 46}
47 47
48static inline void __cpuinit smp_store_cpu_info(unsigned int cpu) 48static inline void smp_store_cpu_info(unsigned int cpu)
49{ 49{
50 struct sh_cpuinfo *c = cpu_data + cpu; 50 struct sh_cpuinfo *c = cpu_data + cpu;
51 51
@@ -174,7 +174,7 @@ void native_play_dead(void)
174} 174}
175#endif 175#endif
176 176
177asmlinkage void __cpuinit start_secondary(void) 177asmlinkage void start_secondary(void)
178{ 178{
179 unsigned int cpu = smp_processor_id(); 179 unsigned int cpu = smp_processor_id();
180 struct mm_struct *mm = &init_mm; 180 struct mm_struct *mm = &init_mm;
@@ -215,7 +215,7 @@ extern struct {
215 void *thread_info; 215 void *thread_info;
216} stack_start; 216} stack_start;
217 217
218int __cpuinit __cpu_up(unsigned int cpu, struct task_struct *tsk) 218int __cpu_up(unsigned int cpu, struct task_struct *tsk)
219{ 219{
220 unsigned long timeout; 220 unsigned long timeout;
221 221
diff --git a/arch/sh/kernel/traps_32.c b/arch/sh/kernel/traps_32.c
index 5f513a64dedf..68e99f09171d 100644
--- a/arch/sh/kernel/traps_32.c
+++ b/arch/sh/kernel/traps_32.c
@@ -741,7 +741,7 @@ asmlinkage void do_exception_error(unsigned long r4, unsigned long r5,
741 die_if_kernel("exception", regs, ex); 741 die_if_kernel("exception", regs, ex);
742} 742}
743 743
744void __cpuinit per_cpu_trap_init(void) 744void per_cpu_trap_init(void)
745{ 745{
746 extern void *vbr_base; 746 extern void *vbr_base;
747 747
diff --git a/arch/sh/kernel/traps_64.c b/arch/sh/kernel/traps_64.c
index f87d20da1791..112ea11c030d 100644
--- a/arch/sh/kernel/traps_64.c
+++ b/arch/sh/kernel/traps_64.c
@@ -810,7 +810,7 @@ asmlinkage void do_debug_interrupt(unsigned long code, struct pt_regs *regs)
810 poke_real_address_q(DM_EXP_CAUSE_PHY, 0x0); 810 poke_real_address_q(DM_EXP_CAUSE_PHY, 0x0);
811} 811}
812 812
813void __cpuinit per_cpu_trap_init(void) 813void per_cpu_trap_init(void)
814{ 814{
815 /* Nothing to do for now, VBR initialization later. */ 815 /* Nothing to do for now, VBR initialization later. */
816} 816}
diff --git a/arch/sh/mm/tlb-sh5.c b/arch/sh/mm/tlb-sh5.c
index ff1c40a31cbc..e4bb2a8e0a69 100644
--- a/arch/sh/mm/tlb-sh5.c
+++ b/arch/sh/mm/tlb-sh5.c
@@ -17,7 +17,7 @@
17/** 17/**
18 * sh64_tlb_init - Perform initial setup for the DTLB and ITLB. 18 * sh64_tlb_init - Perform initial setup for the DTLB and ITLB.
19 */ 19 */
20int __cpuinit sh64_tlb_init(void) 20int sh64_tlb_init(void)
21{ 21{
22 /* Assign some sane DTLB defaults */ 22 /* Assign some sane DTLB defaults */
23 cpu_data->dtlb.entries = 64; 23 cpu_data->dtlb.entries = 64;
diff --git a/arch/sparc/kernel/ds.c b/arch/sparc/kernel/ds.c
index 11d460f6f9cc..62d6b153ffa2 100644
--- a/arch/sparc/kernel/ds.c
+++ b/arch/sparc/kernel/ds.c
@@ -528,10 +528,8 @@ static void dr_cpu_mark(struct ds_data *resp, int cpu, int ncpus,
528 } 528 }
529} 529}
530 530
531static int __cpuinit dr_cpu_configure(struct ds_info *dp, 531static int dr_cpu_configure(struct ds_info *dp, struct ds_cap_state *cp,
532 struct ds_cap_state *cp, 532 u64 req_num, cpumask_t *mask)
533 u64 req_num,
534 cpumask_t *mask)
535{ 533{
536 struct ds_data *resp; 534 struct ds_data *resp;
537 int resp_len, ncpus, cpu; 535 int resp_len, ncpus, cpu;
@@ -627,9 +625,8 @@ static int dr_cpu_unconfigure(struct ds_info *dp,
627 return 0; 625 return 0;
628} 626}
629 627
630static void __cpuinit dr_cpu_data(struct ds_info *dp, 628static void dr_cpu_data(struct ds_info *dp, struct ds_cap_state *cp, void *buf,
631 struct ds_cap_state *cp, 629 int len)
632 void *buf, int len)
633{ 630{
634 struct ds_data *data = buf; 631 struct ds_data *data = buf;
635 struct dr_cpu_tag *tag = (struct dr_cpu_tag *) (data + 1); 632 struct dr_cpu_tag *tag = (struct dr_cpu_tag *) (data + 1);
diff --git a/arch/sparc/kernel/entry.h b/arch/sparc/kernel/entry.h
index cc3c5cb47cda..9c179fbfb219 100644
--- a/arch/sparc/kernel/entry.h
+++ b/arch/sparc/kernel/entry.h
@@ -250,7 +250,7 @@ extern struct ino_bucket *ivector_table;
250extern unsigned long ivector_table_pa; 250extern unsigned long ivector_table_pa;
251 251
252extern void init_irqwork_curcpu(void); 252extern void init_irqwork_curcpu(void);
253extern void __cpuinit sun4v_register_mondo_queues(int this_cpu); 253extern void sun4v_register_mondo_queues(int this_cpu);
254 254
255#endif /* CONFIG_SPARC32 */ 255#endif /* CONFIG_SPARC32 */
256#endif /* _ENTRY_H */ 256#endif /* _ENTRY_H */
diff --git a/arch/sparc/kernel/hvtramp.S b/arch/sparc/kernel/hvtramp.S
index 605c960b2fa6..4eb1a5a1d544 100644
--- a/arch/sparc/kernel/hvtramp.S
+++ b/arch/sparc/kernel/hvtramp.S
@@ -16,7 +16,6 @@
16#include <asm/asi.h> 16#include <asm/asi.h>
17#include <asm/pil.h> 17#include <asm/pil.h>
18 18
19 __CPUINIT
20 .align 8 19 .align 8
21 .globl hv_cpu_startup, hv_cpu_startup_end 20 .globl hv_cpu_startup, hv_cpu_startup_end
22 21
diff --git a/arch/sparc/kernel/irq_64.c b/arch/sparc/kernel/irq_64.c
index 9bcbbe2c4e7e..d4840cec2c55 100644
--- a/arch/sparc/kernel/irq_64.c
+++ b/arch/sparc/kernel/irq_64.c
@@ -835,7 +835,8 @@ void notrace init_irqwork_curcpu(void)
835 * Therefore you cannot make any OBP calls, not even prom_printf, 835 * Therefore you cannot make any OBP calls, not even prom_printf,
836 * from these two routines. 836 * from these two routines.
837 */ 837 */
838static void __cpuinit notrace register_one_mondo(unsigned long paddr, unsigned long type, unsigned long qmask) 838static void notrace register_one_mondo(unsigned long paddr, unsigned long type,
839 unsigned long qmask)
839{ 840{
840 unsigned long num_entries = (qmask + 1) / 64; 841 unsigned long num_entries = (qmask + 1) / 64;
841 unsigned long status; 842 unsigned long status;
@@ -848,7 +849,7 @@ static void __cpuinit notrace register_one_mondo(unsigned long paddr, unsigned l
848 } 849 }
849} 850}
850 851
851void __cpuinit notrace sun4v_register_mondo_queues(int this_cpu) 852void notrace sun4v_register_mondo_queues(int this_cpu)
852{ 853{
853 struct trap_per_cpu *tb = &trap_block[this_cpu]; 854 struct trap_per_cpu *tb = &trap_block[this_cpu];
854 855
diff --git a/arch/sparc/kernel/leon_smp.c b/arch/sparc/kernel/leon_smp.c
index d7aa524b7283..6edf955f987c 100644
--- a/arch/sparc/kernel/leon_smp.c
+++ b/arch/sparc/kernel/leon_smp.c
@@ -54,7 +54,7 @@ extern ctxd_t *srmmu_ctx_table_phys;
54static int smp_processors_ready; 54static int smp_processors_ready;
55extern volatile unsigned long cpu_callin_map[NR_CPUS]; 55extern volatile unsigned long cpu_callin_map[NR_CPUS];
56extern cpumask_t smp_commenced_mask; 56extern cpumask_t smp_commenced_mask;
57void __cpuinit leon_configure_cache_smp(void); 57void leon_configure_cache_smp(void);
58static void leon_ipi_init(void); 58static void leon_ipi_init(void);
59 59
60/* IRQ number of LEON IPIs */ 60/* IRQ number of LEON IPIs */
@@ -69,12 +69,12 @@ static inline unsigned long do_swap(volatile unsigned long *ptr,
69 return val; 69 return val;
70} 70}
71 71
72void __cpuinit leon_cpu_pre_starting(void *arg) 72void leon_cpu_pre_starting(void *arg)
73{ 73{
74 leon_configure_cache_smp(); 74 leon_configure_cache_smp();
75} 75}
76 76
77void __cpuinit leon_cpu_pre_online(void *arg) 77void leon_cpu_pre_online(void *arg)
78{ 78{
79 int cpuid = hard_smp_processor_id(); 79 int cpuid = hard_smp_processor_id();
80 80
@@ -106,7 +106,7 @@ void __cpuinit leon_cpu_pre_online(void *arg)
106 106
107extern struct linux_prom_registers smp_penguin_ctable; 107extern struct linux_prom_registers smp_penguin_ctable;
108 108
109void __cpuinit leon_configure_cache_smp(void) 109void leon_configure_cache_smp(void)
110{ 110{
111 unsigned long cfg = sparc_leon3_get_dcachecfg(); 111 unsigned long cfg = sparc_leon3_get_dcachecfg();
112 int me = smp_processor_id(); 112 int me = smp_processor_id();
@@ -186,7 +186,7 @@ void __init leon_boot_cpus(void)
186 186
187} 187}
188 188
189int __cpuinit leon_boot_one_cpu(int i, struct task_struct *idle) 189int leon_boot_one_cpu(int i, struct task_struct *idle)
190{ 190{
191 int timeout; 191 int timeout;
192 192
diff --git a/arch/sparc/kernel/mdesc.c b/arch/sparc/kernel/mdesc.c
index 831c001604e8..b90bf23e3aab 100644
--- a/arch/sparc/kernel/mdesc.c
+++ b/arch/sparc/kernel/mdesc.c
@@ -571,9 +571,7 @@ static void __init report_platform_properties(void)
571 mdesc_release(hp); 571 mdesc_release(hp);
572} 572}
573 573
574static void __cpuinit fill_in_one_cache(cpuinfo_sparc *c, 574static void fill_in_one_cache(cpuinfo_sparc *c, struct mdesc_handle *hp, u64 mp)
575 struct mdesc_handle *hp,
576 u64 mp)
577{ 575{
578 const u64 *level = mdesc_get_property(hp, mp, "level", NULL); 576 const u64 *level = mdesc_get_property(hp, mp, "level", NULL);
579 const u64 *size = mdesc_get_property(hp, mp, "size", NULL); 577 const u64 *size = mdesc_get_property(hp, mp, "size", NULL);
@@ -616,7 +614,7 @@ static void __cpuinit fill_in_one_cache(cpuinfo_sparc *c,
616 } 614 }
617} 615}
618 616
619static void __cpuinit mark_core_ids(struct mdesc_handle *hp, u64 mp, int core_id) 617static void mark_core_ids(struct mdesc_handle *hp, u64 mp, int core_id)
620{ 618{
621 u64 a; 619 u64 a;
622 620
@@ -649,7 +647,7 @@ static void __cpuinit mark_core_ids(struct mdesc_handle *hp, u64 mp, int core_id
649 } 647 }
650} 648}
651 649
652static void __cpuinit set_core_ids(struct mdesc_handle *hp) 650static void set_core_ids(struct mdesc_handle *hp)
653{ 651{
654 int idx; 652 int idx;
655 u64 mp; 653 u64 mp;
@@ -674,7 +672,7 @@ static void __cpuinit set_core_ids(struct mdesc_handle *hp)
674 } 672 }
675} 673}
676 674
677static void __cpuinit mark_proc_ids(struct mdesc_handle *hp, u64 mp, int proc_id) 675static void mark_proc_ids(struct mdesc_handle *hp, u64 mp, int proc_id)
678{ 676{
679 u64 a; 677 u64 a;
680 678
@@ -693,7 +691,7 @@ static void __cpuinit mark_proc_ids(struct mdesc_handle *hp, u64 mp, int proc_id
693 } 691 }
694} 692}
695 693
696static void __cpuinit __set_proc_ids(struct mdesc_handle *hp, const char *exec_unit_name) 694static void __set_proc_ids(struct mdesc_handle *hp, const char *exec_unit_name)
697{ 695{
698 int idx; 696 int idx;
699 u64 mp; 697 u64 mp;
@@ -714,14 +712,14 @@ static void __cpuinit __set_proc_ids(struct mdesc_handle *hp, const char *exec_u
714 } 712 }
715} 713}
716 714
717static void __cpuinit set_proc_ids(struct mdesc_handle *hp) 715static void set_proc_ids(struct mdesc_handle *hp)
718{ 716{
719 __set_proc_ids(hp, "exec_unit"); 717 __set_proc_ids(hp, "exec_unit");
720 __set_proc_ids(hp, "exec-unit"); 718 __set_proc_ids(hp, "exec-unit");
721} 719}
722 720
723static void __cpuinit get_one_mondo_bits(const u64 *p, unsigned int *mask, 721static void get_one_mondo_bits(const u64 *p, unsigned int *mask,
724 unsigned long def, unsigned long max) 722 unsigned long def, unsigned long max)
725{ 723{
726 u64 val; 724 u64 val;
727 725
@@ -742,8 +740,8 @@ use_default:
742 *mask = ((1U << def) * 64U) - 1U; 740 *mask = ((1U << def) * 64U) - 1U;
743} 741}
744 742
745static void __cpuinit get_mondo_data(struct mdesc_handle *hp, u64 mp, 743static void get_mondo_data(struct mdesc_handle *hp, u64 mp,
746 struct trap_per_cpu *tb) 744 struct trap_per_cpu *tb)
747{ 745{
748 static int printed; 746 static int printed;
749 const u64 *val; 747 const u64 *val;
@@ -769,7 +767,7 @@ static void __cpuinit get_mondo_data(struct mdesc_handle *hp, u64 mp,
769 } 767 }
770} 768}
771 769
772static void * __cpuinit mdesc_iterate_over_cpus(void *(*func)(struct mdesc_handle *, u64, int, void *), void *arg, cpumask_t *mask) 770static void *mdesc_iterate_over_cpus(void *(*func)(struct mdesc_handle *, u64, int, void *), void *arg, cpumask_t *mask)
773{ 771{
774 struct mdesc_handle *hp = mdesc_grab(); 772 struct mdesc_handle *hp = mdesc_grab();
775 void *ret = NULL; 773 void *ret = NULL;
@@ -799,7 +797,8 @@ out:
799 return ret; 797 return ret;
800} 798}
801 799
802static void * __cpuinit record_one_cpu(struct mdesc_handle *hp, u64 mp, int cpuid, void *arg) 800static void *record_one_cpu(struct mdesc_handle *hp, u64 mp, int cpuid,
801 void *arg)
803{ 802{
804 ncpus_probed++; 803 ncpus_probed++;
805#ifdef CONFIG_SMP 804#ifdef CONFIG_SMP
@@ -808,7 +807,7 @@ static void * __cpuinit record_one_cpu(struct mdesc_handle *hp, u64 mp, int cpui
808 return NULL; 807 return NULL;
809} 808}
810 809
811void __cpuinit mdesc_populate_present_mask(cpumask_t *mask) 810void mdesc_populate_present_mask(cpumask_t *mask)
812{ 811{
813 if (tlb_type != hypervisor) 812 if (tlb_type != hypervisor)
814 return; 813 return;
@@ -841,7 +840,8 @@ void __init mdesc_get_page_sizes(cpumask_t *mask, unsigned long *pgsz_mask)
841 mdesc_iterate_over_cpus(check_one_pgsz, pgsz_mask, mask); 840 mdesc_iterate_over_cpus(check_one_pgsz, pgsz_mask, mask);
842} 841}
843 842
844static void * __cpuinit fill_in_one_cpu(struct mdesc_handle *hp, u64 mp, int cpuid, void *arg) 843static void *fill_in_one_cpu(struct mdesc_handle *hp, u64 mp, int cpuid,
844 void *arg)
845{ 845{
846 const u64 *cfreq = mdesc_get_property(hp, mp, "clock-frequency", NULL); 846 const u64 *cfreq = mdesc_get_property(hp, mp, "clock-frequency", NULL);
847 struct trap_per_cpu *tb; 847 struct trap_per_cpu *tb;
@@ -890,7 +890,7 @@ static void * __cpuinit fill_in_one_cpu(struct mdesc_handle *hp, u64 mp, int cpu
890 return NULL; 890 return NULL;
891} 891}
892 892
893void __cpuinit mdesc_fill_in_cpu_data(cpumask_t *mask) 893void mdesc_fill_in_cpu_data(cpumask_t *mask)
894{ 894{
895 struct mdesc_handle *hp; 895 struct mdesc_handle *hp;
896 896
diff --git a/arch/sparc/kernel/smp_32.c b/arch/sparc/kernel/smp_32.c
index e3f2b81c23f1..a102bfba6ea8 100644
--- a/arch/sparc/kernel/smp_32.c
+++ b/arch/sparc/kernel/smp_32.c
@@ -39,7 +39,7 @@
39#include "kernel.h" 39#include "kernel.h"
40#include "irq.h" 40#include "irq.h"
41 41
42volatile unsigned long cpu_callin_map[NR_CPUS] __cpuinitdata = {0,}; 42volatile unsigned long cpu_callin_map[NR_CPUS] = {0,};
43 43
44cpumask_t smp_commenced_mask = CPU_MASK_NONE; 44cpumask_t smp_commenced_mask = CPU_MASK_NONE;
45 45
@@ -53,7 +53,7 @@ const struct sparc32_ipi_ops *sparc32_ipi_ops;
53 * instruction which is much better... 53 * instruction which is much better...
54 */ 54 */
55 55
56void __cpuinit smp_store_cpu_info(int id) 56void smp_store_cpu_info(int id)
57{ 57{
58 int cpu_node; 58 int cpu_node;
59 int mid; 59 int mid;
@@ -120,7 +120,7 @@ void cpu_panic(void)
120 panic("SMP bolixed\n"); 120 panic("SMP bolixed\n");
121} 121}
122 122
123struct linux_prom_registers smp_penguin_ctable __cpuinitdata = { 0 }; 123struct linux_prom_registers smp_penguin_ctable = { 0 };
124 124
125void smp_send_reschedule(int cpu) 125void smp_send_reschedule(int cpu)
126{ 126{
@@ -259,10 +259,10 @@ void __init smp_prepare_boot_cpu(void)
259 set_cpu_possible(cpuid, true); 259 set_cpu_possible(cpuid, true);
260} 260}
261 261
262int __cpuinit __cpu_up(unsigned int cpu, struct task_struct *tidle) 262int __cpu_up(unsigned int cpu, struct task_struct *tidle)
263{ 263{
264 extern int __cpuinit smp4m_boot_one_cpu(int, struct task_struct *); 264 extern int smp4m_boot_one_cpu(int, struct task_struct *);
265 extern int __cpuinit smp4d_boot_one_cpu(int, struct task_struct *); 265 extern int smp4d_boot_one_cpu(int, struct task_struct *);
266 int ret=0; 266 int ret=0;
267 267
268 switch(sparc_cpu_model) { 268 switch(sparc_cpu_model) {
@@ -297,7 +297,7 @@ int __cpuinit __cpu_up(unsigned int cpu, struct task_struct *tidle)
297 return ret; 297 return ret;
298} 298}
299 299
300void __cpuinit arch_cpu_pre_starting(void *arg) 300void arch_cpu_pre_starting(void *arg)
301{ 301{
302 local_ops->cache_all(); 302 local_ops->cache_all();
303 local_ops->tlb_all(); 303 local_ops->tlb_all();
@@ -317,7 +317,7 @@ void __cpuinit arch_cpu_pre_starting(void *arg)
317 } 317 }
318} 318}
319 319
320void __cpuinit arch_cpu_pre_online(void *arg) 320void arch_cpu_pre_online(void *arg)
321{ 321{
322 unsigned int cpuid = hard_smp_processor_id(); 322 unsigned int cpuid = hard_smp_processor_id();
323 323
@@ -344,7 +344,7 @@ void __cpuinit arch_cpu_pre_online(void *arg)
344 } 344 }
345} 345}
346 346
347void __cpuinit sparc_start_secondary(void *arg) 347void sparc_start_secondary(void *arg)
348{ 348{
349 unsigned int cpu; 349 unsigned int cpu;
350 350
@@ -375,7 +375,7 @@ void __cpuinit sparc_start_secondary(void *arg)
375 BUG(); 375 BUG();
376} 376}
377 377
378void __cpuinit smp_callin(void) 378void smp_callin(void)
379{ 379{
380 sparc_start_secondary(NULL); 380 sparc_start_secondary(NULL);
381} 381}
diff --git a/arch/sparc/kernel/smp_64.c b/arch/sparc/kernel/smp_64.c
index 77539eda928c..e142545244f2 100644
--- a/arch/sparc/kernel/smp_64.c
+++ b/arch/sparc/kernel/smp_64.c
@@ -87,7 +87,7 @@ extern void setup_sparc64_timer(void);
87 87
88static volatile unsigned long callin_flag = 0; 88static volatile unsigned long callin_flag = 0;
89 89
90void __cpuinit smp_callin(void) 90void smp_callin(void)
91{ 91{
92 int cpuid = hard_smp_processor_id(); 92 int cpuid = hard_smp_processor_id();
93 93
@@ -281,7 +281,8 @@ static unsigned long kimage_addr_to_ra(void *p)
281 return kern_base + (val - KERNBASE); 281 return kern_base + (val - KERNBASE);
282} 282}
283 283
284static void __cpuinit ldom_startcpu_cpuid(unsigned int cpu, unsigned long thread_reg, void **descrp) 284static void ldom_startcpu_cpuid(unsigned int cpu, unsigned long thread_reg,
285 void **descrp)
285{ 286{
286 extern unsigned long sparc64_ttable_tl0; 287 extern unsigned long sparc64_ttable_tl0;
287 extern unsigned long kern_locked_tte_data; 288 extern unsigned long kern_locked_tte_data;
@@ -342,7 +343,7 @@ extern unsigned long sparc64_cpu_startup;
342 */ 343 */
343static struct thread_info *cpu_new_thread = NULL; 344static struct thread_info *cpu_new_thread = NULL;
344 345
345static int __cpuinit smp_boot_one_cpu(unsigned int cpu, struct task_struct *idle) 346static int smp_boot_one_cpu(unsigned int cpu, struct task_struct *idle)
346{ 347{
347 unsigned long entry = 348 unsigned long entry =
348 (unsigned long)(&sparc64_cpu_startup); 349 (unsigned long)(&sparc64_cpu_startup);
@@ -1266,7 +1267,7 @@ void smp_fill_in_sib_core_maps(void)
1266 } 1267 }
1267} 1268}
1268 1269
1269int __cpuinit __cpu_up(unsigned int cpu, struct task_struct *tidle) 1270int __cpu_up(unsigned int cpu, struct task_struct *tidle)
1270{ 1271{
1271 int ret = smp_boot_one_cpu(cpu, tidle); 1272 int ret = smp_boot_one_cpu(cpu, tidle);
1272 1273
diff --git a/arch/sparc/kernel/sun4d_smp.c b/arch/sparc/kernel/sun4d_smp.c
index c9eb82f23d92..d5c319553fd0 100644
--- a/arch/sparc/kernel/sun4d_smp.c
+++ b/arch/sparc/kernel/sun4d_smp.c
@@ -50,7 +50,7 @@ static inline void show_leds(int cpuid)
50 "i" (ASI_M_CTL)); 50 "i" (ASI_M_CTL));
51} 51}
52 52
53void __cpuinit sun4d_cpu_pre_starting(void *arg) 53void sun4d_cpu_pre_starting(void *arg)
54{ 54{
55 int cpuid = hard_smp_processor_id(); 55 int cpuid = hard_smp_processor_id();
56 56
@@ -62,7 +62,7 @@ void __cpuinit sun4d_cpu_pre_starting(void *arg)
62 cc_set_imsk((cc_get_imsk() & ~0x8000) | 0x4000); 62 cc_set_imsk((cc_get_imsk() & ~0x8000) | 0x4000);
63} 63}
64 64
65void __cpuinit sun4d_cpu_pre_online(void *arg) 65void sun4d_cpu_pre_online(void *arg)
66{ 66{
67 unsigned long flags; 67 unsigned long flags;
68 int cpuid; 68 int cpuid;
@@ -118,7 +118,7 @@ void __init smp4d_boot_cpus(void)
118 local_ops->cache_all(); 118 local_ops->cache_all();
119} 119}
120 120
121int __cpuinit smp4d_boot_one_cpu(int i, struct task_struct *idle) 121int smp4d_boot_one_cpu(int i, struct task_struct *idle)
122{ 122{
123 unsigned long *entry = &sun4d_cpu_startup; 123 unsigned long *entry = &sun4d_cpu_startup;
124 int timeout; 124 int timeout;
diff --git a/arch/sparc/kernel/sun4m_smp.c b/arch/sparc/kernel/sun4m_smp.c
index 8a65f158153d..d3408e72d20c 100644
--- a/arch/sparc/kernel/sun4m_smp.c
+++ b/arch/sparc/kernel/sun4m_smp.c
@@ -34,11 +34,11 @@ swap_ulong(volatile unsigned long *ptr, unsigned long val)
34 return val; 34 return val;
35} 35}
36 36
37void __cpuinit sun4m_cpu_pre_starting(void *arg) 37void sun4m_cpu_pre_starting(void *arg)
38{ 38{
39} 39}
40 40
41void __cpuinit sun4m_cpu_pre_online(void *arg) 41void sun4m_cpu_pre_online(void *arg)
42{ 42{
43 int cpuid = hard_smp_processor_id(); 43 int cpuid = hard_smp_processor_id();
44 44
@@ -75,7 +75,7 @@ void __init smp4m_boot_cpus(void)
75 local_ops->cache_all(); 75 local_ops->cache_all();
76} 76}
77 77
78int __cpuinit smp4m_boot_one_cpu(int i, struct task_struct *idle) 78int smp4m_boot_one_cpu(int i, struct task_struct *idle)
79{ 79{
80 unsigned long *entry = &sun4m_cpu_startup; 80 unsigned long *entry = &sun4m_cpu_startup;
81 int timeout; 81 int timeout;
diff --git a/arch/sparc/kernel/sysfs.c b/arch/sparc/kernel/sysfs.c
index 654e8aad3bbe..c21c673e5f7c 100644
--- a/arch/sparc/kernel/sysfs.c
+++ b/arch/sparc/kernel/sysfs.c
@@ -246,7 +246,7 @@ static void unregister_cpu_online(unsigned int cpu)
246} 246}
247#endif 247#endif
248 248
249static int __cpuinit sysfs_cpu_notify(struct notifier_block *self, 249static int sysfs_cpu_notify(struct notifier_block *self,
250 unsigned long action, void *hcpu) 250 unsigned long action, void *hcpu)
251{ 251{
252 unsigned int cpu = (unsigned int)(long)hcpu; 252 unsigned int cpu = (unsigned int)(long)hcpu;
@@ -266,7 +266,7 @@ static int __cpuinit sysfs_cpu_notify(struct notifier_block *self,
266 return NOTIFY_OK; 266 return NOTIFY_OK;
267} 267}
268 268
269static struct notifier_block __cpuinitdata sysfs_cpu_nb = { 269static struct notifier_block sysfs_cpu_nb = {
270 .notifier_call = sysfs_cpu_notify, 270 .notifier_call = sysfs_cpu_notify,
271}; 271};
272 272
diff --git a/arch/sparc/kernel/trampoline_32.S b/arch/sparc/kernel/trampoline_32.S
index 6cdb08cdabf0..76dcbd3c988a 100644
--- a/arch/sparc/kernel/trampoline_32.S
+++ b/arch/sparc/kernel/trampoline_32.S
@@ -18,7 +18,6 @@
18 .globl sun4m_cpu_startup 18 .globl sun4m_cpu_startup
19 .globl sun4d_cpu_startup 19 .globl sun4d_cpu_startup
20 20
21 __CPUINIT
22 .align 4 21 .align 4
23 22
24/* When we start up a cpu for the first time it enters this routine. 23/* When we start up a cpu for the first time it enters this routine.
@@ -94,7 +93,6 @@ smp_panic:
94/* CPUID in bootbus can be found at PA 0xff0140000 */ 93/* CPUID in bootbus can be found at PA 0xff0140000 */
95#define SUN4D_BOOTBUS_CPUID 0xf0140000 94#define SUN4D_BOOTBUS_CPUID 0xf0140000
96 95
97 __CPUINIT
98 .align 4 96 .align 4
99 97
100sun4d_cpu_startup: 98sun4d_cpu_startup:
@@ -146,7 +144,6 @@ sun4d_cpu_startup:
146 144
147 b,a smp_panic 145 b,a smp_panic
148 146
149 __CPUINIT
150 .align 4 147 .align 4
151 .global leon_smp_cpu_startup, smp_penguin_ctable 148 .global leon_smp_cpu_startup, smp_penguin_ctable
152 149
diff --git a/arch/sparc/kernel/trampoline_64.S b/arch/sparc/kernel/trampoline_64.S
index 2e973a26fbda..e0b1e13a0736 100644
--- a/arch/sparc/kernel/trampoline_64.S
+++ b/arch/sparc/kernel/trampoline_64.S
@@ -32,13 +32,11 @@ itlb_load:
32dtlb_load: 32dtlb_load:
33 .asciz "SUNW,dtlb-load" 33 .asciz "SUNW,dtlb-load"
34 34
35 /* XXX __cpuinit this thing XXX */
36#define TRAMP_STACK_SIZE 1024 35#define TRAMP_STACK_SIZE 1024
37 .align 16 36 .align 16
38tramp_stack: 37tramp_stack:
39 .skip TRAMP_STACK_SIZE 38 .skip TRAMP_STACK_SIZE
40 39
41 __CPUINIT
42 .align 8 40 .align 8
43 .globl sparc64_cpu_startup, sparc64_cpu_startup_end 41 .globl sparc64_cpu_startup, sparc64_cpu_startup_end
44sparc64_cpu_startup: 42sparc64_cpu_startup:
diff --git a/arch/sparc/mm/init_64.c b/arch/sparc/mm/init_64.c
index a9c42a7ffb6a..ed82edad1a39 100644
--- a/arch/sparc/mm/init_64.c
+++ b/arch/sparc/mm/init_64.c
@@ -1694,7 +1694,7 @@ static void __init sun4v_ktsb_init(void)
1694#endif 1694#endif
1695} 1695}
1696 1696
1697void __cpuinit sun4v_ktsb_register(void) 1697void sun4v_ktsb_register(void)
1698{ 1698{
1699 unsigned long pa, ret; 1699 unsigned long pa, ret;
1700 1700
diff --git a/arch/sparc/mm/srmmu.c b/arch/sparc/mm/srmmu.c
index 036c2797dece..5d721df48a72 100644
--- a/arch/sparc/mm/srmmu.c
+++ b/arch/sparc/mm/srmmu.c
@@ -858,7 +858,7 @@ static void __init map_kernel(void)
858 } 858 }
859} 859}
860 860
861void (*poke_srmmu)(void) __cpuinitdata = NULL; 861void (*poke_srmmu)(void) = NULL;
862 862
863extern unsigned long bootmem_init(unsigned long *pages_avail); 863extern unsigned long bootmem_init(unsigned long *pages_avail);
864 864
@@ -1055,7 +1055,7 @@ static void __init init_vac_layout(void)
1055 (int)vac_cache_size, (int)vac_line_size); 1055 (int)vac_cache_size, (int)vac_line_size);
1056} 1056}
1057 1057
1058static void __cpuinit poke_hypersparc(void) 1058static void poke_hypersparc(void)
1059{ 1059{
1060 volatile unsigned long clear; 1060 volatile unsigned long clear;
1061 unsigned long mreg = srmmu_get_mmureg(); 1061 unsigned long mreg = srmmu_get_mmureg();
@@ -1107,7 +1107,7 @@ static void __init init_hypersparc(void)
1107 hypersparc_setup_blockops(); 1107 hypersparc_setup_blockops();
1108} 1108}
1109 1109
1110static void __cpuinit poke_swift(void) 1110static void poke_swift(void)
1111{ 1111{
1112 unsigned long mreg; 1112 unsigned long mreg;
1113 1113
@@ -1287,7 +1287,7 @@ static void turbosparc_flush_tlb_page(struct vm_area_struct *vma, unsigned long
1287} 1287}
1288 1288
1289 1289
1290static void __cpuinit poke_turbosparc(void) 1290static void poke_turbosparc(void)
1291{ 1291{
1292 unsigned long mreg = srmmu_get_mmureg(); 1292 unsigned long mreg = srmmu_get_mmureg();
1293 unsigned long ccreg; 1293 unsigned long ccreg;
@@ -1350,7 +1350,7 @@ static void __init init_turbosparc(void)
1350 poke_srmmu = poke_turbosparc; 1350 poke_srmmu = poke_turbosparc;
1351} 1351}
1352 1352
1353static void __cpuinit poke_tsunami(void) 1353static void poke_tsunami(void)
1354{ 1354{
1355 unsigned long mreg = srmmu_get_mmureg(); 1355 unsigned long mreg = srmmu_get_mmureg();
1356 1356
@@ -1391,7 +1391,7 @@ static void __init init_tsunami(void)
1391 tsunami_setup_blockops(); 1391 tsunami_setup_blockops();
1392} 1392}
1393 1393
1394static void __cpuinit poke_viking(void) 1394static void poke_viking(void)
1395{ 1395{
1396 unsigned long mreg = srmmu_get_mmureg(); 1396 unsigned long mreg = srmmu_get_mmureg();
1397 static int smp_catch; 1397 static int smp_catch;
diff --git a/arch/tile/kernel/irq.c b/arch/tile/kernel/irq.c
index 02e628065012..3ccf2cd7182e 100644
--- a/arch/tile/kernel/irq.c
+++ b/arch/tile/kernel/irq.c
@@ -220,7 +220,7 @@ void __init init_IRQ(void)
220 ipi_init(); 220 ipi_init();
221} 221}
222 222
223void __cpuinit setup_irq_regs(void) 223void setup_irq_regs(void)
224{ 224{
225 /* Enable interrupt delivery. */ 225 /* Enable interrupt delivery. */
226 unmask_irqs(~0UL); 226 unmask_irqs(~0UL);
diff --git a/arch/tile/kernel/messaging.c b/arch/tile/kernel/messaging.c
index 0858ee6b520f..00331af9525d 100644
--- a/arch/tile/kernel/messaging.c
+++ b/arch/tile/kernel/messaging.c
@@ -25,7 +25,7 @@
25/* All messages are stored here */ 25/* All messages are stored here */
26static DEFINE_PER_CPU(HV_MsgState, msg_state); 26static DEFINE_PER_CPU(HV_MsgState, msg_state);
27 27
28void __cpuinit init_messaging(void) 28void init_messaging(void)
29{ 29{
30 /* Allocate storage for messages in kernel space */ 30 /* Allocate storage for messages in kernel space */
31 HV_MsgState *state = &__get_cpu_var(msg_state); 31 HV_MsgState *state = &__get_cpu_var(msg_state);
diff --git a/arch/tile/kernel/setup.c b/arch/tile/kernel/setup.c
index 68b542677f6a..eceb8344280f 100644
--- a/arch/tile/kernel/setup.c
+++ b/arch/tile/kernel/setup.c
@@ -58,8 +58,8 @@ struct pglist_data node_data[MAX_NUMNODES] __read_mostly;
58EXPORT_SYMBOL(node_data); 58EXPORT_SYMBOL(node_data);
59 59
60/* Information on the NUMA nodes that we compute early */ 60/* Information on the NUMA nodes that we compute early */
61unsigned long __cpuinitdata node_start_pfn[MAX_NUMNODES]; 61unsigned long node_start_pfn[MAX_NUMNODES];
62unsigned long __cpuinitdata node_end_pfn[MAX_NUMNODES]; 62unsigned long node_end_pfn[MAX_NUMNODES];
63unsigned long __initdata node_memmap_pfn[MAX_NUMNODES]; 63unsigned long __initdata node_memmap_pfn[MAX_NUMNODES];
64unsigned long __initdata node_percpu_pfn[MAX_NUMNODES]; 64unsigned long __initdata node_percpu_pfn[MAX_NUMNODES];
65unsigned long __initdata node_free_pfn[MAX_NUMNODES]; 65unsigned long __initdata node_free_pfn[MAX_NUMNODES];
@@ -84,7 +84,7 @@ unsigned long __initdata boot_pc = (unsigned long)start_kernel;
84 84
85#ifdef CONFIG_HIGHMEM 85#ifdef CONFIG_HIGHMEM
86/* Page frame index of end of lowmem on each controller. */ 86/* Page frame index of end of lowmem on each controller. */
87unsigned long __cpuinitdata node_lowmem_end_pfn[MAX_NUMNODES]; 87unsigned long node_lowmem_end_pfn[MAX_NUMNODES];
88 88
89/* Number of pages that can be mapped into lowmem. */ 89/* Number of pages that can be mapped into lowmem. */
90static unsigned long __initdata mappable_physpages; 90static unsigned long __initdata mappable_physpages;
@@ -290,7 +290,7 @@ static void *__init setup_pa_va_mapping(void)
290 * This is up to 4 mappings for lowmem, one mapping per memory 290 * This is up to 4 mappings for lowmem, one mapping per memory
291 * controller, plus one for our text segment. 291 * controller, plus one for our text segment.
292 */ 292 */
293static void __cpuinit store_permanent_mappings(void) 293static void store_permanent_mappings(void)
294{ 294{
295 int i; 295 int i;
296 296
@@ -935,7 +935,7 @@ subsys_initcall(topology_init);
935 * So the values we set up here in the hypervisor may be overridden on 935 * So the values we set up here in the hypervisor may be overridden on
936 * the boot cpu as arguments are parsed. 936 * the boot cpu as arguments are parsed.
937 */ 937 */
938static __cpuinit void init_super_pages(void) 938static void init_super_pages(void)
939{ 939{
940#ifdef CONFIG_HUGETLB_SUPER_PAGES 940#ifdef CONFIG_HUGETLB_SUPER_PAGES
941 int i; 941 int i;
@@ -950,7 +950,7 @@ static __cpuinit void init_super_pages(void)
950 * 950 *
951 * Called from setup_arch() on the boot cpu, or online_secondary(). 951 * Called from setup_arch() on the boot cpu, or online_secondary().
952 */ 952 */
953void __cpuinit setup_cpu(int boot) 953void setup_cpu(int boot)
954{ 954{
955 /* The boot cpu sets up its permanent mappings much earlier. */ 955 /* The boot cpu sets up its permanent mappings much earlier. */
956 if (!boot) 956 if (!boot)
diff --git a/arch/tile/kernel/smpboot.c b/arch/tile/kernel/smpboot.c
index 44bab29bf2f3..a535655b7089 100644
--- a/arch/tile/kernel/smpboot.c
+++ b/arch/tile/kernel/smpboot.c
@@ -133,14 +133,14 @@ static __init int reset_init_affinity(void)
133} 133}
134late_initcall(reset_init_affinity); 134late_initcall(reset_init_affinity);
135 135
136static struct cpumask cpu_started __cpuinitdata; 136static struct cpumask cpu_started;
137 137
138/* 138/*
139 * Activate a secondary processor. Very minimal; don't add anything 139 * Activate a secondary processor. Very minimal; don't add anything
140 * to this path without knowing what you're doing, since SMP booting 140 * to this path without knowing what you're doing, since SMP booting
141 * is pretty fragile. 141 * is pretty fragile.
142 */ 142 */
143static void __cpuinit start_secondary(void) 143static void start_secondary(void)
144{ 144{
145 int cpuid = smp_processor_id(); 145 int cpuid = smp_processor_id();
146 146
@@ -183,7 +183,7 @@ static void __cpuinit start_secondary(void)
183/* 183/*
184 * Bring a secondary processor online. 184 * Bring a secondary processor online.
185 */ 185 */
186void __cpuinit online_secondary(void) 186void online_secondary(void)
187{ 187{
188 /* 188 /*
189 * low-memory mappings have been cleared, flush them from 189 * low-memory mappings have been cleared, flush them from
@@ -210,7 +210,7 @@ void __cpuinit online_secondary(void)
210 cpu_startup_entry(CPUHP_ONLINE); 210 cpu_startup_entry(CPUHP_ONLINE);
211} 211}
212 212
213int __cpuinit __cpu_up(unsigned int cpu, struct task_struct *tidle) 213int __cpu_up(unsigned int cpu, struct task_struct *tidle)
214{ 214{
215 /* Wait 5s total for all CPUs for them to come online */ 215 /* Wait 5s total for all CPUs for them to come online */
216 static int timeout; 216 static int timeout;
diff --git a/arch/tile/kernel/time.c b/arch/tile/kernel/time.c
index 5ac397ec6986..7c353d8c2da9 100644
--- a/arch/tile/kernel/time.c
+++ b/arch/tile/kernel/time.c
@@ -159,7 +159,7 @@ static DEFINE_PER_CPU(struct clock_event_device, tile_timer) = {
159 .set_mode = tile_timer_set_mode, 159 .set_mode = tile_timer_set_mode,
160}; 160};
161 161
162void __cpuinit setup_tile_timer(void) 162void setup_tile_timer(void)
163{ 163{
164 struct clock_event_device *evt = &__get_cpu_var(tile_timer); 164 struct clock_event_device *evt = &__get_cpu_var(tile_timer);
165 165
diff --git a/arch/um/include/shared/frame_kern.h b/arch/um/include/shared/frame_kern.h
index e584e40ee832..f2ca5702a4e2 100644
--- a/arch/um/include/shared/frame_kern.h
+++ b/arch/um/include/shared/frame_kern.h
@@ -6,13 +6,13 @@
6#ifndef __FRAME_KERN_H_ 6#ifndef __FRAME_KERN_H_
7#define __FRAME_KERN_H_ 7#define __FRAME_KERN_H_
8 8
9extern int setup_signal_stack_sc(unsigned long stack_top, int sig, 9extern int setup_signal_stack_sc(unsigned long stack_top, int sig,
10 struct k_sigaction *ka, 10 struct k_sigaction *ka,
11 struct pt_regs *regs, 11 struct pt_regs *regs,
12 sigset_t *mask); 12 sigset_t *mask);
13extern int setup_signal_stack_si(unsigned long stack_top, int sig, 13extern int setup_signal_stack_si(unsigned long stack_top, int sig,
14 struct k_sigaction *ka, 14 struct k_sigaction *ka,
15 struct pt_regs *regs, siginfo_t *info, 15 struct pt_regs *regs, struct siginfo *info,
16 sigset_t *mask); 16 sigset_t *mask);
17 17
18#endif 18#endif
diff --git a/arch/um/kernel/signal.c b/arch/um/kernel/signal.c
index 3e831b3fd07b..f57e02e7910f 100644
--- a/arch/um/kernel/signal.c
+++ b/arch/um/kernel/signal.c
@@ -19,7 +19,7 @@ EXPORT_SYMBOL(unblock_signals);
19 * OK, we're invoking a handler 19 * OK, we're invoking a handler
20 */ 20 */
21static void handle_signal(struct pt_regs *regs, unsigned long signr, 21static void handle_signal(struct pt_regs *regs, unsigned long signr,
22 struct k_sigaction *ka, siginfo_t *info) 22 struct k_sigaction *ka, struct siginfo *info)
23{ 23{
24 sigset_t *oldset = sigmask_to_save(); 24 sigset_t *oldset = sigmask_to_save();
25 int singlestep = 0; 25 int singlestep = 0;
@@ -71,7 +71,7 @@ static void handle_signal(struct pt_regs *regs, unsigned long signr,
71static int kern_do_signal(struct pt_regs *regs) 71static int kern_do_signal(struct pt_regs *regs)
72{ 72{
73 struct k_sigaction ka_copy; 73 struct k_sigaction ka_copy;
74 siginfo_t info; 74 struct siginfo info;
75 int sig, handled_sig = 0; 75 int sig, handled_sig = 0;
76 76
77 while ((sig = get_signal_to_deliver(&info, &ka_copy, regs, NULL)) > 0) { 77 while ((sig = get_signal_to_deliver(&info, &ka_copy, regs, NULL)) > 0) {
diff --git a/arch/um/kernel/skas/mmu.c b/arch/um/kernel/skas/mmu.c
index ff03067a3b14..007d5503f49b 100644
--- a/arch/um/kernel/skas/mmu.c
+++ b/arch/um/kernel/skas/mmu.c
@@ -123,7 +123,7 @@ void uml_setup_stubs(struct mm_struct *mm)
123 /* dup_mmap already holds mmap_sem */ 123 /* dup_mmap already holds mmap_sem */
124 err = install_special_mapping(mm, STUB_START, STUB_END - STUB_START, 124 err = install_special_mapping(mm, STUB_START, STUB_END - STUB_START,
125 VM_READ | VM_MAYREAD | VM_EXEC | 125 VM_READ | VM_MAYREAD | VM_EXEC |
126 VM_MAYEXEC | VM_DONTCOPY, 126 VM_MAYEXEC | VM_DONTCOPY | VM_PFNMAP,
127 mm->context.stub_pages); 127 mm->context.stub_pages);
128 if (err) { 128 if (err) {
129 printk(KERN_ERR "install_special_mapping returned %d\n", err); 129 printk(KERN_ERR "install_special_mapping returned %d\n", err);
diff --git a/arch/um/kernel/skas/uaccess.c b/arch/um/kernel/skas/uaccess.c
index 1d3e0c17340b..4ffb644d6c07 100644
--- a/arch/um/kernel/skas/uaccess.c
+++ b/arch/um/kernel/skas/uaccess.c
@@ -254,6 +254,6 @@ int strnlen_user(const void __user *str, int len)
254 n = buffer_op((unsigned long) str, len, 0, strnlen_chunk, &count); 254 n = buffer_op((unsigned long) str, len, 0, strnlen_chunk, &count);
255 if (n == 0) 255 if (n == 0)
256 return count + 1; 256 return count + 1;
257 return -EFAULT; 257 return 0;
258} 258}
259EXPORT_SYMBOL(strnlen_user); 259EXPORT_SYMBOL(strnlen_user);
diff --git a/arch/um/os-Linux/mem.c b/arch/um/os-Linux/mem.c
index ba4398056fe9..3c4af77e51a2 100644
--- a/arch/um/os-Linux/mem.c
+++ b/arch/um/os-Linux/mem.c
@@ -53,6 +53,25 @@ static void __init find_tempdir(void)
53} 53}
54 54
55/* 55/*
56 * Remove bytes from the front of the buffer and refill it so that if there's a
57 * partial string that we care about, it will be completed, and we can recognize
58 * it.
59 */
60static int pop(int fd, char *buf, size_t size, size_t npop)
61{
62 ssize_t n;
63 size_t len = strlen(&buf[npop]);
64
65 memmove(buf, &buf[npop], len + 1);
66 n = read(fd, &buf[len], size - len - 1);
67 if (n < 0)
68 return -errno;
69
70 buf[len + n] = '\0';
71 return 1;
72}
73
74/*
56 * This will return 1, with the first character in buf being the 75 * This will return 1, with the first character in buf being the
57 * character following the next instance of c in the file. This will 76 * character following the next instance of c in the file. This will
58 * read the file as needed. If there's an error, -errno is returned; 77 * read the file as needed. If there's an error, -errno is returned;
@@ -61,7 +80,6 @@ static void __init find_tempdir(void)
61static int next(int fd, char *buf, size_t size, char c) 80static int next(int fd, char *buf, size_t size, char c)
62{ 81{
63 ssize_t n; 82 ssize_t n;
64 size_t len;
65 char *ptr; 83 char *ptr;
66 84
67 while ((ptr = strchr(buf, c)) == NULL) { 85 while ((ptr = strchr(buf, c)) == NULL) {
@@ -74,20 +92,129 @@ static int next(int fd, char *buf, size_t size, char c)
74 buf[n] = '\0'; 92 buf[n] = '\0';
75 } 93 }
76 94
77 ptr++; 95 return pop(fd, buf, size, ptr - buf + 1);
78 len = strlen(ptr); 96}
79 memmove(buf, ptr, len + 1); 97
98/*
99 * Decode an octal-escaped and space-terminated path of the form used by
100 * /proc/mounts. May be used to decode a path in-place. "out" must be at least
101 * as large as the input. The output is always null-terminated. "len" gets the
102 * length of the output, excluding the trailing null. Returns 0 if a full path
103 * was successfully decoded, otherwise an error.
104 */
105static int decode_path(const char *in, char *out, size_t *len)
106{
107 char *first = out;
108 int c;
109 int i;
110 int ret = -EINVAL;
111 while (1) {
112 switch (*in) {
113 case '\0':
114 goto out;
115
116 case ' ':
117 ret = 0;
118 goto out;
119
120 case '\\':
121 in++;
122 c = 0;
123 for (i = 0; i < 3; i++) {
124 if (*in < '0' || *in > '7')
125 goto out;
126 c = (c << 3) | (*in++ - '0');
127 }
128 *(unsigned char *)out++ = (unsigned char) c;
129 break;
130
131 default:
132 *out++ = *in++;
133 break;
134 }
135 }
136
137out:
138 *out = '\0';
139 *len = out - first;
140 return ret;
141}
142
143/*
144 * Computes the length of s when encoded with three-digit octal escape sequences
145 * for the characters in chars.
146 */
147static size_t octal_encoded_length(const char *s, const char *chars)
148{
149 size_t len = strlen(s);
150 while ((s = strpbrk(s, chars)) != NULL) {
151 len += 3;
152 s++;
153 }
154
155 return len;
156}
157
158enum {
159 OUTCOME_NOTHING_MOUNTED,
160 OUTCOME_TMPFS_MOUNT,
161 OUTCOME_NON_TMPFS_MOUNT,
162};
163
164/* Read a line of /proc/mounts data looking for a tmpfs mount at "path". */
165static int read_mount(int fd, char *buf, size_t bufsize, const char *path,
166 int *outcome)
167{
168 int found;
169 int match;
170 char *space;
171 size_t len;
172
173 enum {
174 MATCH_NONE,
175 MATCH_EXACT,
176 MATCH_PARENT,
177 };
178
179 found = next(fd, buf, bufsize, ' ');
180 if (found != 1)
181 return found;
80 182
81 /* 183 /*
82 * Refill the buffer so that if there's a partial string that we care 184 * If there's no following space in the buffer, then this path is
83 * about, it will be completed, and we can recognize it. 185 * truncated, so it can't be the one we're looking for.
84 */ 186 */
85 n = read(fd, &buf[len], size - len - 1); 187 space = strchr(buf, ' ');
86 if (n < 0) 188 if (space) {
87 return -errno; 189 match = MATCH_NONE;
190 if (!decode_path(buf, buf, &len)) {
191 if (!strcmp(buf, path))
192 match = MATCH_EXACT;
193 else if (!strncmp(buf, path, len)
194 && (path[len] == '/' || !strcmp(buf, "/")))
195 match = MATCH_PARENT;
196 }
197
198 found = pop(fd, buf, bufsize, space - buf + 1);
199 if (found != 1)
200 return found;
201
202 switch (match) {
203 case MATCH_EXACT:
204 if (!strncmp(buf, "tmpfs", strlen("tmpfs")))
205 *outcome = OUTCOME_TMPFS_MOUNT;
206 else
207 *outcome = OUTCOME_NON_TMPFS_MOUNT;
208 break;
88 209
89 buf[len + n] = '\0'; 210 case MATCH_PARENT:
90 return 1; 211 /* This mount obscures any previous ones. */
212 *outcome = OUTCOME_NOTHING_MOUNTED;
213 break;
214 }
215 }
216
217 return next(fd, buf, bufsize, '\n');
91} 218}
92 219
93/* which_tmpdir is called only during early boot */ 220/* which_tmpdir is called only during early boot */
@@ -106,8 +233,12 @@ static int checked_tmpdir = 0;
106 */ 233 */
107static void which_tmpdir(void) 234static void which_tmpdir(void)
108{ 235{
109 int fd, found; 236 int fd;
110 char buf[128] = { '\0' }; 237 int found;
238 int outcome;
239 char *path;
240 char *buf;
241 size_t bufsize;
111 242
112 if (checked_tmpdir) 243 if (checked_tmpdir)
113 return; 244 return;
@@ -116,49 +247,66 @@ static void which_tmpdir(void)
116 247
117 printf("Checking for tmpfs mount on /dev/shm..."); 248 printf("Checking for tmpfs mount on /dev/shm...");
118 249
250 path = realpath("/dev/shm", NULL);
251 if (!path) {
252 printf("failed to check real path, errno = %d\n", errno);
253 return;
254 }
255 printf("%s...", path);
256
257 /*
258 * The buffer needs to be able to fit the full octal-escaped path, a
259 * space, and a trailing null in order to successfully decode it.
260 */
261 bufsize = octal_encoded_length(path, " \t\n\\") + 2;
262
263 if (bufsize < 128)
264 bufsize = 128;
265
266 buf = malloc(bufsize);
267 if (!buf) {
268 printf("malloc failed, errno = %d\n", errno);
269 goto out;
270 }
271 buf[0] = '\0';
272
119 fd = open("/proc/mounts", O_RDONLY); 273 fd = open("/proc/mounts", O_RDONLY);
120 if (fd < 0) { 274 if (fd < 0) {
121 printf("failed to open /proc/mounts, errno = %d\n", errno); 275 printf("failed to open /proc/mounts, errno = %d\n", errno);
122 return; 276 goto out1;
123 } 277 }
124 278
279 outcome = OUTCOME_NOTHING_MOUNTED;
125 while (1) { 280 while (1) {
126 found = next(fd, buf, ARRAY_SIZE(buf), ' '); 281 found = read_mount(fd, buf, bufsize, path, &outcome);
127 if (found != 1)
128 break;
129
130 if (!strncmp(buf, "/dev/shm", strlen("/dev/shm")))
131 goto found;
132
133 found = next(fd, buf, ARRAY_SIZE(buf), '\n');
134 if (found != 1) 282 if (found != 1)
135 break; 283 break;
136 } 284 }
137 285
138err: 286 if (found < 0) {
139 if (found == 0)
140 printf("nothing mounted on /dev/shm\n");
141 else if (found < 0)
142 printf("read returned errno %d\n", -found); 287 printf("read returned errno %d\n", -found);
288 } else {
289 switch (outcome) {
290 case OUTCOME_TMPFS_MOUNT:
291 printf("OK\n");
292 default_tmpdir = "/dev/shm";
293 break;
143 294
144out: 295 case OUTCOME_NON_TMPFS_MOUNT:
145 close(fd); 296 printf("not tmpfs\n");
146 297 break;
147 return;
148
149found:
150 found = next(fd, buf, ARRAY_SIZE(buf), ' ');
151 if (found != 1)
152 goto err;
153 298
154 if (strncmp(buf, "tmpfs", strlen("tmpfs"))) { 299 default:
155 printf("not tmpfs\n"); 300 printf("nothing mounted on /dev/shm\n");
156 goto out; 301 break;
302 }
157 } 303 }
158 304
159 printf("OK\n"); 305 close(fd);
160 default_tmpdir = "/dev/shm"; 306out1:
161 goto out; 307 free(buf);
308out:
309 free(path);
162} 310}
163 311
164static int __init make_tempfile(const char *template, char **out_tempname, 312static int __init make_tempfile(const char *template, char **out_tempname,
diff --git a/arch/um/os-Linux/signal.c b/arch/um/os-Linux/signal.c
index 9d9f1b4bf826..905924b773d3 100644
--- a/arch/um/os-Linux/signal.c
+++ b/arch/um/os-Linux/signal.c
@@ -25,7 +25,7 @@ void (*sig_info[NSIG])(int, struct siginfo *, struct uml_pt_regs *) = {
25 [SIGIO] = sigio_handler, 25 [SIGIO] = sigio_handler,
26 [SIGVTALRM] = timer_handler }; 26 [SIGVTALRM] = timer_handler };
27 27
28static void sig_handler_common(int sig, siginfo_t *si, mcontext_t *mc) 28static void sig_handler_common(int sig, struct siginfo *si, mcontext_t *mc)
29{ 29{
30 struct uml_pt_regs r; 30 struct uml_pt_regs r;
31 int save_errno = errno; 31 int save_errno = errno;
@@ -61,7 +61,7 @@ static void sig_handler_common(int sig, siginfo_t *si, mcontext_t *mc)
61static int signals_enabled; 61static int signals_enabled;
62static unsigned int signals_pending; 62static unsigned int signals_pending;
63 63
64void sig_handler(int sig, siginfo_t *si, mcontext_t *mc) 64void sig_handler(int sig, struct siginfo *si, mcontext_t *mc)
65{ 65{
66 int enabled; 66 int enabled;
67 67
@@ -120,7 +120,7 @@ void set_sigstack(void *sig_stack, int size)
120 panic("enabling signal stack failed, errno = %d\n", errno); 120 panic("enabling signal stack failed, errno = %d\n", errno);
121} 121}
122 122
123static void (*handlers[_NSIG])(int sig, siginfo_t *si, mcontext_t *mc) = { 123static void (*handlers[_NSIG])(int sig, struct siginfo *si, mcontext_t *mc) = {
124 [SIGSEGV] = sig_handler, 124 [SIGSEGV] = sig_handler,
125 [SIGBUS] = sig_handler, 125 [SIGBUS] = sig_handler,
126 [SIGILL] = sig_handler, 126 [SIGILL] = sig_handler,
@@ -162,7 +162,7 @@ static void hard_handler(int sig, siginfo_t *si, void *p)
162 while ((sig = ffs(pending)) != 0){ 162 while ((sig = ffs(pending)) != 0){
163 sig--; 163 sig--;
164 pending &= ~(1 << sig); 164 pending &= ~(1 << sig);
165 (*handlers[sig])(sig, si, mc); 165 (*handlers[sig])(sig, (struct siginfo *)si, mc);
166 } 166 }
167 167
168 /* 168 /*
diff --git a/arch/um/os-Linux/skas/process.c b/arch/um/os-Linux/skas/process.c
index 4625949bf1e4..d531879a4617 100644
--- a/arch/um/os-Linux/skas/process.c
+++ b/arch/um/os-Linux/skas/process.c
@@ -54,7 +54,7 @@ static int ptrace_dump_regs(int pid)
54 54
55void wait_stub_done(int pid) 55void wait_stub_done(int pid)
56{ 56{
57 int n, status, err; 57 int n, status, err, bad_stop = 0;
58 58
59 while (1) { 59 while (1) {
60 CATCH_EINTR(n = waitpid(pid, &status, WUNTRACED | __WALL)); 60 CATCH_EINTR(n = waitpid(pid, &status, WUNTRACED | __WALL));
@@ -74,6 +74,8 @@ void wait_stub_done(int pid)
74 74
75 if (((1 << WSTOPSIG(status)) & STUB_DONE_MASK) != 0) 75 if (((1 << WSTOPSIG(status)) & STUB_DONE_MASK) != 0)
76 return; 76 return;
77 else
78 bad_stop = 1;
77 79
78bad_wait: 80bad_wait:
79 err = ptrace_dump_regs(pid); 81 err = ptrace_dump_regs(pid);
@@ -83,7 +85,10 @@ bad_wait:
83 printk(UM_KERN_ERR "wait_stub_done : failed to wait for SIGTRAP, " 85 printk(UM_KERN_ERR "wait_stub_done : failed to wait for SIGTRAP, "
84 "pid = %d, n = %d, errno = %d, status = 0x%x\n", pid, n, errno, 86 "pid = %d, n = %d, errno = %d, status = 0x%x\n", pid, n, errno,
85 status); 87 status);
86 fatal_sigsegv(); 88 if (bad_stop)
89 kill(pid, SIGKILL);
90 else
91 fatal_sigsegv();
87} 92}
88 93
89extern unsigned long current_stub_stack(void); 94extern unsigned long current_stub_stack(void);
@@ -409,7 +414,7 @@ void userspace(struct uml_pt_regs *regs)
409 if (WIFSTOPPED(status)) { 414 if (WIFSTOPPED(status)) {
410 int sig = WSTOPSIG(status); 415 int sig = WSTOPSIG(status);
411 416
412 ptrace(PTRACE_GETSIGINFO, pid, 0, &si); 417 ptrace(PTRACE_GETSIGINFO, pid, 0, (struct siginfo *)&si);
413 418
414 switch (sig) { 419 switch (sig) {
415 case SIGSEGV: 420 case SIGSEGV:
@@ -417,7 +422,7 @@ void userspace(struct uml_pt_regs *regs)
417 !ptrace_faultinfo) { 422 !ptrace_faultinfo) {
418 get_skas_faultinfo(pid, 423 get_skas_faultinfo(pid,
419 &regs->faultinfo); 424 &regs->faultinfo);
420 (*sig_info[SIGSEGV])(SIGSEGV, &si, 425 (*sig_info[SIGSEGV])(SIGSEGV, (struct siginfo *)&si,
421 regs); 426 regs);
422 } 427 }
423 else handle_segv(pid, regs); 428 else handle_segv(pid, regs);
@@ -426,14 +431,14 @@ void userspace(struct uml_pt_regs *regs)
426 handle_trap(pid, regs, local_using_sysemu); 431 handle_trap(pid, regs, local_using_sysemu);
427 break; 432 break;
428 case SIGTRAP: 433 case SIGTRAP:
429 relay_signal(SIGTRAP, &si, regs); 434 relay_signal(SIGTRAP, (struct siginfo *)&si, regs);
430 break; 435 break;
431 case SIGVTALRM: 436 case SIGVTALRM:
432 now = os_nsecs(); 437 now = os_nsecs();
433 if (now < nsecs) 438 if (now < nsecs)
434 break; 439 break;
435 block_signals(); 440 block_signals();
436 (*sig_info[sig])(sig, &si, regs); 441 (*sig_info[sig])(sig, (struct siginfo *)&si, regs);
437 unblock_signals(); 442 unblock_signals();
438 nsecs = timer.it_value.tv_sec * 443 nsecs = timer.it_value.tv_sec *
439 UM_NSEC_PER_SEC + 444 UM_NSEC_PER_SEC +
@@ -447,7 +452,7 @@ void userspace(struct uml_pt_regs *regs)
447 case SIGFPE: 452 case SIGFPE:
448 case SIGWINCH: 453 case SIGWINCH:
449 block_signals(); 454 block_signals();
450 (*sig_info[sig])(sig, &si, regs); 455 (*sig_info[sig])(sig, (struct siginfo *)&si, regs);
451 unblock_signals(); 456 unblock_signals();
452 break; 457 break;
453 default: 458 default:
diff --git a/arch/x86/boot/compressed/eboot.c b/arch/x86/boot/compressed/eboot.c
index d606463aa6d6..b7388a425f09 100644
--- a/arch/x86/boot/compressed/eboot.c
+++ b/arch/x86/boot/compressed/eboot.c
@@ -225,7 +225,7 @@ static void low_free(unsigned long size, unsigned long addr)
225 unsigned long nr_pages; 225 unsigned long nr_pages;
226 226
227 nr_pages = round_up(size, EFI_PAGE_SIZE) / EFI_PAGE_SIZE; 227 nr_pages = round_up(size, EFI_PAGE_SIZE) / EFI_PAGE_SIZE;
228 efi_call_phys2(sys_table->boottime->free_pages, addr, size); 228 efi_call_phys2(sys_table->boottime->free_pages, addr, nr_pages);
229} 229}
230 230
231static void find_bits(unsigned long mask, u8 *pos, u8 *size) 231static void find_bits(unsigned long mask, u8 *pos, u8 *size)
diff --git a/arch/x86/crypto/Makefile b/arch/x86/crypto/Makefile
index 7d6ba9db1be9..6c63c358a7e6 100644
--- a/arch/x86/crypto/Makefile
+++ b/arch/x86/crypto/Makefile
@@ -27,7 +27,6 @@ obj-$(CONFIG_CRYPTO_SHA1_SSSE3) += sha1-ssse3.o
27obj-$(CONFIG_CRYPTO_CRC32_PCLMUL) += crc32-pclmul.o 27obj-$(CONFIG_CRYPTO_CRC32_PCLMUL) += crc32-pclmul.o
28obj-$(CONFIG_CRYPTO_SHA256_SSSE3) += sha256-ssse3.o 28obj-$(CONFIG_CRYPTO_SHA256_SSSE3) += sha256-ssse3.o
29obj-$(CONFIG_CRYPTO_SHA512_SSSE3) += sha512-ssse3.o 29obj-$(CONFIG_CRYPTO_SHA512_SSSE3) += sha512-ssse3.o
30obj-$(CONFIG_CRYPTO_CRCT10DIF_PCLMUL) += crct10dif-pclmul.o
31 30
32# These modules require assembler to support AVX. 31# These modules require assembler to support AVX.
33ifeq ($(avx_supported),yes) 32ifeq ($(avx_supported),yes)
@@ -82,4 +81,3 @@ crc32c-intel-$(CONFIG_64BIT) += crc32c-pcl-intel-asm_64.o
82crc32-pclmul-y := crc32-pclmul_asm.o crc32-pclmul_glue.o 81crc32-pclmul-y := crc32-pclmul_asm.o crc32-pclmul_glue.o
83sha256-ssse3-y := sha256-ssse3-asm.o sha256-avx-asm.o sha256-avx2-asm.o sha256_ssse3_glue.o 82sha256-ssse3-y := sha256-ssse3-asm.o sha256-avx-asm.o sha256-avx2-asm.o sha256_ssse3_glue.o
84sha512-ssse3-y := sha512-ssse3-asm.o sha512-avx-asm.o sha512-avx2-asm.o sha512_ssse3_glue.o 83sha512-ssse3-y := sha512-ssse3-asm.o sha512-avx-asm.o sha512-avx2-asm.o sha512_ssse3_glue.o
85crct10dif-pclmul-y := crct10dif-pcl-asm_64.o crct10dif-pclmul_glue.o
diff --git a/arch/x86/crypto/crct10dif-pcl-asm_64.S b/arch/x86/crypto/crct10dif-pcl-asm_64.S
deleted file mode 100644
index 35e97569d05f..000000000000
--- a/arch/x86/crypto/crct10dif-pcl-asm_64.S
+++ /dev/null
@@ -1,643 +0,0 @@
1########################################################################
2# Implement fast CRC-T10DIF computation with SSE and PCLMULQDQ instructions
3#
4# Copyright (c) 2013, Intel Corporation
5#
6# Authors:
7# Erdinc Ozturk <erdinc.ozturk@intel.com>
8# Vinodh Gopal <vinodh.gopal@intel.com>
9# James Guilford <james.guilford@intel.com>
10# Tim Chen <tim.c.chen@linux.intel.com>
11#
12# This software is available to you under a choice of one of two
13# licenses. You may choose to be licensed under the terms of the GNU
14# General Public License (GPL) Version 2, available from the file
15# COPYING in the main directory of this source tree, or the
16# OpenIB.org BSD license below:
17#
18# Redistribution and use in source and binary forms, with or without
19# modification, are permitted provided that the following conditions are
20# met:
21#
22# * Redistributions of source code must retain the above copyright
23# notice, this list of conditions and the following disclaimer.
24#
25# * Redistributions in binary form must reproduce the above copyright
26# notice, this list of conditions and the following disclaimer in the
27# documentation and/or other materials provided with the
28# distribution.
29#
30# * Neither the name of the Intel Corporation nor the names of its
31# contributors may be used to endorse or promote products derived from
32# this software without specific prior written permission.
33#
34#
35# THIS SOFTWARE IS PROVIDED BY INTEL CORPORATION ""AS IS"" AND ANY
36# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
37# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
38# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL INTEL CORPORATION OR
39# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
40# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
41# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
42# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
43# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
44# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
45# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
46########################################################################
47# Function API:
48# UINT16 crc_t10dif_pcl(
49# UINT16 init_crc, //initial CRC value, 16 bits
50# const unsigned char *buf, //buffer pointer to calculate CRC on
51# UINT64 len //buffer length in bytes (64-bit data)
52# );
53#
54# Reference paper titled "Fast CRC Computation for Generic
55# Polynomials Using PCLMULQDQ Instruction"
56# URL: http://www.intel.com/content/dam/www/public/us/en/documents
57# /white-papers/fast-crc-computation-generic-polynomials-pclmulqdq-paper.pdf
58#
59#
60
61#include <linux/linkage.h>
62
63.text
64
65#define arg1 %rdi
66#define arg2 %rsi
67#define arg3 %rdx
68
69#define arg1_low32 %edi
70
71ENTRY(crc_t10dif_pcl)
72.align 16
73
74 # adjust the 16-bit initial_crc value, scale it to 32 bits
75 shl $16, arg1_low32
76
77 # Allocate Stack Space
78 mov %rsp, %rcx
79 sub $16*2, %rsp
80 # align stack to 16 byte boundary
81 and $~(0x10 - 1), %rsp
82
83 # check if smaller than 256
84 cmp $256, arg3
85
86 # for sizes less than 128, we can't fold 64B at a time...
87 jl _less_than_128
88
89
90 # load the initial crc value
91 movd arg1_low32, %xmm10 # initial crc
92
93 # crc value does not need to be byte-reflected, but it needs
94 # to be moved to the high part of the register.
95 # because data will be byte-reflected and will align with
96 # initial crc at correct place.
97 pslldq $12, %xmm10
98
99 movdqa SHUF_MASK(%rip), %xmm11
100 # receive the initial 64B data, xor the initial crc value
101 movdqu 16*0(arg2), %xmm0
102 movdqu 16*1(arg2), %xmm1
103 movdqu 16*2(arg2), %xmm2
104 movdqu 16*3(arg2), %xmm3
105 movdqu 16*4(arg2), %xmm4
106 movdqu 16*5(arg2), %xmm5
107 movdqu 16*6(arg2), %xmm6
108 movdqu 16*7(arg2), %xmm7
109
110 pshufb %xmm11, %xmm0
111 # XOR the initial_crc value
112 pxor %xmm10, %xmm0
113 pshufb %xmm11, %xmm1
114 pshufb %xmm11, %xmm2
115 pshufb %xmm11, %xmm3
116 pshufb %xmm11, %xmm4
117 pshufb %xmm11, %xmm5
118 pshufb %xmm11, %xmm6
119 pshufb %xmm11, %xmm7
120
121 movdqa rk3(%rip), %xmm10 #xmm10 has rk3 and rk4
122 #imm value of pclmulqdq instruction
123 #will determine which constant to use
124
125 #################################################################
126 # we subtract 256 instead of 128 to save one instruction from the loop
127 sub $256, arg3
128
129 # at this section of the code, there is 64*x+y (0<=y<64) bytes of
130 # buffer. The _fold_64_B_loop will fold 64B at a time
131 # until we have 64+y Bytes of buffer
132
133
134 # fold 64B at a time. This section of the code folds 4 xmm
135 # registers in parallel
136_fold_64_B_loop:
137
138 # update the buffer pointer
139 add $128, arg2 # buf += 64#
140
141 movdqu 16*0(arg2), %xmm9
142 movdqu 16*1(arg2), %xmm12
143 pshufb %xmm11, %xmm9
144 pshufb %xmm11, %xmm12
145 movdqa %xmm0, %xmm8
146 movdqa %xmm1, %xmm13
147 pclmulqdq $0x0 , %xmm10, %xmm0
148 pclmulqdq $0x11, %xmm10, %xmm8
149 pclmulqdq $0x0 , %xmm10, %xmm1
150 pclmulqdq $0x11, %xmm10, %xmm13
151 pxor %xmm9 , %xmm0
152 xorps %xmm8 , %xmm0
153 pxor %xmm12, %xmm1
154 xorps %xmm13, %xmm1
155
156 movdqu 16*2(arg2), %xmm9
157 movdqu 16*3(arg2), %xmm12
158 pshufb %xmm11, %xmm9
159 pshufb %xmm11, %xmm12
160 movdqa %xmm2, %xmm8
161 movdqa %xmm3, %xmm13
162 pclmulqdq $0x0, %xmm10, %xmm2
163 pclmulqdq $0x11, %xmm10, %xmm8
164 pclmulqdq $0x0, %xmm10, %xmm3
165 pclmulqdq $0x11, %xmm10, %xmm13
166 pxor %xmm9 , %xmm2
167 xorps %xmm8 , %xmm2
168 pxor %xmm12, %xmm3
169 xorps %xmm13, %xmm3
170
171 movdqu 16*4(arg2), %xmm9
172 movdqu 16*5(arg2), %xmm12
173 pshufb %xmm11, %xmm9
174 pshufb %xmm11, %xmm12
175 movdqa %xmm4, %xmm8
176 movdqa %xmm5, %xmm13
177 pclmulqdq $0x0, %xmm10, %xmm4
178 pclmulqdq $0x11, %xmm10, %xmm8
179 pclmulqdq $0x0, %xmm10, %xmm5
180 pclmulqdq $0x11, %xmm10, %xmm13
181 pxor %xmm9 , %xmm4
182 xorps %xmm8 , %xmm4
183 pxor %xmm12, %xmm5
184 xorps %xmm13, %xmm5
185
186 movdqu 16*6(arg2), %xmm9
187 movdqu 16*7(arg2), %xmm12
188 pshufb %xmm11, %xmm9
189 pshufb %xmm11, %xmm12
190 movdqa %xmm6 , %xmm8
191 movdqa %xmm7 , %xmm13
192 pclmulqdq $0x0 , %xmm10, %xmm6
193 pclmulqdq $0x11, %xmm10, %xmm8
194 pclmulqdq $0x0 , %xmm10, %xmm7
195 pclmulqdq $0x11, %xmm10, %xmm13
196 pxor %xmm9 , %xmm6
197 xorps %xmm8 , %xmm6
198 pxor %xmm12, %xmm7
199 xorps %xmm13, %xmm7
200
201 sub $128, arg3
202
203 # check if there is another 64B in the buffer to be able to fold
204 jge _fold_64_B_loop
205 ##################################################################
206
207
208 add $128, arg2
209 # at this point, the buffer pointer is pointing at the last y Bytes
210 # of the buffer the 64B of folded data is in 4 of the xmm
211 # registers: xmm0, xmm1, xmm2, xmm3
212
213
214 # fold the 8 xmm registers to 1 xmm register with different constants
215
216 movdqa rk9(%rip), %xmm10
217 movdqa %xmm0, %xmm8
218 pclmulqdq $0x11, %xmm10, %xmm0
219 pclmulqdq $0x0 , %xmm10, %xmm8
220 pxor %xmm8, %xmm7
221 xorps %xmm0, %xmm7
222
223 movdqa rk11(%rip), %xmm10
224 movdqa %xmm1, %xmm8
225 pclmulqdq $0x11, %xmm10, %xmm1
226 pclmulqdq $0x0 , %xmm10, %xmm8
227 pxor %xmm8, %xmm7
228 xorps %xmm1, %xmm7
229
230 movdqa rk13(%rip), %xmm10
231 movdqa %xmm2, %xmm8
232 pclmulqdq $0x11, %xmm10, %xmm2
233 pclmulqdq $0x0 , %xmm10, %xmm8
234 pxor %xmm8, %xmm7
235 pxor %xmm2, %xmm7
236
237 movdqa rk15(%rip), %xmm10
238 movdqa %xmm3, %xmm8
239 pclmulqdq $0x11, %xmm10, %xmm3
240 pclmulqdq $0x0 , %xmm10, %xmm8
241 pxor %xmm8, %xmm7
242 xorps %xmm3, %xmm7
243
244 movdqa rk17(%rip), %xmm10
245 movdqa %xmm4, %xmm8
246 pclmulqdq $0x11, %xmm10, %xmm4
247 pclmulqdq $0x0 , %xmm10, %xmm8
248 pxor %xmm8, %xmm7
249 pxor %xmm4, %xmm7
250
251 movdqa rk19(%rip), %xmm10
252 movdqa %xmm5, %xmm8
253 pclmulqdq $0x11, %xmm10, %xmm5
254 pclmulqdq $0x0 , %xmm10, %xmm8
255 pxor %xmm8, %xmm7
256 xorps %xmm5, %xmm7
257
258 movdqa rk1(%rip), %xmm10 #xmm10 has rk1 and rk2
259 #imm value of pclmulqdq instruction
260 #will determine which constant to use
261 movdqa %xmm6, %xmm8
262 pclmulqdq $0x11, %xmm10, %xmm6
263 pclmulqdq $0x0 , %xmm10, %xmm8
264 pxor %xmm8, %xmm7
265 pxor %xmm6, %xmm7
266
267
268 # instead of 64, we add 48 to the loop counter to save 1 instruction
269 # from the loop instead of a cmp instruction, we use the negative
270 # flag with the jl instruction
271 add $128-16, arg3
272 jl _final_reduction_for_128
273
274 # now we have 16+y bytes left to reduce. 16 Bytes is in register xmm7
275 # and the rest is in memory. We can fold 16 bytes at a time if y>=16
276 # continue folding 16B at a time
277
278_16B_reduction_loop:
279 movdqa %xmm7, %xmm8
280 pclmulqdq $0x11, %xmm10, %xmm7
281 pclmulqdq $0x0 , %xmm10, %xmm8
282 pxor %xmm8, %xmm7
283 movdqu (arg2), %xmm0
284 pshufb %xmm11, %xmm0
285 pxor %xmm0 , %xmm7
286 add $16, arg2
287 sub $16, arg3
288 # instead of a cmp instruction, we utilize the flags with the
289 # jge instruction equivalent of: cmp arg3, 16-16
290 # check if there is any more 16B in the buffer to be able to fold
291 jge _16B_reduction_loop
292
293 #now we have 16+z bytes left to reduce, where 0<= z < 16.
294 #first, we reduce the data in the xmm7 register
295
296
297_final_reduction_for_128:
298 # check if any more data to fold. If not, compute the CRC of
299 # the final 128 bits
300 add $16, arg3
301 je _128_done
302
303 # here we are getting data that is less than 16 bytes.
304 # since we know that there was data before the pointer, we can
305 # offset the input pointer before the actual point, to receive
306 # exactly 16 bytes. after that the registers need to be adjusted.
307_get_last_two_xmms:
308 movdqa %xmm7, %xmm2
309
310 movdqu -16(arg2, arg3), %xmm1
311 pshufb %xmm11, %xmm1
312
313 # get rid of the extra data that was loaded before
314 # load the shift constant
315 lea pshufb_shf_table+16(%rip), %rax
316 sub arg3, %rax
317 movdqu (%rax), %xmm0
318
319 # shift xmm2 to the left by arg3 bytes
320 pshufb %xmm0, %xmm2
321
322 # shift xmm7 to the right by 16-arg3 bytes
323 pxor mask1(%rip), %xmm0
324 pshufb %xmm0, %xmm7
325 pblendvb %xmm2, %xmm1 #xmm0 is implicit
326
327 # fold 16 Bytes
328 movdqa %xmm1, %xmm2
329 movdqa %xmm7, %xmm8
330 pclmulqdq $0x11, %xmm10, %xmm7
331 pclmulqdq $0x0 , %xmm10, %xmm8
332 pxor %xmm8, %xmm7
333 pxor %xmm2, %xmm7
334
335_128_done:
336 # compute crc of a 128-bit value
337 movdqa rk5(%rip), %xmm10 # rk5 and rk6 in xmm10
338 movdqa %xmm7, %xmm0
339
340 #64b fold
341 pclmulqdq $0x1, %xmm10, %xmm7
342 pslldq $8 , %xmm0
343 pxor %xmm0, %xmm7
344
345 #32b fold
346 movdqa %xmm7, %xmm0
347
348 pand mask2(%rip), %xmm0
349
350 psrldq $12, %xmm7
351 pclmulqdq $0x10, %xmm10, %xmm7
352 pxor %xmm0, %xmm7
353
354 #barrett reduction
355_barrett:
356 movdqa rk7(%rip), %xmm10 # rk7 and rk8 in xmm10
357 movdqa %xmm7, %xmm0
358 pclmulqdq $0x01, %xmm10, %xmm7
359 pslldq $4, %xmm7
360 pclmulqdq $0x11, %xmm10, %xmm7
361
362 pslldq $4, %xmm7
363 pxor %xmm0, %xmm7
364 pextrd $1, %xmm7, %eax
365
366_cleanup:
367 # scale the result back to 16 bits
368 shr $16, %eax
369 mov %rcx, %rsp
370 ret
371
372########################################################################
373
374.align 16
375_less_than_128:
376
377 # check if there is enough buffer to be able to fold 16B at a time
378 cmp $32, arg3
379 jl _less_than_32
380 movdqa SHUF_MASK(%rip), %xmm11
381
382 # now if there is, load the constants
383 movdqa rk1(%rip), %xmm10 # rk1 and rk2 in xmm10
384
385 movd arg1_low32, %xmm0 # get the initial crc value
386 pslldq $12, %xmm0 # align it to its correct place
387 movdqu (arg2), %xmm7 # load the plaintext
388 pshufb %xmm11, %xmm7 # byte-reflect the plaintext
389 pxor %xmm0, %xmm7
390
391
392 # update the buffer pointer
393 add $16, arg2
394
395 # update the counter. subtract 32 instead of 16 to save one
396 # instruction from the loop
397 sub $32, arg3
398
399 jmp _16B_reduction_loop
400
401
402.align 16
403_less_than_32:
404 # mov initial crc to the return value. this is necessary for
405 # zero-length buffers.
406 mov arg1_low32, %eax
407 test arg3, arg3
408 je _cleanup
409
410 movdqa SHUF_MASK(%rip), %xmm11
411
412 movd arg1_low32, %xmm0 # get the initial crc value
413 pslldq $12, %xmm0 # align it to its correct place
414
415 cmp $16, arg3
416 je _exact_16_left
417 jl _less_than_16_left
418
419 movdqu (arg2), %xmm7 # load the plaintext
420 pshufb %xmm11, %xmm7 # byte-reflect the plaintext
421 pxor %xmm0 , %xmm7 # xor the initial crc value
422 add $16, arg2
423 sub $16, arg3
424 movdqa rk1(%rip), %xmm10 # rk1 and rk2 in xmm10
425 jmp _get_last_two_xmms
426
427
428.align 16
429_less_than_16_left:
430 # use stack space to load data less than 16 bytes, zero-out
431 # the 16B in memory first.
432
433 pxor %xmm1, %xmm1
434 mov %rsp, %r11
435 movdqa %xmm1, (%r11)
436
437 cmp $4, arg3
438 jl _only_less_than_4
439
440 # backup the counter value
441 mov arg3, %r9
442 cmp $8, arg3
443 jl _less_than_8_left
444
445 # load 8 Bytes
446 mov (arg2), %rax
447 mov %rax, (%r11)
448 add $8, %r11
449 sub $8, arg3
450 add $8, arg2
451_less_than_8_left:
452
453 cmp $4, arg3
454 jl _less_than_4_left
455
456 # load 4 Bytes
457 mov (arg2), %eax
458 mov %eax, (%r11)
459 add $4, %r11
460 sub $4, arg3
461 add $4, arg2
462_less_than_4_left:
463
464 cmp $2, arg3
465 jl _less_than_2_left
466
467 # load 2 Bytes
468 mov (arg2), %ax
469 mov %ax, (%r11)
470 add $2, %r11
471 sub $2, arg3
472 add $2, arg2
473_less_than_2_left:
474 cmp $1, arg3
475 jl _zero_left
476
477 # load 1 Byte
478 mov (arg2), %al
479 mov %al, (%r11)
480_zero_left:
481 movdqa (%rsp), %xmm7
482 pshufb %xmm11, %xmm7
483 pxor %xmm0 , %xmm7 # xor the initial crc value
484
485 # shl r9, 4
486 lea pshufb_shf_table+16(%rip), %rax
487 sub %r9, %rax
488 movdqu (%rax), %xmm0
489 pxor mask1(%rip), %xmm0
490
491 pshufb %xmm0, %xmm7
492 jmp _128_done
493
494.align 16
495_exact_16_left:
496 movdqu (arg2), %xmm7
497 pshufb %xmm11, %xmm7
498 pxor %xmm0 , %xmm7 # xor the initial crc value
499
500 jmp _128_done
501
502_only_less_than_4:
503 cmp $3, arg3
504 jl _only_less_than_3
505
506 # load 3 Bytes
507 mov (arg2), %al
508 mov %al, (%r11)
509
510 mov 1(arg2), %al
511 mov %al, 1(%r11)
512
513 mov 2(arg2), %al
514 mov %al, 2(%r11)
515
516 movdqa (%rsp), %xmm7
517 pshufb %xmm11, %xmm7
518 pxor %xmm0 , %xmm7 # xor the initial crc value
519
520 psrldq $5, %xmm7
521
522 jmp _barrett
523_only_less_than_3:
524 cmp $2, arg3
525 jl _only_less_than_2
526
527 # load 2 Bytes
528 mov (arg2), %al
529 mov %al, (%r11)
530
531 mov 1(arg2), %al
532 mov %al, 1(%r11)
533
534 movdqa (%rsp), %xmm7
535 pshufb %xmm11, %xmm7
536 pxor %xmm0 , %xmm7 # xor the initial crc value
537
538 psrldq $6, %xmm7
539
540 jmp _barrett
541_only_less_than_2:
542
543 # load 1 Byte
544 mov (arg2), %al
545 mov %al, (%r11)
546
547 movdqa (%rsp), %xmm7
548 pshufb %xmm11, %xmm7
549 pxor %xmm0 , %xmm7 # xor the initial crc value
550
551 psrldq $7, %xmm7
552
553 jmp _barrett
554
555ENDPROC(crc_t10dif_pcl)
556
557.data
558
559# precomputed constants
560# these constants are precomputed from the poly:
561# 0x8bb70000 (0x8bb7 scaled to 32 bits)
562.align 16
563# Q = 0x18BB70000
564# rk1 = 2^(32*3) mod Q << 32
565# rk2 = 2^(32*5) mod Q << 32
566# rk3 = 2^(32*15) mod Q << 32
567# rk4 = 2^(32*17) mod Q << 32
568# rk5 = 2^(32*3) mod Q << 32
569# rk6 = 2^(32*2) mod Q << 32
570# rk7 = floor(2^64/Q)
571# rk8 = Q
572rk1:
573.quad 0x2d56000000000000
574rk2:
575.quad 0x06df000000000000
576rk3:
577.quad 0x9d9d000000000000
578rk4:
579.quad 0x7cf5000000000000
580rk5:
581.quad 0x2d56000000000000
582rk6:
583.quad 0x1368000000000000
584rk7:
585.quad 0x00000001f65a57f8
586rk8:
587.quad 0x000000018bb70000
588
589rk9:
590.quad 0xceae000000000000
591rk10:
592.quad 0xbfd6000000000000
593rk11:
594.quad 0x1e16000000000000
595rk12:
596.quad 0x713c000000000000
597rk13:
598.quad 0xf7f9000000000000
599rk14:
600.quad 0x80a6000000000000
601rk15:
602.quad 0x044c000000000000
603rk16:
604.quad 0xe658000000000000
605rk17:
606.quad 0xad18000000000000
607rk18:
608.quad 0xa497000000000000
609rk19:
610.quad 0x6ee3000000000000
611rk20:
612.quad 0xe7b5000000000000
613
614
615
616mask1:
617.octa 0x80808080808080808080808080808080
618mask2:
619.octa 0x00000000FFFFFFFFFFFFFFFFFFFFFFFF
620
621SHUF_MASK:
622.octa 0x000102030405060708090A0B0C0D0E0F
623
624pshufb_shf_table:
625# use these values for shift constants for the pshufb instruction
626# different alignments result in values as shown:
627# DDQ 0x008f8e8d8c8b8a898887868584838281 # shl 15 (16-1) / shr1
628# DDQ 0x01008f8e8d8c8b8a8988878685848382 # shl 14 (16-3) / shr2
629# DDQ 0x0201008f8e8d8c8b8a89888786858483 # shl 13 (16-4) / shr3
630# DDQ 0x030201008f8e8d8c8b8a898887868584 # shl 12 (16-4) / shr4
631# DDQ 0x04030201008f8e8d8c8b8a8988878685 # shl 11 (16-5) / shr5
632# DDQ 0x0504030201008f8e8d8c8b8a89888786 # shl 10 (16-6) / shr6
633# DDQ 0x060504030201008f8e8d8c8b8a898887 # shl 9 (16-7) / shr7
634# DDQ 0x07060504030201008f8e8d8c8b8a8988 # shl 8 (16-8) / shr8
635# DDQ 0x0807060504030201008f8e8d8c8b8a89 # shl 7 (16-9) / shr9
636# DDQ 0x090807060504030201008f8e8d8c8b8a # shl 6 (16-10) / shr10
637# DDQ 0x0a090807060504030201008f8e8d8c8b # shl 5 (16-11) / shr11
638# DDQ 0x0b0a090807060504030201008f8e8d8c # shl 4 (16-12) / shr12
639# DDQ 0x0c0b0a090807060504030201008f8e8d # shl 3 (16-13) / shr13
640# DDQ 0x0d0c0b0a090807060504030201008f8e # shl 2 (16-14) / shr14
641# DDQ 0x0e0d0c0b0a090807060504030201008f # shl 1 (16-15) / shr15
642.octa 0x8f8e8d8c8b8a89888786858483828100
643.octa 0x000e0d0c0b0a09080706050403020100
diff --git a/arch/x86/crypto/crct10dif-pclmul_glue.c b/arch/x86/crypto/crct10dif-pclmul_glue.c
deleted file mode 100644
index 7845d7fd54c0..000000000000
--- a/arch/x86/crypto/crct10dif-pclmul_glue.c
+++ /dev/null
@@ -1,151 +0,0 @@
1/*
2 * Cryptographic API.
3 *
4 * T10 Data Integrity Field CRC16 Crypto Transform using PCLMULQDQ Instructions
5 *
6 * Copyright (C) 2013 Intel Corporation
7 * Author: Tim Chen <tim.c.chen@linux.intel.com>
8 *
9 * This program is free software; you can redistribute it and/or modify it
10 * under the terms of the GNU General Public License as published by the Free
11 * Software Foundation; either version 2 of the License, or (at your option)
12 * any later version.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
15 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
16 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
17 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
18 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
19 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
20 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21 * SOFTWARE.
22 *
23 */
24
25#include <linux/types.h>
26#include <linux/module.h>
27#include <linux/crc-t10dif.h>
28#include <crypto/internal/hash.h>
29#include <linux/init.h>
30#include <linux/string.h>
31#include <linux/kernel.h>
32#include <asm/i387.h>
33#include <asm/cpufeature.h>
34#include <asm/cpu_device_id.h>
35
36asmlinkage __u16 crc_t10dif_pcl(__u16 crc, const unsigned char *buf,
37 size_t len);
38
39struct chksum_desc_ctx {
40 __u16 crc;
41};
42
43/*
44 * Steps through buffer one byte at at time, calculates reflected
45 * crc using table.
46 */
47
48static int chksum_init(struct shash_desc *desc)
49{
50 struct chksum_desc_ctx *ctx = shash_desc_ctx(desc);
51
52 ctx->crc = 0;
53
54 return 0;
55}
56
57static int chksum_update(struct shash_desc *desc, const u8 *data,
58 unsigned int length)
59{
60 struct chksum_desc_ctx *ctx = shash_desc_ctx(desc);
61
62 if (irq_fpu_usable()) {
63 kernel_fpu_begin();
64 ctx->crc = crc_t10dif_pcl(ctx->crc, data, length);
65 kernel_fpu_end();
66 } else
67 ctx->crc = crc_t10dif_generic(ctx->crc, data, length);
68 return 0;
69}
70
71static int chksum_final(struct shash_desc *desc, u8 *out)
72{
73 struct chksum_desc_ctx *ctx = shash_desc_ctx(desc);
74
75 *(__u16 *)out = ctx->crc;
76 return 0;
77}
78
79static int __chksum_finup(__u16 *crcp, const u8 *data, unsigned int len,
80 u8 *out)
81{
82 if (irq_fpu_usable()) {
83 kernel_fpu_begin();
84 *(__u16 *)out = crc_t10dif_pcl(*crcp, data, len);
85 kernel_fpu_end();
86 } else
87 *(__u16 *)out = crc_t10dif_generic(*crcp, data, len);
88 return 0;
89}
90
91static int chksum_finup(struct shash_desc *desc, const u8 *data,
92 unsigned int len, u8 *out)
93{
94 struct chksum_desc_ctx *ctx = shash_desc_ctx(desc);
95
96 return __chksum_finup(&ctx->crc, data, len, out);
97}
98
99static int chksum_digest(struct shash_desc *desc, const u8 *data,
100 unsigned int length, u8 *out)
101{
102 struct chksum_desc_ctx *ctx = shash_desc_ctx(desc);
103
104 return __chksum_finup(&ctx->crc, data, length, out);
105}
106
107static struct shash_alg alg = {
108 .digestsize = CRC_T10DIF_DIGEST_SIZE,
109 .init = chksum_init,
110 .update = chksum_update,
111 .final = chksum_final,
112 .finup = chksum_finup,
113 .digest = chksum_digest,
114 .descsize = sizeof(struct chksum_desc_ctx),
115 .base = {
116 .cra_name = "crct10dif",
117 .cra_driver_name = "crct10dif-pclmul",
118 .cra_priority = 200,
119 .cra_blocksize = CRC_T10DIF_BLOCK_SIZE,
120 .cra_module = THIS_MODULE,
121 }
122};
123
124static const struct x86_cpu_id crct10dif_cpu_id[] = {
125 X86_FEATURE_MATCH(X86_FEATURE_PCLMULQDQ),
126 {}
127};
128MODULE_DEVICE_TABLE(x86cpu, crct10dif_cpu_id);
129
130static int __init crct10dif_intel_mod_init(void)
131{
132 if (!x86_match_cpu(crct10dif_cpu_id))
133 return -ENODEV;
134
135 return crypto_register_shash(&alg);
136}
137
138static void __exit crct10dif_intel_mod_fini(void)
139{
140 crypto_unregister_shash(&alg);
141}
142
143module_init(crct10dif_intel_mod_init);
144module_exit(crct10dif_intel_mod_fini);
145
146MODULE_AUTHOR("Tim Chen <tim.c.chen@linux.intel.com>");
147MODULE_DESCRIPTION("T10 DIF CRC calculation accelerated with PCLMULQDQ.");
148MODULE_LICENSE("GPL");
149
150MODULE_ALIAS("crct10dif");
151MODULE_ALIAS("crct10dif-pclmul");
diff --git a/arch/x86/include/asm/cpu.h b/arch/x86/include/asm/cpu.h
index 5f9a1243190e..d2b12988d2ed 100644
--- a/arch/x86/include/asm/cpu.h
+++ b/arch/x86/include/asm/cpu.h
@@ -28,7 +28,7 @@ struct x86_cpu {
28#ifdef CONFIG_HOTPLUG_CPU 28#ifdef CONFIG_HOTPLUG_CPU
29extern int arch_register_cpu(int num); 29extern int arch_register_cpu(int num);
30extern void arch_unregister_cpu(int); 30extern void arch_unregister_cpu(int);
31extern void __cpuinit start_cpu0(void); 31extern void start_cpu0(void);
32#ifdef CONFIG_DEBUG_HOTPLUG_CPU0 32#ifdef CONFIG_DEBUG_HOTPLUG_CPU0
33extern int _debug_hotplug_cpu(int cpu, int action); 33extern int _debug_hotplug_cpu(int cpu, int action);
34#endif 34#endif
diff --git a/arch/x86/include/asm/microcode.h b/arch/x86/include/asm/microcode.h
index 6bc3985ee473..f98bd6625318 100644
--- a/arch/x86/include/asm/microcode.h
+++ b/arch/x86/include/asm/microcode.h
@@ -60,11 +60,11 @@ static inline void __exit exit_amd_microcode(void) {}
60#ifdef CONFIG_MICROCODE_EARLY 60#ifdef CONFIG_MICROCODE_EARLY
61#define MAX_UCODE_COUNT 128 61#define MAX_UCODE_COUNT 128
62extern void __init load_ucode_bsp(void); 62extern void __init load_ucode_bsp(void);
63extern void __cpuinit load_ucode_ap(void); 63extern void load_ucode_ap(void);
64extern int __init save_microcode_in_initrd(void); 64extern int __init save_microcode_in_initrd(void);
65#else 65#else
66static inline void __init load_ucode_bsp(void) {} 66static inline void __init load_ucode_bsp(void) {}
67static inline void __cpuinit load_ucode_ap(void) {} 67static inline void load_ucode_ap(void) {}
68static inline int __init save_microcode_in_initrd(void) 68static inline int __init save_microcode_in_initrd(void)
69{ 69{
70 return 0; 70 return 0;
diff --git a/arch/x86/include/asm/microcode_amd.h b/arch/x86/include/asm/microcode_amd.h
index c6b043f40271..50e5c58ced23 100644
--- a/arch/x86/include/asm/microcode_amd.h
+++ b/arch/x86/include/asm/microcode_amd.h
@@ -67,11 +67,11 @@ extern enum ucode_state load_microcode_amd(int cpu, const u8 *data, size_t size)
67extern u8 amd_bsp_mpb[MPB_MAX_SIZE]; 67extern u8 amd_bsp_mpb[MPB_MAX_SIZE];
68#endif 68#endif
69extern void __init load_ucode_amd_bsp(void); 69extern void __init load_ucode_amd_bsp(void);
70extern void __cpuinit load_ucode_amd_ap(void); 70extern void load_ucode_amd_ap(void);
71extern int __init save_microcode_in_initrd_amd(void); 71extern int __init save_microcode_in_initrd_amd(void);
72#else 72#else
73static inline void __init load_ucode_amd_bsp(void) {} 73static inline void __init load_ucode_amd_bsp(void) {}
74static inline void __cpuinit load_ucode_amd_ap(void) {} 74static inline void load_ucode_amd_ap(void) {}
75static inline int __init save_microcode_in_initrd_amd(void) { return -EINVAL; } 75static inline int __init save_microcode_in_initrd_amd(void) { return -EINVAL; }
76#endif 76#endif
77 77
diff --git a/arch/x86/include/asm/microcode_intel.h b/arch/x86/include/asm/microcode_intel.h
index 87a085333cbf..9067166409bf 100644
--- a/arch/x86/include/asm/microcode_intel.h
+++ b/arch/x86/include/asm/microcode_intel.h
@@ -65,12 +65,12 @@ update_match_revision(struct microcode_header_intel *mc_header, int rev);
65 65
66#ifdef CONFIG_MICROCODE_INTEL_EARLY 66#ifdef CONFIG_MICROCODE_INTEL_EARLY
67extern void __init load_ucode_intel_bsp(void); 67extern void __init load_ucode_intel_bsp(void);
68extern void __cpuinit load_ucode_intel_ap(void); 68extern void load_ucode_intel_ap(void);
69extern void show_ucode_info_early(void); 69extern void show_ucode_info_early(void);
70extern int __init save_microcode_in_initrd_intel(void); 70extern int __init save_microcode_in_initrd_intel(void);
71#else 71#else
72static inline __init void load_ucode_intel_bsp(void) {} 72static inline __init void load_ucode_intel_bsp(void) {}
73static inline __cpuinit void load_ucode_intel_ap(void) {} 73static inline void load_ucode_intel_ap(void) {}
74static inline void show_ucode_info_early(void) {} 74static inline void show_ucode_info_early(void) {}
75static inline int __init save_microcode_in_initrd_intel(void) { return -EINVAL; } 75static inline int __init save_microcode_in_initrd_intel(void) { return -EINVAL; }
76#endif 76#endif
diff --git a/arch/x86/include/asm/mmconfig.h b/arch/x86/include/asm/mmconfig.h
index 9b119da1d105..04a3fed22cfe 100644
--- a/arch/x86/include/asm/mmconfig.h
+++ b/arch/x86/include/asm/mmconfig.h
@@ -2,8 +2,8 @@
2#define _ASM_X86_MMCONFIG_H 2#define _ASM_X86_MMCONFIG_H
3 3
4#ifdef CONFIG_PCI_MMCONFIG 4#ifdef CONFIG_PCI_MMCONFIG
5extern void __cpuinit fam10h_check_enable_mmcfg(void); 5extern void fam10h_check_enable_mmcfg(void);
6extern void __cpuinit check_enable_amd_mmconf_dmi(void); 6extern void check_enable_amd_mmconf_dmi(void);
7#else 7#else
8static inline void fam10h_check_enable_mmcfg(void) { } 8static inline void fam10h_check_enable_mmcfg(void) { }
9static inline void check_enable_amd_mmconf_dmi(void) { } 9static inline void check_enable_amd_mmconf_dmi(void) { }
diff --git a/arch/x86/include/asm/mpspec.h b/arch/x86/include/asm/mpspec.h
index 3e2f42a4b872..626cf70082d7 100644
--- a/arch/x86/include/asm/mpspec.h
+++ b/arch/x86/include/asm/mpspec.h
@@ -94,7 +94,7 @@ static inline void early_reserve_e820_mpc_new(void) { }
94#define default_get_smp_config x86_init_uint_noop 94#define default_get_smp_config x86_init_uint_noop
95#endif 95#endif
96 96
97void __cpuinit generic_processor_info(int apicid, int version); 97void generic_processor_info(int apicid, int version);
98#ifdef CONFIG_ACPI 98#ifdef CONFIG_ACPI
99extern void mp_register_ioapic(int id, u32 address, u32 gsi_base); 99extern void mp_register_ioapic(int id, u32 address, u32 gsi_base);
100extern void mp_override_legacy_irq(u8 bus_irq, u8 polarity, u8 trigger, 100extern void mp_override_legacy_irq(u8 bus_irq, u8 polarity, u8 trigger,
diff --git a/arch/x86/include/asm/numa.h b/arch/x86/include/asm/numa.h
index 1b99ee5c9f00..4064acae625d 100644
--- a/arch/x86/include/asm/numa.h
+++ b/arch/x86/include/asm/numa.h
@@ -39,7 +39,7 @@ static inline void set_apicid_to_node(int apicid, s16 node)
39 __apicid_to_node[apicid] = node; 39 __apicid_to_node[apicid] = node;
40} 40}
41 41
42extern int __cpuinit numa_cpu_node(int cpu); 42extern int numa_cpu_node(int cpu);
43 43
44#else /* CONFIG_NUMA */ 44#else /* CONFIG_NUMA */
45static inline void set_apicid_to_node(int apicid, s16 node) 45static inline void set_apicid_to_node(int apicid, s16 node)
@@ -60,8 +60,8 @@ static inline int numa_cpu_node(int cpu)
60extern void numa_set_node(int cpu, int node); 60extern void numa_set_node(int cpu, int node);
61extern void numa_clear_node(int cpu); 61extern void numa_clear_node(int cpu);
62extern void __init init_cpu_to_node(void); 62extern void __init init_cpu_to_node(void);
63extern void __cpuinit numa_add_cpu(int cpu); 63extern void numa_add_cpu(int cpu);
64extern void __cpuinit numa_remove_cpu(int cpu); 64extern void numa_remove_cpu(int cpu);
65#else /* CONFIG_NUMA */ 65#else /* CONFIG_NUMA */
66static inline void numa_set_node(int cpu, int node) { } 66static inline void numa_set_node(int cpu, int node) { }
67static inline void numa_clear_node(int cpu) { } 67static inline void numa_clear_node(int cpu) { }
diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h
index 29937c4f6ff8..24cf5aefb704 100644
--- a/arch/x86/include/asm/processor.h
+++ b/arch/x86/include/asm/processor.h
@@ -164,7 +164,7 @@ extern const struct seq_operations cpuinfo_op;
164#define cache_line_size() (boot_cpu_data.x86_cache_alignment) 164#define cache_line_size() (boot_cpu_data.x86_cache_alignment)
165 165
166extern void cpu_detect(struct cpuinfo_x86 *c); 166extern void cpu_detect(struct cpuinfo_x86 *c);
167extern void __cpuinit fpu_detect(struct cpuinfo_x86 *c); 167extern void fpu_detect(struct cpuinfo_x86 *c);
168 168
169extern void early_cpu_init(void); 169extern void early_cpu_init(void);
170extern void identify_boot_cpu(void); 170extern void identify_boot_cpu(void);
diff --git a/arch/x86/include/asm/prom.h b/arch/x86/include/asm/prom.h
index 60bef663609a..bade6ac3b14f 100644
--- a/arch/x86/include/asm/prom.h
+++ b/arch/x86/include/asm/prom.h
@@ -27,7 +27,7 @@ extern int of_ioapic;
27extern u64 initial_dtb; 27extern u64 initial_dtb;
28extern void add_dtb(u64 data); 28extern void add_dtb(u64 data);
29extern void x86_add_irq_domains(void); 29extern void x86_add_irq_domains(void);
30void __cpuinit x86_of_pci_init(void); 30void x86_of_pci_init(void);
31void x86_dtb_init(void); 31void x86_dtb_init(void);
32#else 32#else
33static inline void add_dtb(u64 data) { } 33static inline void add_dtb(u64 data) { }
diff --git a/arch/x86/include/asm/smp.h b/arch/x86/include/asm/smp.h
index b073aaea747c..4137890e88e3 100644
--- a/arch/x86/include/asm/smp.h
+++ b/arch/x86/include/asm/smp.h
@@ -179,7 +179,7 @@ static inline int wbinvd_on_all_cpus(void)
179} 179}
180#endif /* CONFIG_SMP */ 180#endif /* CONFIG_SMP */
181 181
182extern unsigned disabled_cpus __cpuinitdata; 182extern unsigned disabled_cpus;
183 183
184#ifdef CONFIG_X86_32_SMP 184#ifdef CONFIG_X86_32_SMP
185/* 185/*
diff --git a/arch/x86/kernel/acpi/boot.c b/arch/x86/kernel/acpi/boot.c
index d81a972dd506..2627a81253ee 100644
--- a/arch/x86/kernel/acpi/boot.c
+++ b/arch/x86/kernel/acpi/boot.c
@@ -195,7 +195,7 @@ static int __init acpi_parse_madt(struct acpi_table_header *table)
195 return 0; 195 return 0;
196} 196}
197 197
198static void __cpuinit acpi_register_lapic(int id, u8 enabled) 198static void acpi_register_lapic(int id, u8 enabled)
199{ 199{
200 unsigned int ver = 0; 200 unsigned int ver = 0;
201 201
@@ -607,7 +607,7 @@ void __init acpi_set_irq_model_ioapic(void)
607#ifdef CONFIG_ACPI_HOTPLUG_CPU 607#ifdef CONFIG_ACPI_HOTPLUG_CPU
608#include <acpi/processor.h> 608#include <acpi/processor.h>
609 609
610static void __cpuinit acpi_map_cpu2node(acpi_handle handle, int cpu, int physid) 610static void acpi_map_cpu2node(acpi_handle handle, int cpu, int physid)
611{ 611{
612#ifdef CONFIG_ACPI_NUMA 612#ifdef CONFIG_ACPI_NUMA
613 int nid; 613 int nid;
@@ -620,7 +620,7 @@ static void __cpuinit acpi_map_cpu2node(acpi_handle handle, int cpu, int physid)
620#endif 620#endif
621} 621}
622 622
623static int __cpuinit _acpi_map_lsapic(acpi_handle handle, int *pcpu) 623static int _acpi_map_lsapic(acpi_handle handle, int *pcpu)
624{ 624{
625 struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL }; 625 struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
626 union acpi_object *obj; 626 union acpi_object *obj;
diff --git a/arch/x86/kernel/acpi/sleep.c b/arch/x86/kernel/acpi/sleep.c
index 2a34aaf3c8f1..33120100ff5e 100644
--- a/arch/x86/kernel/acpi/sleep.c
+++ b/arch/x86/kernel/acpi/sleep.c
@@ -48,9 +48,20 @@ int x86_acpi_suspend_lowlevel(void)
48#ifndef CONFIG_64BIT 48#ifndef CONFIG_64BIT
49 native_store_gdt((struct desc_ptr *)&header->pmode_gdt); 49 native_store_gdt((struct desc_ptr *)&header->pmode_gdt);
50 50
51 /*
52 * We have to check that we can write back the value, and not
53 * just read it. At least on 90 nm Pentium M (Family 6, Model
54 * 13), reading an invalid MSR is not guaranteed to trap, see
55 * Erratum X4 in "Intel Pentium M Processor on 90 nm Process
56 * with 2-MB L2 Cache and Intel® Processor A100 and A110 on 90
57 * nm process with 512-KB L2 Cache Specification Update".
58 */
51 if (!rdmsr_safe(MSR_EFER, 59 if (!rdmsr_safe(MSR_EFER,
52 &header->pmode_efer_low, 60 &header->pmode_efer_low,
53 &header->pmode_efer_high)) 61 &header->pmode_efer_high) &&
62 !wrmsr_safe(MSR_EFER,
63 header->pmode_efer_low,
64 header->pmode_efer_high))
54 header->pmode_behavior |= (1 << WAKEUP_BEHAVIOR_RESTORE_EFER); 65 header->pmode_behavior |= (1 << WAKEUP_BEHAVIOR_RESTORE_EFER);
55#endif /* !CONFIG_64BIT */ 66#endif /* !CONFIG_64BIT */
56 67
@@ -61,7 +72,10 @@ int x86_acpi_suspend_lowlevel(void)
61 } 72 }
62 if (!rdmsr_safe(MSR_IA32_MISC_ENABLE, 73 if (!rdmsr_safe(MSR_IA32_MISC_ENABLE,
63 &header->pmode_misc_en_low, 74 &header->pmode_misc_en_low,
64 &header->pmode_misc_en_high)) 75 &header->pmode_misc_en_high) &&
76 !wrmsr_safe(MSR_IA32_MISC_ENABLE,
77 header->pmode_misc_en_low,
78 header->pmode_misc_en_high))
65 header->pmode_behavior |= 79 header->pmode_behavior |=
66 (1 << WAKEUP_BEHAVIOR_RESTORE_MISC_ENABLE); 80 (1 << WAKEUP_BEHAVIOR_RESTORE_MISC_ENABLE);
67 header->realmode_flags = acpi_realmode_flags; 81 header->realmode_flags = acpi_realmode_flags;
diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c
index 99663b59123a..eca89c53a7f5 100644
--- a/arch/x86/kernel/apic/apic.c
+++ b/arch/x86/kernel/apic/apic.c
@@ -58,7 +58,7 @@
58 58
59unsigned int num_processors; 59unsigned int num_processors;
60 60
61unsigned disabled_cpus __cpuinitdata; 61unsigned disabled_cpus;
62 62
63/* Processor that is doing the boot up */ 63/* Processor that is doing the boot up */
64unsigned int boot_cpu_physical_apicid = -1U; 64unsigned int boot_cpu_physical_apicid = -1U;
@@ -544,7 +544,7 @@ static DEFINE_PER_CPU(struct clock_event_device, lapic_events);
544 * Setup the local APIC timer for this CPU. Copy the initialized values 544 * Setup the local APIC timer for this CPU. Copy the initialized values
545 * of the boot CPU and register the clock event in the framework. 545 * of the boot CPU and register the clock event in the framework.
546 */ 546 */
547static void __cpuinit setup_APIC_timer(void) 547static void setup_APIC_timer(void)
548{ 548{
549 struct clock_event_device *levt = &__get_cpu_var(lapic_events); 549 struct clock_event_device *levt = &__get_cpu_var(lapic_events);
550 550
@@ -866,7 +866,7 @@ void __init setup_boot_APIC_clock(void)
866 setup_APIC_timer(); 866 setup_APIC_timer();
867} 867}
868 868
869void __cpuinit setup_secondary_APIC_clock(void) 869void setup_secondary_APIC_clock(void)
870{ 870{
871 setup_APIC_timer(); 871 setup_APIC_timer();
872} 872}
@@ -1229,7 +1229,7 @@ void __init init_bsp_APIC(void)
1229 apic_write(APIC_LVT1, value); 1229 apic_write(APIC_LVT1, value);
1230} 1230}
1231 1231
1232static void __cpuinit lapic_setup_esr(void) 1232static void lapic_setup_esr(void)
1233{ 1233{
1234 unsigned int oldvalue, value, maxlvt; 1234 unsigned int oldvalue, value, maxlvt;
1235 1235
@@ -1276,7 +1276,7 @@ static void __cpuinit lapic_setup_esr(void)
1276 * Used to setup local APIC while initializing BSP or bringin up APs. 1276 * Used to setup local APIC while initializing BSP or bringin up APs.
1277 * Always called with preemption disabled. 1277 * Always called with preemption disabled.
1278 */ 1278 */
1279void __cpuinit setup_local_APIC(void) 1279void setup_local_APIC(void)
1280{ 1280{
1281 int cpu = smp_processor_id(); 1281 int cpu = smp_processor_id();
1282 unsigned int value, queued; 1282 unsigned int value, queued;
@@ -1471,7 +1471,7 @@ void __cpuinit setup_local_APIC(void)
1471#endif 1471#endif
1472} 1472}
1473 1473
1474void __cpuinit end_local_APIC_setup(void) 1474void end_local_APIC_setup(void)
1475{ 1475{
1476 lapic_setup_esr(); 1476 lapic_setup_esr();
1477 1477
@@ -2107,7 +2107,7 @@ void disconnect_bsp_APIC(int virt_wire_setup)
2107 apic_write(APIC_LVT1, value); 2107 apic_write(APIC_LVT1, value);
2108} 2108}
2109 2109
2110void __cpuinit generic_processor_info(int apicid, int version) 2110void generic_processor_info(int apicid, int version)
2111{ 2111{
2112 int cpu, max = nr_cpu_ids; 2112 int cpu, max = nr_cpu_ids;
2113 bool boot_cpu_detected = physid_isset(boot_cpu_physical_apicid, 2113 bool boot_cpu_detected = physid_isset(boot_cpu_physical_apicid,
@@ -2377,7 +2377,7 @@ static struct syscore_ops lapic_syscore_ops = {
2377 .suspend = lapic_suspend, 2377 .suspend = lapic_suspend,
2378}; 2378};
2379 2379
2380static void __cpuinit apic_pm_activate(void) 2380static void apic_pm_activate(void)
2381{ 2381{
2382 apic_pm_state.active = 1; 2382 apic_pm_state.active = 1;
2383} 2383}
@@ -2402,7 +2402,7 @@ static void apic_pm_activate(void) { }
2402 2402
2403#ifdef CONFIG_X86_64 2403#ifdef CONFIG_X86_64
2404 2404
2405static int __cpuinit apic_cluster_num(void) 2405static int apic_cluster_num(void)
2406{ 2406{
2407 int i, clusters, zeros; 2407 int i, clusters, zeros;
2408 unsigned id; 2408 unsigned id;
@@ -2447,10 +2447,10 @@ static int __cpuinit apic_cluster_num(void)
2447 return clusters; 2447 return clusters;
2448} 2448}
2449 2449
2450static int __cpuinitdata multi_checked; 2450static int multi_checked;
2451static int __cpuinitdata multi; 2451static int multi;
2452 2452
2453static int __cpuinit set_multi(const struct dmi_system_id *d) 2453static int set_multi(const struct dmi_system_id *d)
2454{ 2454{
2455 if (multi) 2455 if (multi)
2456 return 0; 2456 return 0;
@@ -2459,7 +2459,7 @@ static int __cpuinit set_multi(const struct dmi_system_id *d)
2459 return 0; 2459 return 0;
2460} 2460}
2461 2461
2462static const __cpuinitconst struct dmi_system_id multi_dmi_table[] = { 2462static const struct dmi_system_id multi_dmi_table[] = {
2463 { 2463 {
2464 .callback = set_multi, 2464 .callback = set_multi,
2465 .ident = "IBM System Summit2", 2465 .ident = "IBM System Summit2",
@@ -2471,7 +2471,7 @@ static const __cpuinitconst struct dmi_system_id multi_dmi_table[] = {
2471 {} 2471 {}
2472}; 2472};
2473 2473
2474static void __cpuinit dmi_check_multi(void) 2474static void dmi_check_multi(void)
2475{ 2475{
2476 if (multi_checked) 2476 if (multi_checked)
2477 return; 2477 return;
@@ -2488,7 +2488,7 @@ static void __cpuinit dmi_check_multi(void)
2488 * multi-chassis. 2488 * multi-chassis.
2489 * Use DMI to check them 2489 * Use DMI to check them
2490 */ 2490 */
2491__cpuinit int apic_is_clustered_box(void) 2491int apic_is_clustered_box(void)
2492{ 2492{
2493 dmi_check_multi(); 2493 dmi_check_multi();
2494 if (multi) 2494 if (multi)
diff --git a/arch/x86/kernel/apic/apic_numachip.c b/arch/x86/kernel/apic/apic_numachip.c
index 9a9110918ca7..3e67f9e3d7ef 100644
--- a/arch/x86/kernel/apic/apic_numachip.c
+++ b/arch/x86/kernel/apic/apic_numachip.c
@@ -74,7 +74,7 @@ static int numachip_phys_pkg_id(int initial_apic_id, int index_msb)
74 return initial_apic_id >> index_msb; 74 return initial_apic_id >> index_msb;
75} 75}
76 76
77static int __cpuinit numachip_wakeup_secondary(int phys_apicid, unsigned long start_rip) 77static int numachip_wakeup_secondary(int phys_apicid, unsigned long start_rip)
78{ 78{
79 union numachip_csr_g3_ext_irq_gen int_gen; 79 union numachip_csr_g3_ext_irq_gen int_gen;
80 80
diff --git a/arch/x86/kernel/apic/es7000_32.c b/arch/x86/kernel/apic/es7000_32.c
index 0874799a98c6..c55224731b2d 100644
--- a/arch/x86/kernel/apic/es7000_32.c
+++ b/arch/x86/kernel/apic/es7000_32.c
@@ -130,7 +130,7 @@ int es7000_plat;
130 */ 130 */
131 131
132 132
133static int __cpuinit wakeup_secondary_cpu_via_mip(int cpu, unsigned long eip) 133static int wakeup_secondary_cpu_via_mip(int cpu, unsigned long eip)
134{ 134{
135 unsigned long vect = 0, psaival = 0; 135 unsigned long vect = 0, psaival = 0;
136 136
diff --git a/arch/x86/kernel/apic/numaq_32.c b/arch/x86/kernel/apic/numaq_32.c
index d661ee95cabf..1e42e8f305ee 100644
--- a/arch/x86/kernel/apic/numaq_32.c
+++ b/arch/x86/kernel/apic/numaq_32.c
@@ -105,7 +105,7 @@ static void __init smp_dump_qct(void)
105 } 105 }
106} 106}
107 107
108void __cpuinit numaq_tsc_disable(void) 108void numaq_tsc_disable(void)
109{ 109{
110 if (!found_numaq) 110 if (!found_numaq)
111 return; 111 return;
diff --git a/arch/x86/kernel/apic/x2apic_cluster.c b/arch/x86/kernel/apic/x2apic_cluster.c
index c88baa4ff0e5..140e29db478d 100644
--- a/arch/x86/kernel/apic/x2apic_cluster.c
+++ b/arch/x86/kernel/apic/x2apic_cluster.c
@@ -148,7 +148,7 @@ static void init_x2apic_ldr(void)
148 /* 148 /*
149 * At CPU state changes, update the x2apic cluster sibling info. 149 * At CPU state changes, update the x2apic cluster sibling info.
150 */ 150 */
151static int __cpuinit 151static int
152update_clusterinfo(struct notifier_block *nfb, unsigned long action, void *hcpu) 152update_clusterinfo(struct notifier_block *nfb, unsigned long action, void *hcpu)
153{ 153{
154 unsigned int this_cpu = (unsigned long)hcpu; 154 unsigned int this_cpu = (unsigned long)hcpu;
diff --git a/arch/x86/kernel/apic/x2apic_uv_x.c b/arch/x86/kernel/apic/x2apic_uv_x.c
index 63092afb142e..1191ac1c9d25 100644
--- a/arch/x86/kernel/apic/x2apic_uv_x.c
+++ b/arch/x86/kernel/apic/x2apic_uv_x.c
@@ -209,7 +209,7 @@ EXPORT_SYMBOL_GPL(uv_possible_blades);
209unsigned long sn_rtc_cycles_per_second; 209unsigned long sn_rtc_cycles_per_second;
210EXPORT_SYMBOL(sn_rtc_cycles_per_second); 210EXPORT_SYMBOL(sn_rtc_cycles_per_second);
211 211
212static int __cpuinit uv_wakeup_secondary(int phys_apicid, unsigned long start_rip) 212static int uv_wakeup_secondary(int phys_apicid, unsigned long start_rip)
213{ 213{
214#ifdef CONFIG_SMP 214#ifdef CONFIG_SMP
215 unsigned long val; 215 unsigned long val;
@@ -416,7 +416,7 @@ static struct apic __refdata apic_x2apic_uv_x = {
416 .safe_wait_icr_idle = native_safe_x2apic_wait_icr_idle, 416 .safe_wait_icr_idle = native_safe_x2apic_wait_icr_idle,
417}; 417};
418 418
419static __cpuinit void set_x2apic_extra_bits(int pnode) 419static void set_x2apic_extra_bits(int pnode)
420{ 420{
421 __this_cpu_write(x2apic_extra_bits, pnode << uvh_apicid.s.pnode_shift); 421 __this_cpu_write(x2apic_extra_bits, pnode << uvh_apicid.s.pnode_shift);
422} 422}
@@ -735,7 +735,7 @@ static void uv_heartbeat(unsigned long ignored)
735 mod_timer_pinned(timer, jiffies + SCIR_CPU_HB_INTERVAL); 735 mod_timer_pinned(timer, jiffies + SCIR_CPU_HB_INTERVAL);
736} 736}
737 737
738static void __cpuinit uv_heartbeat_enable(int cpu) 738static void uv_heartbeat_enable(int cpu)
739{ 739{
740 while (!uv_cpu_hub_info(cpu)->scir.enabled) { 740 while (!uv_cpu_hub_info(cpu)->scir.enabled) {
741 struct timer_list *timer = &uv_cpu_hub_info(cpu)->scir.timer; 741 struct timer_list *timer = &uv_cpu_hub_info(cpu)->scir.timer;
@@ -752,7 +752,7 @@ static void __cpuinit uv_heartbeat_enable(int cpu)
752} 752}
753 753
754#ifdef CONFIG_HOTPLUG_CPU 754#ifdef CONFIG_HOTPLUG_CPU
755static void __cpuinit uv_heartbeat_disable(int cpu) 755static void uv_heartbeat_disable(int cpu)
756{ 756{
757 if (uv_cpu_hub_info(cpu)->scir.enabled) { 757 if (uv_cpu_hub_info(cpu)->scir.enabled) {
758 uv_cpu_hub_info(cpu)->scir.enabled = 0; 758 uv_cpu_hub_info(cpu)->scir.enabled = 0;
@@ -764,8 +764,8 @@ static void __cpuinit uv_heartbeat_disable(int cpu)
764/* 764/*
765 * cpu hotplug notifier 765 * cpu hotplug notifier
766 */ 766 */
767static __cpuinit int uv_scir_cpu_notify(struct notifier_block *self, 767static int uv_scir_cpu_notify(struct notifier_block *self, unsigned long action,
768 unsigned long action, void *hcpu) 768 void *hcpu)
769{ 769{
770 long cpu = (long)hcpu; 770 long cpu = (long)hcpu;
771 771
@@ -835,7 +835,7 @@ int uv_set_vga_state(struct pci_dev *pdev, bool decode,
835 * Called on each cpu to initialize the per_cpu UV data area. 835 * Called on each cpu to initialize the per_cpu UV data area.
836 * FIXME: hotplug not supported yet 836 * FIXME: hotplug not supported yet
837 */ 837 */
838void __cpuinit uv_cpu_init(void) 838void uv_cpu_init(void)
839{ 839{
840 /* CPU 0 initilization will be done via uv_system_init. */ 840 /* CPU 0 initilization will be done via uv_system_init. */
841 if (!uv_blade_info) 841 if (!uv_blade_info)
diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c
index c587a8757227..f654ecefea5b 100644
--- a/arch/x86/kernel/cpu/amd.c
+++ b/arch/x86/kernel/cpu/amd.c
@@ -69,7 +69,7 @@ static inline int wrmsrl_amd_safe(unsigned msr, unsigned long long val)
69extern void vide(void); 69extern void vide(void);
70__asm__(".align 4\nvide: ret"); 70__asm__(".align 4\nvide: ret");
71 71
72static void __cpuinit init_amd_k5(struct cpuinfo_x86 *c) 72static void init_amd_k5(struct cpuinfo_x86 *c)
73{ 73{
74/* 74/*
75 * General Systems BIOSen alias the cpu frequency registers 75 * General Systems BIOSen alias the cpu frequency registers
@@ -87,7 +87,7 @@ static void __cpuinit init_amd_k5(struct cpuinfo_x86 *c)
87} 87}
88 88
89 89
90static void __cpuinit init_amd_k6(struct cpuinfo_x86 *c) 90static void init_amd_k6(struct cpuinfo_x86 *c)
91{ 91{
92 u32 l, h; 92 u32 l, h;
93 int mbytes = get_num_physpages() >> (20-PAGE_SHIFT); 93 int mbytes = get_num_physpages() >> (20-PAGE_SHIFT);
@@ -179,7 +179,7 @@ static void __cpuinit init_amd_k6(struct cpuinfo_x86 *c)
179 } 179 }
180} 180}
181 181
182static void __cpuinit amd_k7_smp_check(struct cpuinfo_x86 *c) 182static void amd_k7_smp_check(struct cpuinfo_x86 *c)
183{ 183{
184 /* calling is from identify_secondary_cpu() ? */ 184 /* calling is from identify_secondary_cpu() ? */
185 if (!c->cpu_index) 185 if (!c->cpu_index)
@@ -222,7 +222,7 @@ static void __cpuinit amd_k7_smp_check(struct cpuinfo_x86 *c)
222 add_taint(TAINT_UNSAFE_SMP, LOCKDEP_NOW_UNRELIABLE); 222 add_taint(TAINT_UNSAFE_SMP, LOCKDEP_NOW_UNRELIABLE);
223} 223}
224 224
225static void __cpuinit init_amd_k7(struct cpuinfo_x86 *c) 225static void init_amd_k7(struct cpuinfo_x86 *c)
226{ 226{
227 u32 l, h; 227 u32 l, h;
228 228
@@ -267,7 +267,7 @@ static void __cpuinit init_amd_k7(struct cpuinfo_x86 *c)
267 * To workaround broken NUMA config. Read the comment in 267 * To workaround broken NUMA config. Read the comment in
268 * srat_detect_node(). 268 * srat_detect_node().
269 */ 269 */
270static int __cpuinit nearby_node(int apicid) 270static int nearby_node(int apicid)
271{ 271{
272 int i, node; 272 int i, node;
273 273
@@ -292,7 +292,7 @@ static int __cpuinit nearby_node(int apicid)
292 * (2) AMD processors supporting compute units 292 * (2) AMD processors supporting compute units
293 */ 293 */
294#ifdef CONFIG_X86_HT 294#ifdef CONFIG_X86_HT
295static void __cpuinit amd_get_topology(struct cpuinfo_x86 *c) 295static void amd_get_topology(struct cpuinfo_x86 *c)
296{ 296{
297 u32 nodes, cores_per_cu = 1; 297 u32 nodes, cores_per_cu = 1;
298 u8 node_id; 298 u8 node_id;
@@ -342,7 +342,7 @@ static void __cpuinit amd_get_topology(struct cpuinfo_x86 *c)
342 * On a AMD dual core setup the lower bits of the APIC id distingush the cores. 342 * On a AMD dual core setup the lower bits of the APIC id distingush the cores.
343 * Assumes number of cores is a power of two. 343 * Assumes number of cores is a power of two.
344 */ 344 */
345static void __cpuinit amd_detect_cmp(struct cpuinfo_x86 *c) 345static void amd_detect_cmp(struct cpuinfo_x86 *c)
346{ 346{
347#ifdef CONFIG_X86_HT 347#ifdef CONFIG_X86_HT
348 unsigned bits; 348 unsigned bits;
@@ -369,7 +369,7 @@ u16 amd_get_nb_id(int cpu)
369} 369}
370EXPORT_SYMBOL_GPL(amd_get_nb_id); 370EXPORT_SYMBOL_GPL(amd_get_nb_id);
371 371
372static void __cpuinit srat_detect_node(struct cpuinfo_x86 *c) 372static void srat_detect_node(struct cpuinfo_x86 *c)
373{ 373{
374#ifdef CONFIG_NUMA 374#ifdef CONFIG_NUMA
375 int cpu = smp_processor_id(); 375 int cpu = smp_processor_id();
@@ -421,7 +421,7 @@ static void __cpuinit srat_detect_node(struct cpuinfo_x86 *c)
421#endif 421#endif
422} 422}
423 423
424static void __cpuinit early_init_amd_mc(struct cpuinfo_x86 *c) 424static void early_init_amd_mc(struct cpuinfo_x86 *c)
425{ 425{
426#ifdef CONFIG_X86_HT 426#ifdef CONFIG_X86_HT
427 unsigned bits, ecx; 427 unsigned bits, ecx;
@@ -447,7 +447,7 @@ static void __cpuinit early_init_amd_mc(struct cpuinfo_x86 *c)
447#endif 447#endif
448} 448}
449 449
450static void __cpuinit bsp_init_amd(struct cpuinfo_x86 *c) 450static void bsp_init_amd(struct cpuinfo_x86 *c)
451{ 451{
452 if (cpu_has(c, X86_FEATURE_CONSTANT_TSC)) { 452 if (cpu_has(c, X86_FEATURE_CONSTANT_TSC)) {
453 453
@@ -475,7 +475,7 @@ static void __cpuinit bsp_init_amd(struct cpuinfo_x86 *c)
475 } 475 }
476} 476}
477 477
478static void __cpuinit early_init_amd(struct cpuinfo_x86 *c) 478static void early_init_amd(struct cpuinfo_x86 *c)
479{ 479{
480 early_init_amd_mc(c); 480 early_init_amd_mc(c);
481 481
@@ -514,7 +514,7 @@ static const int amd_erratum_383[];
514static const int amd_erratum_400[]; 514static const int amd_erratum_400[];
515static bool cpu_has_amd_erratum(const int *erratum); 515static bool cpu_has_amd_erratum(const int *erratum);
516 516
517static void __cpuinit init_amd(struct cpuinfo_x86 *c) 517static void init_amd(struct cpuinfo_x86 *c)
518{ 518{
519 u32 dummy; 519 u32 dummy;
520 unsigned long long value; 520 unsigned long long value;
@@ -740,8 +740,7 @@ static void __cpuinit init_amd(struct cpuinfo_x86 *c)
740} 740}
741 741
742#ifdef CONFIG_X86_32 742#ifdef CONFIG_X86_32
743static unsigned int __cpuinit amd_size_cache(struct cpuinfo_x86 *c, 743static unsigned int amd_size_cache(struct cpuinfo_x86 *c, unsigned int size)
744 unsigned int size)
745{ 744{
746 /* AMD errata T13 (order #21922) */ 745 /* AMD errata T13 (order #21922) */
747 if ((c->x86 == 6)) { 746 if ((c->x86 == 6)) {
@@ -757,7 +756,7 @@ static unsigned int __cpuinit amd_size_cache(struct cpuinfo_x86 *c,
757} 756}
758#endif 757#endif
759 758
760static void __cpuinit cpu_set_tlb_flushall_shift(struct cpuinfo_x86 *c) 759static void cpu_set_tlb_flushall_shift(struct cpuinfo_x86 *c)
761{ 760{
762 tlb_flushall_shift = 5; 761 tlb_flushall_shift = 5;
763 762
@@ -765,7 +764,7 @@ static void __cpuinit cpu_set_tlb_flushall_shift(struct cpuinfo_x86 *c)
765 tlb_flushall_shift = 4; 764 tlb_flushall_shift = 4;
766} 765}
767 766
768static void __cpuinit cpu_detect_tlb_amd(struct cpuinfo_x86 *c) 767static void cpu_detect_tlb_amd(struct cpuinfo_x86 *c)
769{ 768{
770 u32 ebx, eax, ecx, edx; 769 u32 ebx, eax, ecx, edx;
771 u16 mask = 0xfff; 770 u16 mask = 0xfff;
@@ -820,7 +819,7 @@ static void __cpuinit cpu_detect_tlb_amd(struct cpuinfo_x86 *c)
820 cpu_set_tlb_flushall_shift(c); 819 cpu_set_tlb_flushall_shift(c);
821} 820}
822 821
823static const struct cpu_dev __cpuinitconst amd_cpu_dev = { 822static const struct cpu_dev amd_cpu_dev = {
824 .c_vendor = "AMD", 823 .c_vendor = "AMD",
825 .c_ident = { "AuthenticAMD" }, 824 .c_ident = { "AuthenticAMD" },
826#ifdef CONFIG_X86_32 825#ifdef CONFIG_X86_32
diff --git a/arch/x86/kernel/cpu/centaur.c b/arch/x86/kernel/cpu/centaur.c
index 159103c0b1f4..fbf6c3bc2400 100644
--- a/arch/x86/kernel/cpu/centaur.c
+++ b/arch/x86/kernel/cpu/centaur.c
@@ -11,7 +11,7 @@
11 11
12#ifdef CONFIG_X86_OOSTORE 12#ifdef CONFIG_X86_OOSTORE
13 13
14static u32 __cpuinit power2(u32 x) 14static u32 power2(u32 x)
15{ 15{
16 u32 s = 1; 16 u32 s = 1;
17 17
@@ -25,7 +25,7 @@ static u32 __cpuinit power2(u32 x)
25/* 25/*
26 * Set up an actual MCR 26 * Set up an actual MCR
27 */ 27 */
28static void __cpuinit centaur_mcr_insert(int reg, u32 base, u32 size, int key) 28static void centaur_mcr_insert(int reg, u32 base, u32 size, int key)
29{ 29{
30 u32 lo, hi; 30 u32 lo, hi;
31 31
@@ -42,7 +42,7 @@ static void __cpuinit centaur_mcr_insert(int reg, u32 base, u32 size, int key)
42 * 42 *
43 * Shortcut: We know you can't put 4Gig of RAM on a winchip 43 * Shortcut: We know you can't put 4Gig of RAM on a winchip
44 */ 44 */
45static u32 __cpuinit ramtop(void) 45static u32 ramtop(void)
46{ 46{
47 u32 clip = 0xFFFFFFFFUL; 47 u32 clip = 0xFFFFFFFFUL;
48 u32 top = 0; 48 u32 top = 0;
@@ -91,7 +91,7 @@ static u32 __cpuinit ramtop(void)
91/* 91/*
92 * Compute a set of MCR's to give maximum coverage 92 * Compute a set of MCR's to give maximum coverage
93 */ 93 */
94static int __cpuinit centaur_mcr_compute(int nr, int key) 94static int centaur_mcr_compute(int nr, int key)
95{ 95{
96 u32 mem = ramtop(); 96 u32 mem = ramtop();
97 u32 root = power2(mem); 97 u32 root = power2(mem);
@@ -157,7 +157,7 @@ static int __cpuinit centaur_mcr_compute(int nr, int key)
157 return ct; 157 return ct;
158} 158}
159 159
160static void __cpuinit centaur_create_optimal_mcr(void) 160static void centaur_create_optimal_mcr(void)
161{ 161{
162 int used; 162 int used;
163 int i; 163 int i;
@@ -181,7 +181,7 @@ static void __cpuinit centaur_create_optimal_mcr(void)
181 wrmsr(MSR_IDT_MCR0+i, 0, 0); 181 wrmsr(MSR_IDT_MCR0+i, 0, 0);
182} 182}
183 183
184static void __cpuinit winchip2_create_optimal_mcr(void) 184static void winchip2_create_optimal_mcr(void)
185{ 185{
186 u32 lo, hi; 186 u32 lo, hi;
187 int used; 187 int used;
@@ -217,7 +217,7 @@ static void __cpuinit winchip2_create_optimal_mcr(void)
217/* 217/*
218 * Handle the MCR key on the Winchip 2. 218 * Handle the MCR key on the Winchip 2.
219 */ 219 */
220static void __cpuinit winchip2_unprotect_mcr(void) 220static void winchip2_unprotect_mcr(void)
221{ 221{
222 u32 lo, hi; 222 u32 lo, hi;
223 u32 key; 223 u32 key;
@@ -229,7 +229,7 @@ static void __cpuinit winchip2_unprotect_mcr(void)
229 wrmsr(MSR_IDT_MCR_CTRL, lo, hi); 229 wrmsr(MSR_IDT_MCR_CTRL, lo, hi);
230} 230}
231 231
232static void __cpuinit winchip2_protect_mcr(void) 232static void winchip2_protect_mcr(void)
233{ 233{
234 u32 lo, hi; 234 u32 lo, hi;
235 235
@@ -247,7 +247,7 @@ static void __cpuinit winchip2_protect_mcr(void)
247#define RNG_ENABLED (1 << 3) 247#define RNG_ENABLED (1 << 3)
248#define RNG_ENABLE (1 << 6) /* MSR_VIA_RNG */ 248#define RNG_ENABLE (1 << 6) /* MSR_VIA_RNG */
249 249
250static void __cpuinit init_c3(struct cpuinfo_x86 *c) 250static void init_c3(struct cpuinfo_x86 *c)
251{ 251{
252 u32 lo, hi; 252 u32 lo, hi;
253 253
@@ -318,7 +318,7 @@ enum {
318 EAMD3D = 1<<20, 318 EAMD3D = 1<<20,
319}; 319};
320 320
321static void __cpuinit early_init_centaur(struct cpuinfo_x86 *c) 321static void early_init_centaur(struct cpuinfo_x86 *c)
322{ 322{
323 switch (c->x86) { 323 switch (c->x86) {
324#ifdef CONFIG_X86_32 324#ifdef CONFIG_X86_32
@@ -337,7 +337,7 @@ static void __cpuinit early_init_centaur(struct cpuinfo_x86 *c)
337#endif 337#endif
338} 338}
339 339
340static void __cpuinit init_centaur(struct cpuinfo_x86 *c) 340static void init_centaur(struct cpuinfo_x86 *c)
341{ 341{
342#ifdef CONFIG_X86_32 342#ifdef CONFIG_X86_32
343 char *name; 343 char *name;
@@ -468,7 +468,7 @@ static void __cpuinit init_centaur(struct cpuinfo_x86 *c)
468#endif 468#endif
469} 469}
470 470
471static unsigned int __cpuinit 471static unsigned int
472centaur_size_cache(struct cpuinfo_x86 *c, unsigned int size) 472centaur_size_cache(struct cpuinfo_x86 *c, unsigned int size)
473{ 473{
474#ifdef CONFIG_X86_32 474#ifdef CONFIG_X86_32
@@ -488,7 +488,7 @@ centaur_size_cache(struct cpuinfo_x86 *c, unsigned int size)
488 return size; 488 return size;
489} 489}
490 490
491static const struct cpu_dev __cpuinitconst centaur_cpu_dev = { 491static const struct cpu_dev centaur_cpu_dev = {
492 .c_vendor = "Centaur", 492 .c_vendor = "Centaur",
493 .c_ident = { "CentaurHauls" }, 493 .c_ident = { "CentaurHauls" },
494 .c_early_init = early_init_centaur, 494 .c_early_init = early_init_centaur,
diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
index 548bd039784e..25eb2747b063 100644
--- a/arch/x86/kernel/cpu/common.c
+++ b/arch/x86/kernel/cpu/common.c
@@ -63,7 +63,7 @@ void __init setup_cpu_local_masks(void)
63 alloc_bootmem_cpumask_var(&cpu_sibling_setup_mask); 63 alloc_bootmem_cpumask_var(&cpu_sibling_setup_mask);
64} 64}
65 65
66static void __cpuinit default_init(struct cpuinfo_x86 *c) 66static void default_init(struct cpuinfo_x86 *c)
67{ 67{
68#ifdef CONFIG_X86_64 68#ifdef CONFIG_X86_64
69 cpu_detect_cache_sizes(c); 69 cpu_detect_cache_sizes(c);
@@ -80,13 +80,13 @@ static void __cpuinit default_init(struct cpuinfo_x86 *c)
80#endif 80#endif
81} 81}
82 82
83static const struct cpu_dev __cpuinitconst default_cpu = { 83static const struct cpu_dev default_cpu = {
84 .c_init = default_init, 84 .c_init = default_init,
85 .c_vendor = "Unknown", 85 .c_vendor = "Unknown",
86 .c_x86_vendor = X86_VENDOR_UNKNOWN, 86 .c_x86_vendor = X86_VENDOR_UNKNOWN,
87}; 87};
88 88
89static const struct cpu_dev *this_cpu __cpuinitdata = &default_cpu; 89static const struct cpu_dev *this_cpu = &default_cpu;
90 90
91DEFINE_PER_CPU_PAGE_ALIGNED(struct gdt_page, gdt_page) = { .gdt = { 91DEFINE_PER_CPU_PAGE_ALIGNED(struct gdt_page, gdt_page) = { .gdt = {
92#ifdef CONFIG_X86_64 92#ifdef CONFIG_X86_64
@@ -160,8 +160,8 @@ static int __init x86_xsaveopt_setup(char *s)
160__setup("noxsaveopt", x86_xsaveopt_setup); 160__setup("noxsaveopt", x86_xsaveopt_setup);
161 161
162#ifdef CONFIG_X86_32 162#ifdef CONFIG_X86_32
163static int cachesize_override __cpuinitdata = -1; 163static int cachesize_override = -1;
164static int disable_x86_serial_nr __cpuinitdata = 1; 164static int disable_x86_serial_nr = 1;
165 165
166static int __init cachesize_setup(char *str) 166static int __init cachesize_setup(char *str)
167{ 167{
@@ -215,12 +215,12 @@ static inline int flag_is_changeable_p(u32 flag)
215} 215}
216 216
217/* Probe for the CPUID instruction */ 217/* Probe for the CPUID instruction */
218int __cpuinit have_cpuid_p(void) 218int have_cpuid_p(void)
219{ 219{
220 return flag_is_changeable_p(X86_EFLAGS_ID); 220 return flag_is_changeable_p(X86_EFLAGS_ID);
221} 221}
222 222
223static void __cpuinit squash_the_stupid_serial_number(struct cpuinfo_x86 *c) 223static void squash_the_stupid_serial_number(struct cpuinfo_x86 *c)
224{ 224{
225 unsigned long lo, hi; 225 unsigned long lo, hi;
226 226
@@ -298,7 +298,7 @@ struct cpuid_dependent_feature {
298 u32 level; 298 u32 level;
299}; 299};
300 300
301static const struct cpuid_dependent_feature __cpuinitconst 301static const struct cpuid_dependent_feature
302cpuid_dependent_features[] = { 302cpuid_dependent_features[] = {
303 { X86_FEATURE_MWAIT, 0x00000005 }, 303 { X86_FEATURE_MWAIT, 0x00000005 },
304 { X86_FEATURE_DCA, 0x00000009 }, 304 { X86_FEATURE_DCA, 0x00000009 },
@@ -306,7 +306,7 @@ cpuid_dependent_features[] = {
306 { 0, 0 } 306 { 0, 0 }
307}; 307};
308 308
309static void __cpuinit filter_cpuid_features(struct cpuinfo_x86 *c, bool warn) 309static void filter_cpuid_features(struct cpuinfo_x86 *c, bool warn)
310{ 310{
311 const struct cpuid_dependent_feature *df; 311 const struct cpuid_dependent_feature *df;
312 312
@@ -344,7 +344,7 @@ static void __cpuinit filter_cpuid_features(struct cpuinfo_x86 *c, bool warn)
344 */ 344 */
345 345
346/* Look up CPU names by table lookup. */ 346/* Look up CPU names by table lookup. */
347static const char *__cpuinit table_lookup_model(struct cpuinfo_x86 *c) 347static const char *table_lookup_model(struct cpuinfo_x86 *c)
348{ 348{
349 const struct cpu_model_info *info; 349 const struct cpu_model_info *info;
350 350
@@ -364,8 +364,8 @@ static const char *__cpuinit table_lookup_model(struct cpuinfo_x86 *c)
364 return NULL; /* Not found */ 364 return NULL; /* Not found */
365} 365}
366 366
367__u32 cpu_caps_cleared[NCAPINTS] __cpuinitdata; 367__u32 cpu_caps_cleared[NCAPINTS];
368__u32 cpu_caps_set[NCAPINTS] __cpuinitdata; 368__u32 cpu_caps_set[NCAPINTS];
369 369
370void load_percpu_segment(int cpu) 370void load_percpu_segment(int cpu)
371{ 371{
@@ -394,9 +394,9 @@ void switch_to_new_gdt(int cpu)
394 load_percpu_segment(cpu); 394 load_percpu_segment(cpu);
395} 395}
396 396
397static const struct cpu_dev *__cpuinitdata cpu_devs[X86_VENDOR_NUM] = {}; 397static const struct cpu_dev *cpu_devs[X86_VENDOR_NUM] = {};
398 398
399static void __cpuinit get_model_name(struct cpuinfo_x86 *c) 399static void get_model_name(struct cpuinfo_x86 *c)
400{ 400{
401 unsigned int *v; 401 unsigned int *v;
402 char *p, *q; 402 char *p, *q;
@@ -425,7 +425,7 @@ static void __cpuinit get_model_name(struct cpuinfo_x86 *c)
425 } 425 }
426} 426}
427 427
428void __cpuinit cpu_detect_cache_sizes(struct cpuinfo_x86 *c) 428void cpu_detect_cache_sizes(struct cpuinfo_x86 *c)
429{ 429{
430 unsigned int n, dummy, ebx, ecx, edx, l2size; 430 unsigned int n, dummy, ebx, ecx, edx, l2size;
431 431
@@ -479,7 +479,7 @@ u16 __read_mostly tlb_lld_4m[NR_INFO];
479 */ 479 */
480s8 __read_mostly tlb_flushall_shift = -1; 480s8 __read_mostly tlb_flushall_shift = -1;
481 481
482void __cpuinit cpu_detect_tlb(struct cpuinfo_x86 *c) 482void cpu_detect_tlb(struct cpuinfo_x86 *c)
483{ 483{
484 if (this_cpu->c_detect_tlb) 484 if (this_cpu->c_detect_tlb)
485 this_cpu->c_detect_tlb(c); 485 this_cpu->c_detect_tlb(c);
@@ -493,7 +493,7 @@ void __cpuinit cpu_detect_tlb(struct cpuinfo_x86 *c)
493 tlb_flushall_shift); 493 tlb_flushall_shift);
494} 494}
495 495
496void __cpuinit detect_ht(struct cpuinfo_x86 *c) 496void detect_ht(struct cpuinfo_x86 *c)
497{ 497{
498#ifdef CONFIG_X86_HT 498#ifdef CONFIG_X86_HT
499 u32 eax, ebx, ecx, edx; 499 u32 eax, ebx, ecx, edx;
@@ -544,7 +544,7 @@ out:
544#endif 544#endif
545} 545}
546 546
547static void __cpuinit get_cpu_vendor(struct cpuinfo_x86 *c) 547static void get_cpu_vendor(struct cpuinfo_x86 *c)
548{ 548{
549 char *v = c->x86_vendor_id; 549 char *v = c->x86_vendor_id;
550 int i; 550 int i;
@@ -571,7 +571,7 @@ static void __cpuinit get_cpu_vendor(struct cpuinfo_x86 *c)
571 this_cpu = &default_cpu; 571 this_cpu = &default_cpu;
572} 572}
573 573
574void __cpuinit cpu_detect(struct cpuinfo_x86 *c) 574void cpu_detect(struct cpuinfo_x86 *c)
575{ 575{
576 /* Get vendor name */ 576 /* Get vendor name */
577 cpuid(0x00000000, (unsigned int *)&c->cpuid_level, 577 cpuid(0x00000000, (unsigned int *)&c->cpuid_level,
@@ -601,7 +601,7 @@ void __cpuinit cpu_detect(struct cpuinfo_x86 *c)
601 } 601 }
602} 602}
603 603
604void __cpuinit get_cpu_cap(struct cpuinfo_x86 *c) 604void get_cpu_cap(struct cpuinfo_x86 *c)
605{ 605{
606 u32 tfms, xlvl; 606 u32 tfms, xlvl;
607 u32 ebx; 607 u32 ebx;
@@ -652,7 +652,7 @@ void __cpuinit get_cpu_cap(struct cpuinfo_x86 *c)
652 init_scattered_cpuid_features(c); 652 init_scattered_cpuid_features(c);
653} 653}
654 654
655static void __cpuinit identify_cpu_without_cpuid(struct cpuinfo_x86 *c) 655static void identify_cpu_without_cpuid(struct cpuinfo_x86 *c)
656{ 656{
657#ifdef CONFIG_X86_32 657#ifdef CONFIG_X86_32
658 int i; 658 int i;
@@ -769,7 +769,7 @@ void __init early_cpu_init(void)
769 * unless we can find a reliable way to detect all the broken cases. 769 * unless we can find a reliable way to detect all the broken cases.
770 * Enable it explicitly on 64-bit for non-constant inputs of cpu_has(). 770 * Enable it explicitly on 64-bit for non-constant inputs of cpu_has().
771 */ 771 */
772static void __cpuinit detect_nopl(struct cpuinfo_x86 *c) 772static void detect_nopl(struct cpuinfo_x86 *c)
773{ 773{
774#ifdef CONFIG_X86_32 774#ifdef CONFIG_X86_32
775 clear_cpu_cap(c, X86_FEATURE_NOPL); 775 clear_cpu_cap(c, X86_FEATURE_NOPL);
@@ -778,7 +778,7 @@ static void __cpuinit detect_nopl(struct cpuinfo_x86 *c)
778#endif 778#endif
779} 779}
780 780
781static void __cpuinit generic_identify(struct cpuinfo_x86 *c) 781static void generic_identify(struct cpuinfo_x86 *c)
782{ 782{
783 c->extended_cpuid_level = 0; 783 c->extended_cpuid_level = 0;
784 784
@@ -815,7 +815,7 @@ static void __cpuinit generic_identify(struct cpuinfo_x86 *c)
815/* 815/*
816 * This does the hard work of actually picking apart the CPU stuff... 816 * This does the hard work of actually picking apart the CPU stuff...
817 */ 817 */
818static void __cpuinit identify_cpu(struct cpuinfo_x86 *c) 818static void identify_cpu(struct cpuinfo_x86 *c)
819{ 819{
820 int i; 820 int i;
821 821
@@ -960,7 +960,7 @@ void __init identify_boot_cpu(void)
960 cpu_detect_tlb(&boot_cpu_data); 960 cpu_detect_tlb(&boot_cpu_data);
961} 961}
962 962
963void __cpuinit identify_secondary_cpu(struct cpuinfo_x86 *c) 963void identify_secondary_cpu(struct cpuinfo_x86 *c)
964{ 964{
965 BUG_ON(c == &boot_cpu_data); 965 BUG_ON(c == &boot_cpu_data);
966 identify_cpu(c); 966 identify_cpu(c);
@@ -975,14 +975,14 @@ struct msr_range {
975 unsigned max; 975 unsigned max;
976}; 976};
977 977
978static const struct msr_range msr_range_array[] __cpuinitconst = { 978static const struct msr_range msr_range_array[] = {
979 { 0x00000000, 0x00000418}, 979 { 0x00000000, 0x00000418},
980 { 0xc0000000, 0xc000040b}, 980 { 0xc0000000, 0xc000040b},
981 { 0xc0010000, 0xc0010142}, 981 { 0xc0010000, 0xc0010142},
982 { 0xc0011000, 0xc001103b}, 982 { 0xc0011000, 0xc001103b},
983}; 983};
984 984
985static void __cpuinit __print_cpu_msr(void) 985static void __print_cpu_msr(void)
986{ 986{
987 unsigned index_min, index_max; 987 unsigned index_min, index_max;
988 unsigned index; 988 unsigned index;
@@ -1001,7 +1001,7 @@ static void __cpuinit __print_cpu_msr(void)
1001 } 1001 }
1002} 1002}
1003 1003
1004static int show_msr __cpuinitdata; 1004static int show_msr;
1005 1005
1006static __init int setup_show_msr(char *arg) 1006static __init int setup_show_msr(char *arg)
1007{ 1007{
@@ -1022,7 +1022,7 @@ static __init int setup_noclflush(char *arg)
1022} 1022}
1023__setup("noclflush", setup_noclflush); 1023__setup("noclflush", setup_noclflush);
1024 1024
1025void __cpuinit print_cpu_info(struct cpuinfo_x86 *c) 1025void print_cpu_info(struct cpuinfo_x86 *c)
1026{ 1026{
1027 const char *vendor = NULL; 1027 const char *vendor = NULL;
1028 1028
@@ -1051,7 +1051,7 @@ void __cpuinit print_cpu_info(struct cpuinfo_x86 *c)
1051 print_cpu_msr(c); 1051 print_cpu_msr(c);
1052} 1052}
1053 1053
1054void __cpuinit print_cpu_msr(struct cpuinfo_x86 *c) 1054void print_cpu_msr(struct cpuinfo_x86 *c)
1055{ 1055{
1056 if (c->cpu_index < show_msr) 1056 if (c->cpu_index < show_msr)
1057 __print_cpu_msr(); 1057 __print_cpu_msr();
@@ -1216,7 +1216,7 @@ static void dbg_restore_debug_regs(void)
1216 */ 1216 */
1217#ifdef CONFIG_X86_64 1217#ifdef CONFIG_X86_64
1218 1218
1219void __cpuinit cpu_init(void) 1219void cpu_init(void)
1220{ 1220{
1221 struct orig_ist *oist; 1221 struct orig_ist *oist;
1222 struct task_struct *me; 1222 struct task_struct *me;
@@ -1315,7 +1315,7 @@ void __cpuinit cpu_init(void)
1315 1315
1316#else 1316#else
1317 1317
1318void __cpuinit cpu_init(void) 1318void cpu_init(void)
1319{ 1319{
1320 int cpu = smp_processor_id(); 1320 int cpu = smp_processor_id();
1321 struct task_struct *curr = current; 1321 struct task_struct *curr = current;
diff --git a/arch/x86/kernel/cpu/cyrix.c b/arch/x86/kernel/cpu/cyrix.c
index 7582f475b163..d0969c75ab54 100644
--- a/arch/x86/kernel/cpu/cyrix.c
+++ b/arch/x86/kernel/cpu/cyrix.c
@@ -15,7 +15,7 @@
15/* 15/*
16 * Read NSC/Cyrix DEVID registers (DIR) to get more detailed info. about the CPU 16 * Read NSC/Cyrix DEVID registers (DIR) to get more detailed info. about the CPU
17 */ 17 */
18static void __cpuinit __do_cyrix_devid(unsigned char *dir0, unsigned char *dir1) 18static void __do_cyrix_devid(unsigned char *dir0, unsigned char *dir1)
19{ 19{
20 unsigned char ccr2, ccr3; 20 unsigned char ccr2, ccr3;
21 21
@@ -44,7 +44,7 @@ static void __cpuinit __do_cyrix_devid(unsigned char *dir0, unsigned char *dir1)
44 } 44 }
45} 45}
46 46
47static void __cpuinit do_cyrix_devid(unsigned char *dir0, unsigned char *dir1) 47static void do_cyrix_devid(unsigned char *dir0, unsigned char *dir1)
48{ 48{
49 unsigned long flags; 49 unsigned long flags;
50 50
@@ -59,25 +59,25 @@ static void __cpuinit do_cyrix_devid(unsigned char *dir0, unsigned char *dir1)
59 * Actually since bugs.h doesn't even reference this perhaps someone should 59 * Actually since bugs.h doesn't even reference this perhaps someone should
60 * fix the documentation ??? 60 * fix the documentation ???
61 */ 61 */
62static unsigned char Cx86_dir0_msb __cpuinitdata = 0; 62static unsigned char Cx86_dir0_msb = 0;
63 63
64static const char __cpuinitconst Cx86_model[][9] = { 64static const char Cx86_model[][9] = {
65 "Cx486", "Cx486", "5x86 ", "6x86", "MediaGX ", "6x86MX ", 65 "Cx486", "Cx486", "5x86 ", "6x86", "MediaGX ", "6x86MX ",
66 "M II ", "Unknown" 66 "M II ", "Unknown"
67}; 67};
68static const char __cpuinitconst Cx486_name[][5] = { 68static const char Cx486_name[][5] = {
69 "SLC", "DLC", "SLC2", "DLC2", "SRx", "DRx", 69 "SLC", "DLC", "SLC2", "DLC2", "SRx", "DRx",
70 "SRx2", "DRx2" 70 "SRx2", "DRx2"
71}; 71};
72static const char __cpuinitconst Cx486S_name[][4] = { 72static const char Cx486S_name[][4] = {
73 "S", "S2", "Se", "S2e" 73 "S", "S2", "Se", "S2e"
74}; 74};
75static const char __cpuinitconst Cx486D_name[][4] = { 75static const char Cx486D_name[][4] = {
76 "DX", "DX2", "?", "?", "?", "DX4" 76 "DX", "DX2", "?", "?", "?", "DX4"
77}; 77};
78static char Cx86_cb[] __cpuinitdata = "?.5x Core/Bus Clock"; 78static char Cx86_cb[] = "?.5x Core/Bus Clock";
79static const char __cpuinitconst cyrix_model_mult1[] = "12??43"; 79static const char cyrix_model_mult1[] = "12??43";
80static const char __cpuinitconst cyrix_model_mult2[] = "12233445"; 80static const char cyrix_model_mult2[] = "12233445";
81 81
82/* 82/*
83 * Reset the slow-loop (SLOP) bit on the 686(L) which is set by some old 83 * Reset the slow-loop (SLOP) bit on the 686(L) which is set by some old
@@ -87,7 +87,7 @@ static const char __cpuinitconst cyrix_model_mult2[] = "12233445";
87 * FIXME: our newer udelay uses the tsc. We don't need to frob with SLOP 87 * FIXME: our newer udelay uses the tsc. We don't need to frob with SLOP
88 */ 88 */
89 89
90static void __cpuinit check_cx686_slop(struct cpuinfo_x86 *c) 90static void check_cx686_slop(struct cpuinfo_x86 *c)
91{ 91{
92 unsigned long flags; 92 unsigned long flags;
93 93
@@ -112,7 +112,7 @@ static void __cpuinit check_cx686_slop(struct cpuinfo_x86 *c)
112} 112}
113 113
114 114
115static void __cpuinit set_cx86_reorder(void) 115static void set_cx86_reorder(void)
116{ 116{
117 u8 ccr3; 117 u8 ccr3;
118 118
@@ -127,7 +127,7 @@ static void __cpuinit set_cx86_reorder(void)
127 setCx86(CX86_CCR3, ccr3); 127 setCx86(CX86_CCR3, ccr3);
128} 128}
129 129
130static void __cpuinit set_cx86_memwb(void) 130static void set_cx86_memwb(void)
131{ 131{
132 printk(KERN_INFO "Enable Memory-Write-back mode on Cyrix/NSC processor.\n"); 132 printk(KERN_INFO "Enable Memory-Write-back mode on Cyrix/NSC processor.\n");
133 133
@@ -143,7 +143,7 @@ static void __cpuinit set_cx86_memwb(void)
143 * Configure later MediaGX and/or Geode processor. 143 * Configure later MediaGX and/or Geode processor.
144 */ 144 */
145 145
146static void __cpuinit geode_configure(void) 146static void geode_configure(void)
147{ 147{
148 unsigned long flags; 148 unsigned long flags;
149 u8 ccr3; 149 u8 ccr3;
@@ -166,7 +166,7 @@ static void __cpuinit geode_configure(void)
166 local_irq_restore(flags); 166 local_irq_restore(flags);
167} 167}
168 168
169static void __cpuinit early_init_cyrix(struct cpuinfo_x86 *c) 169static void early_init_cyrix(struct cpuinfo_x86 *c)
170{ 170{
171 unsigned char dir0, dir0_msn, dir1 = 0; 171 unsigned char dir0, dir0_msn, dir1 = 0;
172 172
@@ -185,7 +185,7 @@ static void __cpuinit early_init_cyrix(struct cpuinfo_x86 *c)
185 } 185 }
186} 186}
187 187
188static void __cpuinit init_cyrix(struct cpuinfo_x86 *c) 188static void init_cyrix(struct cpuinfo_x86 *c)
189{ 189{
190 unsigned char dir0, dir0_msn, dir0_lsn, dir1 = 0; 190 unsigned char dir0, dir0_msn, dir0_lsn, dir1 = 0;
191 char *buf = c->x86_model_id; 191 char *buf = c->x86_model_id;
@@ -356,7 +356,7 @@ static void __cpuinit init_cyrix(struct cpuinfo_x86 *c)
356/* 356/*
357 * Handle National Semiconductor branded processors 357 * Handle National Semiconductor branded processors
358 */ 358 */
359static void __cpuinit init_nsc(struct cpuinfo_x86 *c) 359static void init_nsc(struct cpuinfo_x86 *c)
360{ 360{
361 /* 361 /*
362 * There may be GX1 processors in the wild that are branded 362 * There may be GX1 processors in the wild that are branded
@@ -405,7 +405,7 @@ static inline int test_cyrix_52div(void)
405 return (unsigned char) (test >> 8) == 0x02; 405 return (unsigned char) (test >> 8) == 0x02;
406} 406}
407 407
408static void __cpuinit cyrix_identify(struct cpuinfo_x86 *c) 408static void cyrix_identify(struct cpuinfo_x86 *c)
409{ 409{
410 /* Detect Cyrix with disabled CPUID */ 410 /* Detect Cyrix with disabled CPUID */
411 if (c->x86 == 4 && test_cyrix_52div()) { 411 if (c->x86 == 4 && test_cyrix_52div()) {
@@ -441,7 +441,7 @@ static void __cpuinit cyrix_identify(struct cpuinfo_x86 *c)
441 } 441 }
442} 442}
443 443
444static const struct cpu_dev __cpuinitconst cyrix_cpu_dev = { 444static const struct cpu_dev cyrix_cpu_dev = {
445 .c_vendor = "Cyrix", 445 .c_vendor = "Cyrix",
446 .c_ident = { "CyrixInstead" }, 446 .c_ident = { "CyrixInstead" },
447 .c_early_init = early_init_cyrix, 447 .c_early_init = early_init_cyrix,
@@ -452,7 +452,7 @@ static const struct cpu_dev __cpuinitconst cyrix_cpu_dev = {
452 452
453cpu_dev_register(cyrix_cpu_dev); 453cpu_dev_register(cyrix_cpu_dev);
454 454
455static const struct cpu_dev __cpuinitconst nsc_cpu_dev = { 455static const struct cpu_dev nsc_cpu_dev = {
456 .c_vendor = "NSC", 456 .c_vendor = "NSC",
457 .c_ident = { "Geode by NSC" }, 457 .c_ident = { "Geode by NSC" },
458 .c_init = init_nsc, 458 .c_init = init_nsc,
diff --git a/arch/x86/kernel/cpu/hypervisor.c b/arch/x86/kernel/cpu/hypervisor.c
index 1e7e84a02eba..87279212d318 100644
--- a/arch/x86/kernel/cpu/hypervisor.c
+++ b/arch/x86/kernel/cpu/hypervisor.c
@@ -60,7 +60,7 @@ detect_hypervisor_vendor(void)
60 } 60 }
61} 61}
62 62
63void __cpuinit init_hypervisor(struct cpuinfo_x86 *c) 63void init_hypervisor(struct cpuinfo_x86 *c)
64{ 64{
65 if (x86_hyper && x86_hyper->set_cpu_features) 65 if (x86_hyper && x86_hyper->set_cpu_features)
66 x86_hyper->set_cpu_features(c); 66 x86_hyper->set_cpu_features(c);
diff --git a/arch/x86/kernel/cpu/intel.c b/arch/x86/kernel/cpu/intel.c
index 9b0c441c03f5..ec7299566f79 100644
--- a/arch/x86/kernel/cpu/intel.c
+++ b/arch/x86/kernel/cpu/intel.c
@@ -26,7 +26,7 @@
26#include <asm/apic.h> 26#include <asm/apic.h>
27#endif 27#endif
28 28
29static void __cpuinit early_init_intel(struct cpuinfo_x86 *c) 29static void early_init_intel(struct cpuinfo_x86 *c)
30{ 30{
31 u64 misc_enable; 31 u64 misc_enable;
32 32
@@ -163,7 +163,7 @@ static void __cpuinit early_init_intel(struct cpuinfo_x86 *c)
163 * This is called before we do cpu ident work 163 * This is called before we do cpu ident work
164 */ 164 */
165 165
166int __cpuinit ppro_with_ram_bug(void) 166int ppro_with_ram_bug(void)
167{ 167{
168 /* Uses data from early_cpu_detect now */ 168 /* Uses data from early_cpu_detect now */
169 if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL && 169 if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL &&
@@ -176,7 +176,7 @@ int __cpuinit ppro_with_ram_bug(void)
176 return 0; 176 return 0;
177} 177}
178 178
179static void __cpuinit intel_smp_check(struct cpuinfo_x86 *c) 179static void intel_smp_check(struct cpuinfo_x86 *c)
180{ 180{
181 /* calling is from identify_secondary_cpu() ? */ 181 /* calling is from identify_secondary_cpu() ? */
182 if (!c->cpu_index) 182 if (!c->cpu_index)
@@ -196,7 +196,7 @@ static void __cpuinit intel_smp_check(struct cpuinfo_x86 *c)
196 } 196 }
197} 197}
198 198
199static void __cpuinit intel_workarounds(struct cpuinfo_x86 *c) 199static void intel_workarounds(struct cpuinfo_x86 *c)
200{ 200{
201 unsigned long lo, hi; 201 unsigned long lo, hi;
202 202
@@ -275,12 +275,12 @@ static void __cpuinit intel_workarounds(struct cpuinfo_x86 *c)
275 intel_smp_check(c); 275 intel_smp_check(c);
276} 276}
277#else 277#else
278static void __cpuinit intel_workarounds(struct cpuinfo_x86 *c) 278static void intel_workarounds(struct cpuinfo_x86 *c)
279{ 279{
280} 280}
281#endif 281#endif
282 282
283static void __cpuinit srat_detect_node(struct cpuinfo_x86 *c) 283static void srat_detect_node(struct cpuinfo_x86 *c)
284{ 284{
285#ifdef CONFIG_NUMA 285#ifdef CONFIG_NUMA
286 unsigned node; 286 unsigned node;
@@ -300,7 +300,7 @@ static void __cpuinit srat_detect_node(struct cpuinfo_x86 *c)
300/* 300/*
301 * find out the number of processor cores on the die 301 * find out the number of processor cores on the die
302 */ 302 */
303static int __cpuinit intel_num_cpu_cores(struct cpuinfo_x86 *c) 303static int intel_num_cpu_cores(struct cpuinfo_x86 *c)
304{ 304{
305 unsigned int eax, ebx, ecx, edx; 305 unsigned int eax, ebx, ecx, edx;
306 306
@@ -315,7 +315,7 @@ static int __cpuinit intel_num_cpu_cores(struct cpuinfo_x86 *c)
315 return 1; 315 return 1;
316} 316}
317 317
318static void __cpuinit detect_vmx_virtcap(struct cpuinfo_x86 *c) 318static void detect_vmx_virtcap(struct cpuinfo_x86 *c)
319{ 319{
320 /* Intel VMX MSR indicated features */ 320 /* Intel VMX MSR indicated features */
321#define X86_VMX_FEATURE_PROC_CTLS_TPR_SHADOW 0x00200000 321#define X86_VMX_FEATURE_PROC_CTLS_TPR_SHADOW 0x00200000
@@ -353,7 +353,7 @@ static void __cpuinit detect_vmx_virtcap(struct cpuinfo_x86 *c)
353 } 353 }
354} 354}
355 355
356static void __cpuinit init_intel(struct cpuinfo_x86 *c) 356static void init_intel(struct cpuinfo_x86 *c)
357{ 357{
358 unsigned int l2 = 0; 358 unsigned int l2 = 0;
359 359
@@ -472,7 +472,7 @@ static void __cpuinit init_intel(struct cpuinfo_x86 *c)
472} 472}
473 473
474#ifdef CONFIG_X86_32 474#ifdef CONFIG_X86_32
475static unsigned int __cpuinit intel_size_cache(struct cpuinfo_x86 *c, unsigned int size) 475static unsigned int intel_size_cache(struct cpuinfo_x86 *c, unsigned int size)
476{ 476{
477 /* 477 /*
478 * Intel PIII Tualatin. This comes in two flavours. 478 * Intel PIII Tualatin. This comes in two flavours.
@@ -506,7 +506,7 @@ static unsigned int __cpuinit intel_size_cache(struct cpuinfo_x86 *c, unsigned i
506 506
507#define STLB_4K 0x41 507#define STLB_4K 0x41
508 508
509static const struct _tlb_table intel_tlb_table[] __cpuinitconst = { 509static const struct _tlb_table intel_tlb_table[] = {
510 { 0x01, TLB_INST_4K, 32, " TLB_INST 4 KByte pages, 4-way set associative" }, 510 { 0x01, TLB_INST_4K, 32, " TLB_INST 4 KByte pages, 4-way set associative" },
511 { 0x02, TLB_INST_4M, 2, " TLB_INST 4 MByte pages, full associative" }, 511 { 0x02, TLB_INST_4M, 2, " TLB_INST 4 MByte pages, full associative" },
512 { 0x03, TLB_DATA_4K, 64, " TLB_DATA 4 KByte pages, 4-way set associative" }, 512 { 0x03, TLB_DATA_4K, 64, " TLB_DATA 4 KByte pages, 4-way set associative" },
@@ -536,7 +536,7 @@ static const struct _tlb_table intel_tlb_table[] __cpuinitconst = {
536 { 0x00, 0, 0 } 536 { 0x00, 0, 0 }
537}; 537};
538 538
539static void __cpuinit intel_tlb_lookup(const unsigned char desc) 539static void intel_tlb_lookup(const unsigned char desc)
540{ 540{
541 unsigned char k; 541 unsigned char k;
542 if (desc == 0) 542 if (desc == 0)
@@ -605,7 +605,7 @@ static void __cpuinit intel_tlb_lookup(const unsigned char desc)
605 } 605 }
606} 606}
607 607
608static void __cpuinit intel_tlb_flushall_shift_set(struct cpuinfo_x86 *c) 608static void intel_tlb_flushall_shift_set(struct cpuinfo_x86 *c)
609{ 609{
610 switch ((c->x86 << 8) + c->x86_model) { 610 switch ((c->x86 << 8) + c->x86_model) {
611 case 0x60f: /* original 65 nm celeron/pentium/core2/xeon, "Merom"/"Conroe" */ 611 case 0x60f: /* original 65 nm celeron/pentium/core2/xeon, "Merom"/"Conroe" */
@@ -634,7 +634,7 @@ static void __cpuinit intel_tlb_flushall_shift_set(struct cpuinfo_x86 *c)
634 } 634 }
635} 635}
636 636
637static void __cpuinit intel_detect_tlb(struct cpuinfo_x86 *c) 637static void intel_detect_tlb(struct cpuinfo_x86 *c)
638{ 638{
639 int i, j, n; 639 int i, j, n;
640 unsigned int regs[4]; 640 unsigned int regs[4];
@@ -661,7 +661,7 @@ static void __cpuinit intel_detect_tlb(struct cpuinfo_x86 *c)
661 intel_tlb_flushall_shift_set(c); 661 intel_tlb_flushall_shift_set(c);
662} 662}
663 663
664static const struct cpu_dev __cpuinitconst intel_cpu_dev = { 664static const struct cpu_dev intel_cpu_dev = {
665 .c_vendor = "Intel", 665 .c_vendor = "Intel",
666 .c_ident = { "GenuineIntel" }, 666 .c_ident = { "GenuineIntel" },
667#ifdef CONFIG_X86_32 667#ifdef CONFIG_X86_32
diff --git a/arch/x86/kernel/cpu/intel_cacheinfo.c b/arch/x86/kernel/cpu/intel_cacheinfo.c
index 8dc72dda66fe..1414c90feaba 100644
--- a/arch/x86/kernel/cpu/intel_cacheinfo.c
+++ b/arch/x86/kernel/cpu/intel_cacheinfo.c
@@ -37,7 +37,7 @@ struct _cache_table {
37/* All the cache descriptor types we care about (no TLB or 37/* All the cache descriptor types we care about (no TLB or
38 trace cache entries) */ 38 trace cache entries) */
39 39
40static const struct _cache_table __cpuinitconst cache_table[] = 40static const struct _cache_table cache_table[] =
41{ 41{
42 { 0x06, LVL_1_INST, 8 }, /* 4-way set assoc, 32 byte line size */ 42 { 0x06, LVL_1_INST, 8 }, /* 4-way set assoc, 32 byte line size */
43 { 0x08, LVL_1_INST, 16 }, /* 4-way set assoc, 32 byte line size */ 43 { 0x08, LVL_1_INST, 16 }, /* 4-way set assoc, 32 byte line size */
@@ -203,7 +203,7 @@ union l3_cache {
203 unsigned val; 203 unsigned val;
204}; 204};
205 205
206static const unsigned short __cpuinitconst assocs[] = { 206static const unsigned short assocs[] = {
207 [1] = 1, 207 [1] = 1,
208 [2] = 2, 208 [2] = 2,
209 [4] = 4, 209 [4] = 4,
@@ -217,10 +217,10 @@ static const unsigned short __cpuinitconst assocs[] = {
217 [0xf] = 0xffff /* fully associative - no way to show this currently */ 217 [0xf] = 0xffff /* fully associative - no way to show this currently */
218}; 218};
219 219
220static const unsigned char __cpuinitconst levels[] = { 1, 1, 2, 3 }; 220static const unsigned char levels[] = { 1, 1, 2, 3 };
221static const unsigned char __cpuinitconst types[] = { 1, 2, 3, 3 }; 221static const unsigned char types[] = { 1, 2, 3, 3 };
222 222
223static void __cpuinit 223static void
224amd_cpuid4(int leaf, union _cpuid4_leaf_eax *eax, 224amd_cpuid4(int leaf, union _cpuid4_leaf_eax *eax,
225 union _cpuid4_leaf_ebx *ebx, 225 union _cpuid4_leaf_ebx *ebx,
226 union _cpuid4_leaf_ecx *ecx) 226 union _cpuid4_leaf_ecx *ecx)
@@ -302,7 +302,7 @@ struct _cache_attr {
302/* 302/*
303 * L3 cache descriptors 303 * L3 cache descriptors
304 */ 304 */
305static void __cpuinit amd_calc_l3_indices(struct amd_northbridge *nb) 305static void amd_calc_l3_indices(struct amd_northbridge *nb)
306{ 306{
307 struct amd_l3_cache *l3 = &nb->l3_cache; 307 struct amd_l3_cache *l3 = &nb->l3_cache;
308 unsigned int sc0, sc1, sc2, sc3; 308 unsigned int sc0, sc1, sc2, sc3;
@@ -325,7 +325,7 @@ static void __cpuinit amd_calc_l3_indices(struct amd_northbridge *nb)
325 l3->indices = (max(max3(sc0, sc1, sc2), sc3) << 10) - 1; 325 l3->indices = (max(max3(sc0, sc1, sc2), sc3) << 10) - 1;
326} 326}
327 327
328static void __cpuinit amd_init_l3_cache(struct _cpuid4_info_regs *this_leaf, int index) 328static void amd_init_l3_cache(struct _cpuid4_info_regs *this_leaf, int index)
329{ 329{
330 int node; 330 int node;
331 331
@@ -528,8 +528,7 @@ static struct _cache_attr subcaches =
528#endif /* CONFIG_AMD_NB && CONFIG_SYSFS */ 528#endif /* CONFIG_AMD_NB && CONFIG_SYSFS */
529 529
530static int 530static int
531__cpuinit cpuid4_cache_lookup_regs(int index, 531cpuid4_cache_lookup_regs(int index, struct _cpuid4_info_regs *this_leaf)
532 struct _cpuid4_info_regs *this_leaf)
533{ 532{
534 union _cpuid4_leaf_eax eax; 533 union _cpuid4_leaf_eax eax;
535 union _cpuid4_leaf_ebx ebx; 534 union _cpuid4_leaf_ebx ebx;
@@ -560,7 +559,7 @@ __cpuinit cpuid4_cache_lookup_regs(int index,
560 return 0; 559 return 0;
561} 560}
562 561
563static int __cpuinit find_num_cache_leaves(struct cpuinfo_x86 *c) 562static int find_num_cache_leaves(struct cpuinfo_x86 *c)
564{ 563{
565 unsigned int eax, ebx, ecx, edx, op; 564 unsigned int eax, ebx, ecx, edx, op;
566 union _cpuid4_leaf_eax cache_eax; 565 union _cpuid4_leaf_eax cache_eax;
@@ -580,7 +579,7 @@ static int __cpuinit find_num_cache_leaves(struct cpuinfo_x86 *c)
580 return i; 579 return i;
581} 580}
582 581
583void __cpuinit init_amd_cacheinfo(struct cpuinfo_x86 *c) 582void init_amd_cacheinfo(struct cpuinfo_x86 *c)
584{ 583{
585 584
586 if (cpu_has_topoext) { 585 if (cpu_has_topoext) {
@@ -593,7 +592,7 @@ void __cpuinit init_amd_cacheinfo(struct cpuinfo_x86 *c)
593 } 592 }
594} 593}
595 594
596unsigned int __cpuinit init_intel_cacheinfo(struct cpuinfo_x86 *c) 595unsigned int init_intel_cacheinfo(struct cpuinfo_x86 *c)
597{ 596{
598 /* Cache sizes */ 597 /* Cache sizes */
599 unsigned int trace = 0, l1i = 0, l1d = 0, l2 = 0, l3 = 0; 598 unsigned int trace = 0, l1i = 0, l1d = 0, l2 = 0, l3 = 0;
@@ -744,7 +743,7 @@ static DEFINE_PER_CPU(struct _cpuid4_info *, ici_cpuid4_info);
744 743
745#ifdef CONFIG_SMP 744#ifdef CONFIG_SMP
746 745
747static int __cpuinit cache_shared_amd_cpu_map_setup(unsigned int cpu, int index) 746static int cache_shared_amd_cpu_map_setup(unsigned int cpu, int index)
748{ 747{
749 struct _cpuid4_info *this_leaf; 748 struct _cpuid4_info *this_leaf;
750 int i, sibling; 749 int i, sibling;
@@ -793,7 +792,7 @@ static int __cpuinit cache_shared_amd_cpu_map_setup(unsigned int cpu, int index)
793 return 1; 792 return 1;
794} 793}
795 794
796static void __cpuinit cache_shared_cpu_map_setup(unsigned int cpu, int index) 795static void cache_shared_cpu_map_setup(unsigned int cpu, int index)
797{ 796{
798 struct _cpuid4_info *this_leaf, *sibling_leaf; 797 struct _cpuid4_info *this_leaf, *sibling_leaf;
799 unsigned long num_threads_sharing; 798 unsigned long num_threads_sharing;
@@ -828,7 +827,7 @@ static void __cpuinit cache_shared_cpu_map_setup(unsigned int cpu, int index)
828 } 827 }
829 } 828 }
830} 829}
831static void __cpuinit cache_remove_shared_cpu_map(unsigned int cpu, int index) 830static void cache_remove_shared_cpu_map(unsigned int cpu, int index)
832{ 831{
833 struct _cpuid4_info *this_leaf, *sibling_leaf; 832 struct _cpuid4_info *this_leaf, *sibling_leaf;
834 int sibling; 833 int sibling;
@@ -841,16 +840,16 @@ static void __cpuinit cache_remove_shared_cpu_map(unsigned int cpu, int index)
841 } 840 }
842} 841}
843#else 842#else
844static void __cpuinit cache_shared_cpu_map_setup(unsigned int cpu, int index) 843static void cache_shared_cpu_map_setup(unsigned int cpu, int index)
845{ 844{
846} 845}
847 846
848static void __cpuinit cache_remove_shared_cpu_map(unsigned int cpu, int index) 847static void cache_remove_shared_cpu_map(unsigned int cpu, int index)
849{ 848{
850} 849}
851#endif 850#endif
852 851
853static void __cpuinit free_cache_attributes(unsigned int cpu) 852static void free_cache_attributes(unsigned int cpu)
854{ 853{
855 int i; 854 int i;
856 855
@@ -861,7 +860,7 @@ static void __cpuinit free_cache_attributes(unsigned int cpu)
861 per_cpu(ici_cpuid4_info, cpu) = NULL; 860 per_cpu(ici_cpuid4_info, cpu) = NULL;
862} 861}
863 862
864static void __cpuinit get_cpu_leaves(void *_retval) 863static void get_cpu_leaves(void *_retval)
865{ 864{
866 int j, *retval = _retval, cpu = smp_processor_id(); 865 int j, *retval = _retval, cpu = smp_processor_id();
867 866
@@ -881,7 +880,7 @@ static void __cpuinit get_cpu_leaves(void *_retval)
881 } 880 }
882} 881}
883 882
884static int __cpuinit detect_cache_attributes(unsigned int cpu) 883static int detect_cache_attributes(unsigned int cpu)
885{ 884{
886 int retval; 885 int retval;
887 886
@@ -1015,7 +1014,7 @@ static struct attribute *default_attrs[] = {
1015}; 1014};
1016 1015
1017#ifdef CONFIG_AMD_NB 1016#ifdef CONFIG_AMD_NB
1018static struct attribute ** __cpuinit amd_l3_attrs(void) 1017static struct attribute **amd_l3_attrs(void)
1019{ 1018{
1020 static struct attribute **attrs; 1019 static struct attribute **attrs;
1021 int n; 1020 int n;
@@ -1091,7 +1090,7 @@ static struct kobj_type ktype_percpu_entry = {
1091 .sysfs_ops = &sysfs_ops, 1090 .sysfs_ops = &sysfs_ops,
1092}; 1091};
1093 1092
1094static void __cpuinit cpuid4_cache_sysfs_exit(unsigned int cpu) 1093static void cpuid4_cache_sysfs_exit(unsigned int cpu)
1095{ 1094{
1096 kfree(per_cpu(ici_cache_kobject, cpu)); 1095 kfree(per_cpu(ici_cache_kobject, cpu));
1097 kfree(per_cpu(ici_index_kobject, cpu)); 1096 kfree(per_cpu(ici_index_kobject, cpu));
@@ -1100,7 +1099,7 @@ static void __cpuinit cpuid4_cache_sysfs_exit(unsigned int cpu)
1100 free_cache_attributes(cpu); 1099 free_cache_attributes(cpu);
1101} 1100}
1102 1101
1103static int __cpuinit cpuid4_cache_sysfs_init(unsigned int cpu) 1102static int cpuid4_cache_sysfs_init(unsigned int cpu)
1104{ 1103{
1105 int err; 1104 int err;
1106 1105
@@ -1132,7 +1131,7 @@ err_out:
1132static DECLARE_BITMAP(cache_dev_map, NR_CPUS); 1131static DECLARE_BITMAP(cache_dev_map, NR_CPUS);
1133 1132
1134/* Add/Remove cache interface for CPU device */ 1133/* Add/Remove cache interface for CPU device */
1135static int __cpuinit cache_add_dev(struct device *dev) 1134static int cache_add_dev(struct device *dev)
1136{ 1135{
1137 unsigned int cpu = dev->id; 1136 unsigned int cpu = dev->id;
1138 unsigned long i, j; 1137 unsigned long i, j;
@@ -1183,7 +1182,7 @@ static int __cpuinit cache_add_dev(struct device *dev)
1183 return 0; 1182 return 0;
1184} 1183}
1185 1184
1186static void __cpuinit cache_remove_dev(struct device *dev) 1185static void cache_remove_dev(struct device *dev)
1187{ 1186{
1188 unsigned int cpu = dev->id; 1187 unsigned int cpu = dev->id;
1189 unsigned long i; 1188 unsigned long i;
@@ -1200,8 +1199,8 @@ static void __cpuinit cache_remove_dev(struct device *dev)
1200 cpuid4_cache_sysfs_exit(cpu); 1199 cpuid4_cache_sysfs_exit(cpu);
1201} 1200}
1202 1201
1203static int __cpuinit cacheinfo_cpu_callback(struct notifier_block *nfb, 1202static int cacheinfo_cpu_callback(struct notifier_block *nfb,
1204 unsigned long action, void *hcpu) 1203 unsigned long action, void *hcpu)
1205{ 1204{
1206 unsigned int cpu = (unsigned long)hcpu; 1205 unsigned int cpu = (unsigned long)hcpu;
1207 struct device *dev; 1206 struct device *dev;
@@ -1220,7 +1219,7 @@ static int __cpuinit cacheinfo_cpu_callback(struct notifier_block *nfb,
1220 return NOTIFY_OK; 1219 return NOTIFY_OK;
1221} 1220}
1222 1221
1223static struct notifier_block __cpuinitdata cacheinfo_cpu_notifier = { 1222static struct notifier_block cacheinfo_cpu_notifier = {
1224 .notifier_call = cacheinfo_cpu_callback, 1223 .notifier_call = cacheinfo_cpu_callback,
1225}; 1224};
1226 1225
diff --git a/arch/x86/kernel/cpu/mcheck/mce-severity.c b/arch/x86/kernel/cpu/mcheck/mce-severity.c
index e2703520d120..c370e1c4468b 100644
--- a/arch/x86/kernel/cpu/mcheck/mce-severity.c
+++ b/arch/x86/kernel/cpu/mcheck/mce-severity.c
@@ -111,8 +111,8 @@ static struct severity {
111#ifdef CONFIG_MEMORY_FAILURE 111#ifdef CONFIG_MEMORY_FAILURE
112 MCESEV( 112 MCESEV(
113 KEEP, "Action required but unaffected thread is continuable", 113 KEEP, "Action required but unaffected thread is continuable",
114 SER, MASK(MCI_STATUS_OVER|MCI_UC_SAR|MCI_ADDR|MCACOD, MCI_UC_SAR|MCI_ADDR), 114 SER, MASK(MCI_STATUS_OVER|MCI_UC_SAR|MCI_ADDR, MCI_UC_SAR|MCI_ADDR),
115 MCGMASK(MCG_STATUS_RIPV, MCG_STATUS_RIPV) 115 MCGMASK(MCG_STATUS_RIPV|MCG_STATUS_EIPV, MCG_STATUS_RIPV)
116 ), 116 ),
117 MCESEV( 117 MCESEV(
118 AR, "Action required: data load error in a user process", 118 AR, "Action required: data load error in a user process",
diff --git a/arch/x86/kernel/cpu/mcheck/mce.c b/arch/x86/kernel/cpu/mcheck/mce.c
index bf49cdbb010f..87a65c939bcd 100644
--- a/arch/x86/kernel/cpu/mcheck/mce.c
+++ b/arch/x86/kernel/cpu/mcheck/mce.c
@@ -1363,7 +1363,7 @@ int mce_notify_irq(void)
1363} 1363}
1364EXPORT_SYMBOL_GPL(mce_notify_irq); 1364EXPORT_SYMBOL_GPL(mce_notify_irq);
1365 1365
1366static int __cpuinit __mcheck_cpu_mce_banks_init(void) 1366static int __mcheck_cpu_mce_banks_init(void)
1367{ 1367{
1368 int i; 1368 int i;
1369 u8 num_banks = mca_cfg.banks; 1369 u8 num_banks = mca_cfg.banks;
@@ -1384,7 +1384,7 @@ static int __cpuinit __mcheck_cpu_mce_banks_init(void)
1384/* 1384/*
1385 * Initialize Machine Checks for a CPU. 1385 * Initialize Machine Checks for a CPU.
1386 */ 1386 */
1387static int __cpuinit __mcheck_cpu_cap_init(void) 1387static int __mcheck_cpu_cap_init(void)
1388{ 1388{
1389 unsigned b; 1389 unsigned b;
1390 u64 cap; 1390 u64 cap;
@@ -1483,7 +1483,7 @@ static void quirk_sandybridge_ifu(int bank, struct mce *m, struct pt_regs *regs)
1483} 1483}
1484 1484
1485/* Add per CPU specific workarounds here */ 1485/* Add per CPU specific workarounds here */
1486static int __cpuinit __mcheck_cpu_apply_quirks(struct cpuinfo_x86 *c) 1486static int __mcheck_cpu_apply_quirks(struct cpuinfo_x86 *c)
1487{ 1487{
1488 struct mca_config *cfg = &mca_cfg; 1488 struct mca_config *cfg = &mca_cfg;
1489 1489
@@ -1593,7 +1593,7 @@ static int __cpuinit __mcheck_cpu_apply_quirks(struct cpuinfo_x86 *c)
1593 return 0; 1593 return 0;
1594} 1594}
1595 1595
1596static int __cpuinit __mcheck_cpu_ancient_init(struct cpuinfo_x86 *c) 1596static int __mcheck_cpu_ancient_init(struct cpuinfo_x86 *c)
1597{ 1597{
1598 if (c->x86 != 5) 1598 if (c->x86 != 5)
1599 return 0; 1599 return 0;
@@ -1664,7 +1664,7 @@ void (*machine_check_vector)(struct pt_regs *, long error_code) =
1664 * Called for each booted CPU to set up machine checks. 1664 * Called for each booted CPU to set up machine checks.
1665 * Must be called with preempt off: 1665 * Must be called with preempt off:
1666 */ 1666 */
1667void __cpuinit mcheck_cpu_init(struct cpuinfo_x86 *c) 1667void mcheck_cpu_init(struct cpuinfo_x86 *c)
1668{ 1668{
1669 if (mca_cfg.disabled) 1669 if (mca_cfg.disabled)
1670 return; 1670 return;
@@ -2082,7 +2082,6 @@ static struct bus_type mce_subsys = {
2082 2082
2083DEFINE_PER_CPU(struct device *, mce_device); 2083DEFINE_PER_CPU(struct device *, mce_device);
2084 2084
2085__cpuinitdata
2086void (*threshold_cpu_callback)(unsigned long action, unsigned int cpu); 2085void (*threshold_cpu_callback)(unsigned long action, unsigned int cpu);
2087 2086
2088static inline struct mce_bank *attr_to_bank(struct device_attribute *attr) 2087static inline struct mce_bank *attr_to_bank(struct device_attribute *attr)
@@ -2228,7 +2227,7 @@ static void mce_device_release(struct device *dev)
2228} 2227}
2229 2228
2230/* Per cpu device init. All of the cpus still share the same ctrl bank: */ 2229/* Per cpu device init. All of the cpus still share the same ctrl bank: */
2231static __cpuinit int mce_device_create(unsigned int cpu) 2230static int mce_device_create(unsigned int cpu)
2232{ 2231{
2233 struct device *dev; 2232 struct device *dev;
2234 int err; 2233 int err;
@@ -2274,7 +2273,7 @@ error:
2274 return err; 2273 return err;
2275} 2274}
2276 2275
2277static __cpuinit void mce_device_remove(unsigned int cpu) 2276static void mce_device_remove(unsigned int cpu)
2278{ 2277{
2279 struct device *dev = per_cpu(mce_device, cpu); 2278 struct device *dev = per_cpu(mce_device, cpu);
2280 int i; 2279 int i;
@@ -2294,7 +2293,7 @@ static __cpuinit void mce_device_remove(unsigned int cpu)
2294} 2293}
2295 2294
2296/* Make sure there are no machine checks on offlined CPUs. */ 2295/* Make sure there are no machine checks on offlined CPUs. */
2297static void __cpuinit mce_disable_cpu(void *h) 2296static void mce_disable_cpu(void *h)
2298{ 2297{
2299 unsigned long action = *(unsigned long *)h; 2298 unsigned long action = *(unsigned long *)h;
2300 int i; 2299 int i;
@@ -2312,7 +2311,7 @@ static void __cpuinit mce_disable_cpu(void *h)
2312 } 2311 }
2313} 2312}
2314 2313
2315static void __cpuinit mce_reenable_cpu(void *h) 2314static void mce_reenable_cpu(void *h)
2316{ 2315{
2317 unsigned long action = *(unsigned long *)h; 2316 unsigned long action = *(unsigned long *)h;
2318 int i; 2317 int i;
@@ -2331,7 +2330,7 @@ static void __cpuinit mce_reenable_cpu(void *h)
2331} 2330}
2332 2331
2333/* Get notified when a cpu comes on/off. Be hotplug friendly. */ 2332/* Get notified when a cpu comes on/off. Be hotplug friendly. */
2334static int __cpuinit 2333static int
2335mce_cpu_callback(struct notifier_block *nfb, unsigned long action, void *hcpu) 2334mce_cpu_callback(struct notifier_block *nfb, unsigned long action, void *hcpu)
2336{ 2335{
2337 unsigned int cpu = (unsigned long)hcpu; 2336 unsigned int cpu = (unsigned long)hcpu;
@@ -2367,7 +2366,7 @@ mce_cpu_callback(struct notifier_block *nfb, unsigned long action, void *hcpu)
2367 return NOTIFY_OK; 2366 return NOTIFY_OK;
2368} 2367}
2369 2368
2370static struct notifier_block mce_cpu_notifier __cpuinitdata = { 2369static struct notifier_block mce_cpu_notifier = {
2371 .notifier_call = mce_cpu_callback, 2370 .notifier_call = mce_cpu_callback,
2372}; 2371};
2373 2372
diff --git a/arch/x86/kernel/cpu/mcheck/mce_amd.c b/arch/x86/kernel/cpu/mcheck/mce_amd.c
index 9cb52767999a..603df4f74640 100644
--- a/arch/x86/kernel/cpu/mcheck/mce_amd.c
+++ b/arch/x86/kernel/cpu/mcheck/mce_amd.c
@@ -458,10 +458,8 @@ static struct kobj_type threshold_ktype = {
458 .default_attrs = default_attrs, 458 .default_attrs = default_attrs,
459}; 459};
460 460
461static __cpuinit int allocate_threshold_blocks(unsigned int cpu, 461static int allocate_threshold_blocks(unsigned int cpu, unsigned int bank,
462 unsigned int bank, 462 unsigned int block, u32 address)
463 unsigned int block,
464 u32 address)
465{ 463{
466 struct threshold_block *b = NULL; 464 struct threshold_block *b = NULL;
467 u32 low, high; 465 u32 low, high;
@@ -543,7 +541,7 @@ out_free:
543 return err; 541 return err;
544} 542}
545 543
546static __cpuinit int __threshold_add_blocks(struct threshold_bank *b) 544static int __threshold_add_blocks(struct threshold_bank *b)
547{ 545{
548 struct list_head *head = &b->blocks->miscj; 546 struct list_head *head = &b->blocks->miscj;
549 struct threshold_block *pos = NULL; 547 struct threshold_block *pos = NULL;
@@ -567,7 +565,7 @@ static __cpuinit int __threshold_add_blocks(struct threshold_bank *b)
567 return err; 565 return err;
568} 566}
569 567
570static __cpuinit int threshold_create_bank(unsigned int cpu, unsigned int bank) 568static int threshold_create_bank(unsigned int cpu, unsigned int bank)
571{ 569{
572 struct device *dev = per_cpu(mce_device, cpu); 570 struct device *dev = per_cpu(mce_device, cpu);
573 struct amd_northbridge *nb = NULL; 571 struct amd_northbridge *nb = NULL;
@@ -632,7 +630,7 @@ static __cpuinit int threshold_create_bank(unsigned int cpu, unsigned int bank)
632} 630}
633 631
634/* create dir/files for all valid threshold banks */ 632/* create dir/files for all valid threshold banks */
635static __cpuinit int threshold_create_device(unsigned int cpu) 633static int threshold_create_device(unsigned int cpu)
636{ 634{
637 unsigned int bank; 635 unsigned int bank;
638 struct threshold_bank **bp; 636 struct threshold_bank **bp;
@@ -736,7 +734,7 @@ static void threshold_remove_device(unsigned int cpu)
736} 734}
737 735
738/* get notified when a cpu comes on/off */ 736/* get notified when a cpu comes on/off */
739static void __cpuinit 737static void
740amd_64_threshold_cpu_callback(unsigned long action, unsigned int cpu) 738amd_64_threshold_cpu_callback(unsigned long action, unsigned int cpu)
741{ 739{
742 switch (action) { 740 switch (action) {
diff --git a/arch/x86/kernel/cpu/mcheck/therm_throt.c b/arch/x86/kernel/cpu/mcheck/therm_throt.c
index 41e8e00a6637..3eec7de76efb 100644
--- a/arch/x86/kernel/cpu/mcheck/therm_throt.c
+++ b/arch/x86/kernel/cpu/mcheck/therm_throt.c
@@ -240,8 +240,7 @@ __setup("int_pln_enable", int_pln_enable_setup);
240 240
241#ifdef CONFIG_SYSFS 241#ifdef CONFIG_SYSFS
242/* Add/Remove thermal_throttle interface for CPU device: */ 242/* Add/Remove thermal_throttle interface for CPU device: */
243static __cpuinit int thermal_throttle_add_dev(struct device *dev, 243static int thermal_throttle_add_dev(struct device *dev, unsigned int cpu)
244 unsigned int cpu)
245{ 244{
246 int err; 245 int err;
247 struct cpuinfo_x86 *c = &cpu_data(cpu); 246 struct cpuinfo_x86 *c = &cpu_data(cpu);
@@ -267,7 +266,7 @@ static __cpuinit int thermal_throttle_add_dev(struct device *dev,
267 return err; 266 return err;
268} 267}
269 268
270static __cpuinit void thermal_throttle_remove_dev(struct device *dev) 269static void thermal_throttle_remove_dev(struct device *dev)
271{ 270{
272 sysfs_remove_group(&dev->kobj, &thermal_attr_group); 271 sysfs_remove_group(&dev->kobj, &thermal_attr_group);
273} 272}
@@ -276,7 +275,7 @@ static __cpuinit void thermal_throttle_remove_dev(struct device *dev)
276static DEFINE_MUTEX(therm_cpu_lock); 275static DEFINE_MUTEX(therm_cpu_lock);
277 276
278/* Get notified when a cpu comes on/off. Be hotplug friendly. */ 277/* Get notified when a cpu comes on/off. Be hotplug friendly. */
279static __cpuinit int 278static int
280thermal_throttle_cpu_callback(struct notifier_block *nfb, 279thermal_throttle_cpu_callback(struct notifier_block *nfb,
281 unsigned long action, 280 unsigned long action,
282 void *hcpu) 281 void *hcpu)
@@ -307,7 +306,7 @@ thermal_throttle_cpu_callback(struct notifier_block *nfb,
307 return notifier_from_errno(err); 306 return notifier_from_errno(err);
308} 307}
309 308
310static struct notifier_block thermal_throttle_cpu_notifier __cpuinitdata = 309static struct notifier_block thermal_throttle_cpu_notifier =
311{ 310{
312 .notifier_call = thermal_throttle_cpu_callback, 311 .notifier_call = thermal_throttle_cpu_callback,
313}; 312};
diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c
index 9e581c5cf6d0..a7c7305030cc 100644
--- a/arch/x86/kernel/cpu/perf_event.c
+++ b/arch/x86/kernel/cpu/perf_event.c
@@ -1295,7 +1295,7 @@ perf_event_nmi_handler(unsigned int cmd, struct pt_regs *regs)
1295struct event_constraint emptyconstraint; 1295struct event_constraint emptyconstraint;
1296struct event_constraint unconstrained; 1296struct event_constraint unconstrained;
1297 1297
1298static int __cpuinit 1298static int
1299x86_pmu_notifier(struct notifier_block *self, unsigned long action, void *hcpu) 1299x86_pmu_notifier(struct notifier_block *self, unsigned long action, void *hcpu)
1300{ 1300{
1301 unsigned int cpu = (long)hcpu; 1301 unsigned int cpu = (long)hcpu;
diff --git a/arch/x86/kernel/cpu/perf_event_amd_ibs.c b/arch/x86/kernel/cpu/perf_event_amd_ibs.c
index 5f0581e713c2..e09f0bfb7b8f 100644
--- a/arch/x86/kernel/cpu/perf_event_amd_ibs.c
+++ b/arch/x86/kernel/cpu/perf_event_amd_ibs.c
@@ -851,7 +851,7 @@ static void clear_APIC_ibs(void *dummy)
851 setup_APIC_eilvt(offset, 0, APIC_EILVT_MSG_FIX, 1); 851 setup_APIC_eilvt(offset, 0, APIC_EILVT_MSG_FIX, 1);
852} 852}
853 853
854static int __cpuinit 854static int
855perf_ibs_cpu_notifier(struct notifier_block *self, unsigned long action, void *hcpu) 855perf_ibs_cpu_notifier(struct notifier_block *self, unsigned long action, void *hcpu)
856{ 856{
857 switch (action & ~CPU_TASKS_FROZEN) { 857 switch (action & ~CPU_TASKS_FROZEN) {
diff --git a/arch/x86/kernel/cpu/perf_event_amd_uncore.c b/arch/x86/kernel/cpu/perf_event_amd_uncore.c
index c0c661adf03e..754291adec33 100644
--- a/arch/x86/kernel/cpu/perf_event_amd_uncore.c
+++ b/arch/x86/kernel/cpu/perf_event_amd_uncore.c
@@ -288,13 +288,13 @@ static struct pmu amd_l2_pmu = {
288 .read = amd_uncore_read, 288 .read = amd_uncore_read,
289}; 289};
290 290
291static struct amd_uncore * __cpuinit amd_uncore_alloc(unsigned int cpu) 291static struct amd_uncore *amd_uncore_alloc(unsigned int cpu)
292{ 292{
293 return kzalloc_node(sizeof(struct amd_uncore), GFP_KERNEL, 293 return kzalloc_node(sizeof(struct amd_uncore), GFP_KERNEL,
294 cpu_to_node(cpu)); 294 cpu_to_node(cpu));
295} 295}
296 296
297static void __cpuinit amd_uncore_cpu_up_prepare(unsigned int cpu) 297static void amd_uncore_cpu_up_prepare(unsigned int cpu)
298{ 298{
299 struct amd_uncore *uncore; 299 struct amd_uncore *uncore;
300 300
@@ -322,8 +322,8 @@ static void __cpuinit amd_uncore_cpu_up_prepare(unsigned int cpu)
322} 322}
323 323
324static struct amd_uncore * 324static struct amd_uncore *
325__cpuinit amd_uncore_find_online_sibling(struct amd_uncore *this, 325amd_uncore_find_online_sibling(struct amd_uncore *this,
326 struct amd_uncore * __percpu *uncores) 326 struct amd_uncore * __percpu *uncores)
327{ 327{
328 unsigned int cpu; 328 unsigned int cpu;
329 struct amd_uncore *that; 329 struct amd_uncore *that;
@@ -348,7 +348,7 @@ __cpuinit amd_uncore_find_online_sibling(struct amd_uncore *this,
348 return this; 348 return this;
349} 349}
350 350
351static void __cpuinit amd_uncore_cpu_starting(unsigned int cpu) 351static void amd_uncore_cpu_starting(unsigned int cpu)
352{ 352{
353 unsigned int eax, ebx, ecx, edx; 353 unsigned int eax, ebx, ecx, edx;
354 struct amd_uncore *uncore; 354 struct amd_uncore *uncore;
@@ -376,8 +376,8 @@ static void __cpuinit amd_uncore_cpu_starting(unsigned int cpu)
376 } 376 }
377} 377}
378 378
379static void __cpuinit uncore_online(unsigned int cpu, 379static void uncore_online(unsigned int cpu,
380 struct amd_uncore * __percpu *uncores) 380 struct amd_uncore * __percpu *uncores)
381{ 381{
382 struct amd_uncore *uncore = *per_cpu_ptr(uncores, cpu); 382 struct amd_uncore *uncore = *per_cpu_ptr(uncores, cpu);
383 383
@@ -388,7 +388,7 @@ static void __cpuinit uncore_online(unsigned int cpu,
388 cpumask_set_cpu(cpu, uncore->active_mask); 388 cpumask_set_cpu(cpu, uncore->active_mask);
389} 389}
390 390
391static void __cpuinit amd_uncore_cpu_online(unsigned int cpu) 391static void amd_uncore_cpu_online(unsigned int cpu)
392{ 392{
393 if (amd_uncore_nb) 393 if (amd_uncore_nb)
394 uncore_online(cpu, amd_uncore_nb); 394 uncore_online(cpu, amd_uncore_nb);
@@ -397,8 +397,8 @@ static void __cpuinit amd_uncore_cpu_online(unsigned int cpu)
397 uncore_online(cpu, amd_uncore_l2); 397 uncore_online(cpu, amd_uncore_l2);
398} 398}
399 399
400static void __cpuinit uncore_down_prepare(unsigned int cpu, 400static void uncore_down_prepare(unsigned int cpu,
401 struct amd_uncore * __percpu *uncores) 401 struct amd_uncore * __percpu *uncores)
402{ 402{
403 unsigned int i; 403 unsigned int i;
404 struct amd_uncore *this = *per_cpu_ptr(uncores, cpu); 404 struct amd_uncore *this = *per_cpu_ptr(uncores, cpu);
@@ -423,7 +423,7 @@ static void __cpuinit uncore_down_prepare(unsigned int cpu,
423 } 423 }
424} 424}
425 425
426static void __cpuinit amd_uncore_cpu_down_prepare(unsigned int cpu) 426static void amd_uncore_cpu_down_prepare(unsigned int cpu)
427{ 427{
428 if (amd_uncore_nb) 428 if (amd_uncore_nb)
429 uncore_down_prepare(cpu, amd_uncore_nb); 429 uncore_down_prepare(cpu, amd_uncore_nb);
@@ -432,8 +432,7 @@ static void __cpuinit amd_uncore_cpu_down_prepare(unsigned int cpu)
432 uncore_down_prepare(cpu, amd_uncore_l2); 432 uncore_down_prepare(cpu, amd_uncore_l2);
433} 433}
434 434
435static void __cpuinit uncore_dead(unsigned int cpu, 435static void uncore_dead(unsigned int cpu, struct amd_uncore * __percpu *uncores)
436 struct amd_uncore * __percpu *uncores)
437{ 436{
438 struct amd_uncore *uncore = *per_cpu_ptr(uncores, cpu); 437 struct amd_uncore *uncore = *per_cpu_ptr(uncores, cpu);
439 438
@@ -445,7 +444,7 @@ static void __cpuinit uncore_dead(unsigned int cpu,
445 *per_cpu_ptr(amd_uncore_nb, cpu) = NULL; 444 *per_cpu_ptr(amd_uncore_nb, cpu) = NULL;
446} 445}
447 446
448static void __cpuinit amd_uncore_cpu_dead(unsigned int cpu) 447static void amd_uncore_cpu_dead(unsigned int cpu)
449{ 448{
450 if (amd_uncore_nb) 449 if (amd_uncore_nb)
451 uncore_dead(cpu, amd_uncore_nb); 450 uncore_dead(cpu, amd_uncore_nb);
@@ -454,7 +453,7 @@ static void __cpuinit amd_uncore_cpu_dead(unsigned int cpu)
454 uncore_dead(cpu, amd_uncore_l2); 453 uncore_dead(cpu, amd_uncore_l2);
455} 454}
456 455
457static int __cpuinit 456static int
458amd_uncore_cpu_notifier(struct notifier_block *self, unsigned long action, 457amd_uncore_cpu_notifier(struct notifier_block *self, unsigned long action,
459 void *hcpu) 458 void *hcpu)
460{ 459{
@@ -489,7 +488,7 @@ amd_uncore_cpu_notifier(struct notifier_block *self, unsigned long action,
489 return NOTIFY_OK; 488 return NOTIFY_OK;
490} 489}
491 490
492static struct notifier_block amd_uncore_cpu_notifier_block __cpuinitdata = { 491static struct notifier_block amd_uncore_cpu_notifier_block = {
493 .notifier_call = amd_uncore_cpu_notifier, 492 .notifier_call = amd_uncore_cpu_notifier,
494 .priority = CPU_PRI_PERF + 1, 493 .priority = CPU_PRI_PERF + 1,
495}; 494};
diff --git a/arch/x86/kernel/cpu/perf_event_intel_uncore.c b/arch/x86/kernel/cpu/perf_event_intel_uncore.c
index 9dd99751ccf9..cad791dbde95 100644
--- a/arch/x86/kernel/cpu/perf_event_intel_uncore.c
+++ b/arch/x86/kernel/cpu/perf_event_intel_uncore.c
@@ -3297,7 +3297,7 @@ static void __init uncore_pci_exit(void)
3297/* CPU hot plug/unplug are serialized by cpu_add_remove_lock mutex */ 3297/* CPU hot plug/unplug are serialized by cpu_add_remove_lock mutex */
3298static LIST_HEAD(boxes_to_free); 3298static LIST_HEAD(boxes_to_free);
3299 3299
3300static void __cpuinit uncore_kfree_boxes(void) 3300static void uncore_kfree_boxes(void)
3301{ 3301{
3302 struct intel_uncore_box *box; 3302 struct intel_uncore_box *box;
3303 3303
@@ -3309,7 +3309,7 @@ static void __cpuinit uncore_kfree_boxes(void)
3309 } 3309 }
3310} 3310}
3311 3311
3312static void __cpuinit uncore_cpu_dying(int cpu) 3312static void uncore_cpu_dying(int cpu)
3313{ 3313{
3314 struct intel_uncore_type *type; 3314 struct intel_uncore_type *type;
3315 struct intel_uncore_pmu *pmu; 3315 struct intel_uncore_pmu *pmu;
@@ -3328,7 +3328,7 @@ static void __cpuinit uncore_cpu_dying(int cpu)
3328 } 3328 }
3329} 3329}
3330 3330
3331static int __cpuinit uncore_cpu_starting(int cpu) 3331static int uncore_cpu_starting(int cpu)
3332{ 3332{
3333 struct intel_uncore_type *type; 3333 struct intel_uncore_type *type;
3334 struct intel_uncore_pmu *pmu; 3334 struct intel_uncore_pmu *pmu;
@@ -3371,7 +3371,7 @@ static int __cpuinit uncore_cpu_starting(int cpu)
3371 return 0; 3371 return 0;
3372} 3372}
3373 3373
3374static int __cpuinit uncore_cpu_prepare(int cpu, int phys_id) 3374static int uncore_cpu_prepare(int cpu, int phys_id)
3375{ 3375{
3376 struct intel_uncore_type *type; 3376 struct intel_uncore_type *type;
3377 struct intel_uncore_pmu *pmu; 3377 struct intel_uncore_pmu *pmu;
@@ -3397,7 +3397,7 @@ static int __cpuinit uncore_cpu_prepare(int cpu, int phys_id)
3397 return 0; 3397 return 0;
3398} 3398}
3399 3399
3400static void __cpuinit 3400static void
3401uncore_change_context(struct intel_uncore_type **uncores, int old_cpu, int new_cpu) 3401uncore_change_context(struct intel_uncore_type **uncores, int old_cpu, int new_cpu)
3402{ 3402{
3403 struct intel_uncore_type *type; 3403 struct intel_uncore_type *type;
@@ -3435,7 +3435,7 @@ uncore_change_context(struct intel_uncore_type **uncores, int old_cpu, int new_c
3435 } 3435 }
3436} 3436}
3437 3437
3438static void __cpuinit uncore_event_exit_cpu(int cpu) 3438static void uncore_event_exit_cpu(int cpu)
3439{ 3439{
3440 int i, phys_id, target; 3440 int i, phys_id, target;
3441 3441
@@ -3463,7 +3463,7 @@ static void __cpuinit uncore_event_exit_cpu(int cpu)
3463 uncore_change_context(pci_uncores, cpu, target); 3463 uncore_change_context(pci_uncores, cpu, target);
3464} 3464}
3465 3465
3466static void __cpuinit uncore_event_init_cpu(int cpu) 3466static void uncore_event_init_cpu(int cpu)
3467{ 3467{
3468 int i, phys_id; 3468 int i, phys_id;
3469 3469
@@ -3479,8 +3479,8 @@ static void __cpuinit uncore_event_init_cpu(int cpu)
3479 uncore_change_context(pci_uncores, -1, cpu); 3479 uncore_change_context(pci_uncores, -1, cpu);
3480} 3480}
3481 3481
3482static int 3482static int uncore_cpu_notifier(struct notifier_block *self,
3483 __cpuinit uncore_cpu_notifier(struct notifier_block *self, unsigned long action, void *hcpu) 3483 unsigned long action, void *hcpu)
3484{ 3484{
3485 unsigned int cpu = (long)hcpu; 3485 unsigned int cpu = (long)hcpu;
3486 3486
@@ -3520,7 +3520,7 @@ static int
3520 return NOTIFY_OK; 3520 return NOTIFY_OK;
3521} 3521}
3522 3522
3523static struct notifier_block uncore_cpu_nb __cpuinitdata = { 3523static struct notifier_block uncore_cpu_nb = {
3524 .notifier_call = uncore_cpu_notifier, 3524 .notifier_call = uncore_cpu_notifier,
3525 /* 3525 /*
3526 * to migrate uncore events, our notifier should be executed 3526 * to migrate uncore events, our notifier should be executed
diff --git a/arch/x86/kernel/cpu/rdrand.c b/arch/x86/kernel/cpu/rdrand.c
index feca286c2bb4..88db010845cb 100644
--- a/arch/x86/kernel/cpu/rdrand.c
+++ b/arch/x86/kernel/cpu/rdrand.c
@@ -52,7 +52,7 @@ static inline int rdrand_long(unsigned long *v)
52 */ 52 */
53#define RESEED_LOOP ((512*128)/sizeof(unsigned long)) 53#define RESEED_LOOP ((512*128)/sizeof(unsigned long))
54 54
55void __cpuinit x86_init_rdrand(struct cpuinfo_x86 *c) 55void x86_init_rdrand(struct cpuinfo_x86 *c)
56{ 56{
57#ifdef CONFIG_ARCH_RANDOM 57#ifdef CONFIG_ARCH_RANDOM
58 unsigned long tmp; 58 unsigned long tmp;
diff --git a/arch/x86/kernel/cpu/scattered.c b/arch/x86/kernel/cpu/scattered.c
index d92b5dad15dd..f2cc63e9cf08 100644
--- a/arch/x86/kernel/cpu/scattered.c
+++ b/arch/x86/kernel/cpu/scattered.c
@@ -24,13 +24,13 @@ enum cpuid_regs {
24 CR_EBX 24 CR_EBX
25}; 25};
26 26
27void __cpuinit init_scattered_cpuid_features(struct cpuinfo_x86 *c) 27void init_scattered_cpuid_features(struct cpuinfo_x86 *c)
28{ 28{
29 u32 max_level; 29 u32 max_level;
30 u32 regs[4]; 30 u32 regs[4];
31 const struct cpuid_bit *cb; 31 const struct cpuid_bit *cb;
32 32
33 static const struct cpuid_bit __cpuinitconst cpuid_bits[] = { 33 static const struct cpuid_bit cpuid_bits[] = {
34 { X86_FEATURE_DTHERM, CR_EAX, 0, 0x00000006, 0 }, 34 { X86_FEATURE_DTHERM, CR_EAX, 0, 0x00000006, 0 },
35 { X86_FEATURE_IDA, CR_EAX, 1, 0x00000006, 0 }, 35 { X86_FEATURE_IDA, CR_EAX, 1, 0x00000006, 0 },
36 { X86_FEATURE_ARAT, CR_EAX, 2, 0x00000006, 0 }, 36 { X86_FEATURE_ARAT, CR_EAX, 2, 0x00000006, 0 },
diff --git a/arch/x86/kernel/cpu/topology.c b/arch/x86/kernel/cpu/topology.c
index 4397e987a1cf..4c60eaf0571c 100644
--- a/arch/x86/kernel/cpu/topology.c
+++ b/arch/x86/kernel/cpu/topology.c
@@ -26,7 +26,7 @@
26 * exists, use it for populating initial_apicid and cpu topology 26 * exists, use it for populating initial_apicid and cpu topology
27 * detection. 27 * detection.
28 */ 28 */
29void __cpuinit detect_extended_topology(struct cpuinfo_x86 *c) 29void detect_extended_topology(struct cpuinfo_x86 *c)
30{ 30{
31#ifdef CONFIG_SMP 31#ifdef CONFIG_SMP
32 unsigned int eax, ebx, ecx, edx, sub_index; 32 unsigned int eax, ebx, ecx, edx, sub_index;
diff --git a/arch/x86/kernel/cpu/transmeta.c b/arch/x86/kernel/cpu/transmeta.c
index 28000743bbb0..aa0430d69b90 100644
--- a/arch/x86/kernel/cpu/transmeta.c
+++ b/arch/x86/kernel/cpu/transmeta.c
@@ -5,7 +5,7 @@
5#include <asm/msr.h> 5#include <asm/msr.h>
6#include "cpu.h" 6#include "cpu.h"
7 7
8static void __cpuinit early_init_transmeta(struct cpuinfo_x86 *c) 8static void early_init_transmeta(struct cpuinfo_x86 *c)
9{ 9{
10 u32 xlvl; 10 u32 xlvl;
11 11
@@ -17,7 +17,7 @@ static void __cpuinit early_init_transmeta(struct cpuinfo_x86 *c)
17 } 17 }
18} 18}
19 19
20static void __cpuinit init_transmeta(struct cpuinfo_x86 *c) 20static void init_transmeta(struct cpuinfo_x86 *c)
21{ 21{
22 unsigned int cap_mask, uk, max, dummy; 22 unsigned int cap_mask, uk, max, dummy;
23 unsigned int cms_rev1, cms_rev2; 23 unsigned int cms_rev1, cms_rev2;
@@ -98,7 +98,7 @@ static void __cpuinit init_transmeta(struct cpuinfo_x86 *c)
98#endif 98#endif
99} 99}
100 100
101static const struct cpu_dev __cpuinitconst transmeta_cpu_dev = { 101static const struct cpu_dev transmeta_cpu_dev = {
102 .c_vendor = "Transmeta", 102 .c_vendor = "Transmeta",
103 .c_ident = { "GenuineTMx86", "TransmetaCPU" }, 103 .c_ident = { "GenuineTMx86", "TransmetaCPU" },
104 .c_early_init = early_init_transmeta, 104 .c_early_init = early_init_transmeta,
diff --git a/arch/x86/kernel/cpu/umc.c b/arch/x86/kernel/cpu/umc.c
index fd2c37bf7acb..202759a14121 100644
--- a/arch/x86/kernel/cpu/umc.c
+++ b/arch/x86/kernel/cpu/umc.c
@@ -8,7 +8,7 @@
8 * so no special init takes place. 8 * so no special init takes place.
9 */ 9 */
10 10
11static const struct cpu_dev __cpuinitconst umc_cpu_dev = { 11static const struct cpu_dev umc_cpu_dev = {
12 .c_vendor = "UMC", 12 .c_vendor = "UMC",
13 .c_ident = { "UMC UMC UMC" }, 13 .c_ident = { "UMC UMC UMC" },
14 .c_models = { 14 .c_models = {
diff --git a/arch/x86/kernel/cpu/vmware.c b/arch/x86/kernel/cpu/vmware.c
index 03a36321ec54..7076878404ec 100644
--- a/arch/x86/kernel/cpu/vmware.c
+++ b/arch/x86/kernel/cpu/vmware.c
@@ -122,7 +122,7 @@ static bool __init vmware_platform(void)
122 * so that the kernel could just trust the hypervisor with providing a 122 * so that the kernel could just trust the hypervisor with providing a
123 * reliable virtual TSC that is suitable for timekeeping. 123 * reliable virtual TSC that is suitable for timekeeping.
124 */ 124 */
125static void __cpuinit vmware_set_cpu_features(struct cpuinfo_x86 *c) 125static void vmware_set_cpu_features(struct cpuinfo_x86 *c)
126{ 126{
127 set_cpu_cap(c, X86_FEATURE_CONSTANT_TSC); 127 set_cpu_cap(c, X86_FEATURE_CONSTANT_TSC);
128 set_cpu_cap(c, X86_FEATURE_TSC_RELIABLE); 128 set_cpu_cap(c, X86_FEATURE_TSC_RELIABLE);
diff --git a/arch/x86/kernel/cpuid.c b/arch/x86/kernel/cpuid.c
index 1e4dbcfe6d31..7d9481c743f8 100644
--- a/arch/x86/kernel/cpuid.c
+++ b/arch/x86/kernel/cpuid.c
@@ -137,7 +137,7 @@ static const struct file_operations cpuid_fops = {
137 .open = cpuid_open, 137 .open = cpuid_open,
138}; 138};
139 139
140static __cpuinit int cpuid_device_create(int cpu) 140static int cpuid_device_create(int cpu)
141{ 141{
142 struct device *dev; 142 struct device *dev;
143 143
@@ -151,9 +151,8 @@ static void cpuid_device_destroy(int cpu)
151 device_destroy(cpuid_class, MKDEV(CPUID_MAJOR, cpu)); 151 device_destroy(cpuid_class, MKDEV(CPUID_MAJOR, cpu));
152} 152}
153 153
154static int __cpuinit cpuid_class_cpu_callback(struct notifier_block *nfb, 154static int cpuid_class_cpu_callback(struct notifier_block *nfb,
155 unsigned long action, 155 unsigned long action, void *hcpu)
156 void *hcpu)
157{ 156{
158 unsigned int cpu = (unsigned long)hcpu; 157 unsigned int cpu = (unsigned long)hcpu;
159 int err = 0; 158 int err = 0;
diff --git a/arch/x86/kernel/devicetree.c b/arch/x86/kernel/devicetree.c
index 4934890e4db2..69eb2fa25494 100644
--- a/arch/x86/kernel/devicetree.c
+++ b/arch/x86/kernel/devicetree.c
@@ -133,7 +133,7 @@ static void x86_of_pci_irq_disable(struct pci_dev *dev)
133{ 133{
134} 134}
135 135
136void __cpuinit x86_of_pci_init(void) 136void x86_of_pci_init(void)
137{ 137{
138 pcibios_enable_irq = x86_of_pci_irq_enable; 138 pcibios_enable_irq = x86_of_pci_irq_enable;
139 pcibios_disable_irq = x86_of_pci_irq_disable; 139 pcibios_disable_irq = x86_of_pci_irq_disable;
diff --git a/arch/x86/kernel/early-quirks.c b/arch/x86/kernel/early-quirks.c
index 94ab6b90dd3f..63bdb29b2549 100644
--- a/arch/x86/kernel/early-quirks.c
+++ b/arch/x86/kernel/early-quirks.c
@@ -196,15 +196,23 @@ static void __init ati_bugs_contd(int num, int slot, int func)
196static void __init intel_remapping_check(int num, int slot, int func) 196static void __init intel_remapping_check(int num, int slot, int func)
197{ 197{
198 u8 revision; 198 u8 revision;
199 u16 device;
199 200
201 device = read_pci_config_16(num, slot, func, PCI_DEVICE_ID);
200 revision = read_pci_config_byte(num, slot, func, PCI_REVISION_ID); 202 revision = read_pci_config_byte(num, slot, func, PCI_REVISION_ID);
201 203
202 /* 204 /*
203 * Revision 0x13 of this chipset supports irq remapping 205 * Revision 13 of all triggering devices id in this quirk have
204 * but has an erratum that breaks its behavior, flag it as such 206 * a problem draining interrupts when irq remapping is enabled,
207 * and should be flagged as broken. Additionally revisions 0x12
208 * and 0x22 of device id 0x3405 has this problem.
205 */ 209 */
206 if (revision == 0x13) 210 if (revision == 0x13)
207 set_irq_remapping_broken(); 211 set_irq_remapping_broken();
212 else if ((device == 0x3405) &&
213 ((revision == 0x12) ||
214 (revision == 0x22)))
215 set_irq_remapping_broken();
208 216
209} 217}
210 218
@@ -239,6 +247,8 @@ static struct chipset early_qrk[] __initdata = {
239 PCI_CLASS_SERIAL_SMBUS, PCI_ANY_ID, 0, ati_bugs_contd }, 247 PCI_CLASS_SERIAL_SMBUS, PCI_ANY_ID, 0, ati_bugs_contd },
240 { PCI_VENDOR_ID_INTEL, 0x3403, PCI_CLASS_BRIDGE_HOST, 248 { PCI_VENDOR_ID_INTEL, 0x3403, PCI_CLASS_BRIDGE_HOST,
241 PCI_BASE_CLASS_BRIDGE, 0, intel_remapping_check }, 249 PCI_BASE_CLASS_BRIDGE, 0, intel_remapping_check },
250 { PCI_VENDOR_ID_INTEL, 0x3405, PCI_CLASS_BRIDGE_HOST,
251 PCI_BASE_CLASS_BRIDGE, 0, intel_remapping_check },
242 { PCI_VENDOR_ID_INTEL, 0x3406, PCI_CLASS_BRIDGE_HOST, 252 { PCI_VENDOR_ID_INTEL, 0x3406, PCI_CLASS_BRIDGE_HOST,
243 PCI_BASE_CLASS_BRIDGE, 0, intel_remapping_check }, 253 PCI_BASE_CLASS_BRIDGE, 0, intel_remapping_check },
244 {} 254 {}
diff --git a/arch/x86/kernel/head_32.S b/arch/x86/kernel/head_32.S
index e65ddc62e113..5dd87a89f011 100644
--- a/arch/x86/kernel/head_32.S
+++ b/arch/x86/kernel/head_32.S
@@ -292,7 +292,6 @@ ENDPROC(start_cpu0)
292 * If cpu hotplug is not supported then this code can go in init section 292 * If cpu hotplug is not supported then this code can go in init section
293 * which will be freed later 293 * which will be freed later
294 */ 294 */
295__CPUINIT
296ENTRY(startup_32_smp) 295ENTRY(startup_32_smp)
297 cld 296 cld
298 movl $(__BOOT_DS),%eax 297 movl $(__BOOT_DS),%eax
diff --git a/arch/x86/kernel/head_64.S b/arch/x86/kernel/head_64.S
index 5e4d8a8a5c40..e1aabdb314c8 100644
--- a/arch/x86/kernel/head_64.S
+++ b/arch/x86/kernel/head_64.S
@@ -512,21 +512,6 @@ ENTRY(phys_base)
512 512
513#include "../../x86/xen/xen-head.S" 513#include "../../x86/xen/xen-head.S"
514 514
515 .section .bss, "aw", @nobits
516 .align L1_CACHE_BYTES
517ENTRY(idt_table)
518 .skip IDT_ENTRIES * 16
519
520 .align L1_CACHE_BYTES
521ENTRY(debug_idt_table)
522 .skip IDT_ENTRIES * 16
523
524#ifdef CONFIG_TRACING
525 .align L1_CACHE_BYTES
526ENTRY(trace_idt_table)
527 .skip IDT_ENTRIES * 16
528#endif
529
530 __PAGE_ALIGNED_BSS 515 __PAGE_ALIGNED_BSS
531NEXT_PAGE(empty_zero_page) 516NEXT_PAGE(empty_zero_page)
532 .skip PAGE_SIZE 517 .skip PAGE_SIZE
diff --git a/arch/x86/kernel/i387.c b/arch/x86/kernel/i387.c
index b627746f6b1a..5d576ab34403 100644
--- a/arch/x86/kernel/i387.c
+++ b/arch/x86/kernel/i387.c
@@ -108,15 +108,15 @@ EXPORT_SYMBOL(unlazy_fpu);
108unsigned int mxcsr_feature_mask __read_mostly = 0xffffffffu; 108unsigned int mxcsr_feature_mask __read_mostly = 0xffffffffu;
109unsigned int xstate_size; 109unsigned int xstate_size;
110EXPORT_SYMBOL_GPL(xstate_size); 110EXPORT_SYMBOL_GPL(xstate_size);
111static struct i387_fxsave_struct fx_scratch __cpuinitdata; 111static struct i387_fxsave_struct fx_scratch;
112 112
113static void __cpuinit mxcsr_feature_mask_init(void) 113static void mxcsr_feature_mask_init(void)
114{ 114{
115 unsigned long mask = 0; 115 unsigned long mask = 0;
116 116
117 if (cpu_has_fxsr) { 117 if (cpu_has_fxsr) {
118 memset(&fx_scratch, 0, sizeof(struct i387_fxsave_struct)); 118 memset(&fx_scratch, 0, sizeof(struct i387_fxsave_struct));
119 asm volatile("fxsave %0" : : "m" (fx_scratch)); 119 asm volatile("fxsave %0" : "+m" (fx_scratch));
120 mask = fx_scratch.mxcsr_mask; 120 mask = fx_scratch.mxcsr_mask;
121 if (mask == 0) 121 if (mask == 0)
122 mask = 0x0000ffbf; 122 mask = 0x0000ffbf;
@@ -124,7 +124,7 @@ static void __cpuinit mxcsr_feature_mask_init(void)
124 mxcsr_feature_mask &= mask; 124 mxcsr_feature_mask &= mask;
125} 125}
126 126
127static void __cpuinit init_thread_xstate(void) 127static void init_thread_xstate(void)
128{ 128{
129 /* 129 /*
130 * Note that xstate_size might be overwriten later during 130 * Note that xstate_size might be overwriten later during
@@ -153,7 +153,7 @@ static void __cpuinit init_thread_xstate(void)
153 * into all processes. 153 * into all processes.
154 */ 154 */
155 155
156void __cpuinit fpu_init(void) 156void fpu_init(void)
157{ 157{
158 unsigned long cr0; 158 unsigned long cr0;
159 unsigned long cr4_mask = 0; 159 unsigned long cr4_mask = 0;
@@ -608,7 +608,7 @@ static int __init no_387(char *s)
608 608
609__setup("no387", no_387); 609__setup("no387", no_387);
610 610
611void __cpuinit fpu_detect(struct cpuinfo_x86 *c) 611void fpu_detect(struct cpuinfo_x86 *c)
612{ 612{
613 unsigned long cr0; 613 unsigned long cr0;
614 u16 fsw, fcw; 614 u16 fsw, fcw;
diff --git a/arch/x86/kernel/irq_32.c b/arch/x86/kernel/irq_32.c
index 344faf8d0d62..4186755f1d7c 100644
--- a/arch/x86/kernel/irq_32.c
+++ b/arch/x86/kernel/irq_32.c
@@ -119,7 +119,7 @@ execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq)
119/* 119/*
120 * allocate per-cpu stacks for hardirq and for softirq processing 120 * allocate per-cpu stacks for hardirq and for softirq processing
121 */ 121 */
122void __cpuinit irq_ctx_init(int cpu) 122void irq_ctx_init(int cpu)
123{ 123{
124 union irq_ctx *irqctx; 124 union irq_ctx *irqctx;
125 125
diff --git a/arch/x86/kernel/kvm.c b/arch/x86/kernel/kvm.c
index cd6d9a5a42f6..a96d32cc55b8 100644
--- a/arch/x86/kernel/kvm.c
+++ b/arch/x86/kernel/kvm.c
@@ -320,7 +320,7 @@ static void kvm_guest_apic_eoi_write(u32 reg, u32 val)
320 apic_write(APIC_EOI, APIC_EOI_ACK); 320 apic_write(APIC_EOI, APIC_EOI_ACK);
321} 321}
322 322
323void __cpuinit kvm_guest_cpu_init(void) 323void kvm_guest_cpu_init(void)
324{ 324{
325 if (!kvm_para_available()) 325 if (!kvm_para_available())
326 return; 326 return;
@@ -421,7 +421,7 @@ static void __init kvm_smp_prepare_boot_cpu(void)
421 native_smp_prepare_boot_cpu(); 421 native_smp_prepare_boot_cpu();
422} 422}
423 423
424static void __cpuinit kvm_guest_cpu_online(void *dummy) 424static void kvm_guest_cpu_online(void *dummy)
425{ 425{
426 kvm_guest_cpu_init(); 426 kvm_guest_cpu_init();
427} 427}
@@ -435,8 +435,8 @@ static void kvm_guest_cpu_offline(void *dummy)
435 apf_task_wake_all(); 435 apf_task_wake_all();
436} 436}
437 437
438static int __cpuinit kvm_cpu_notify(struct notifier_block *self, 438static int kvm_cpu_notify(struct notifier_block *self, unsigned long action,
439 unsigned long action, void *hcpu) 439 void *hcpu)
440{ 440{
441 int cpu = (unsigned long)hcpu; 441 int cpu = (unsigned long)hcpu;
442 switch (action) { 442 switch (action) {
@@ -455,7 +455,7 @@ static int __cpuinit kvm_cpu_notify(struct notifier_block *self,
455 return NOTIFY_OK; 455 return NOTIFY_OK;
456} 456}
457 457
458static struct notifier_block __cpuinitdata kvm_cpu_notifier = { 458static struct notifier_block kvm_cpu_notifier = {
459 .notifier_call = kvm_cpu_notify, 459 .notifier_call = kvm_cpu_notify,
460}; 460};
461#endif 461#endif
diff --git a/arch/x86/kernel/kvmclock.c b/arch/x86/kernel/kvmclock.c
index 1f354f4b602b..1570e0741344 100644
--- a/arch/x86/kernel/kvmclock.c
+++ b/arch/x86/kernel/kvmclock.c
@@ -182,7 +182,7 @@ static void kvm_restore_sched_clock_state(void)
182} 182}
183 183
184#ifdef CONFIG_X86_LOCAL_APIC 184#ifdef CONFIG_X86_LOCAL_APIC
185static void __cpuinit kvm_setup_secondary_clock(void) 185static void kvm_setup_secondary_clock(void)
186{ 186{
187 /* 187 /*
188 * Now that the first cpu already had this clocksource initialized, 188 * Now that the first cpu already had this clocksource initialized,
diff --git a/arch/x86/kernel/microcode_amd.c b/arch/x86/kernel/microcode_amd.c
index 47ebb1dbfbcb..7a0adb7ee433 100644
--- a/arch/x86/kernel/microcode_amd.c
+++ b/arch/x86/kernel/microcode_amd.c
@@ -220,12 +220,13 @@ int apply_microcode_amd(int cpu)
220 return 0; 220 return 0;
221 } 221 }
222 222
223 if (__apply_microcode_amd(mc_amd)) 223 if (__apply_microcode_amd(mc_amd)) {
224 pr_err("CPU%d: update failed for patch_level=0x%08x\n", 224 pr_err("CPU%d: update failed for patch_level=0x%08x\n",
225 cpu, mc_amd->hdr.patch_id); 225 cpu, mc_amd->hdr.patch_id);
226 else 226 return -1;
227 pr_info("CPU%d: new patch_level=0x%08x\n", cpu, 227 }
228 mc_amd->hdr.patch_id); 228 pr_info("CPU%d: new patch_level=0x%08x\n", cpu,
229 mc_amd->hdr.patch_id);
229 230
230 uci->cpu_sig.rev = mc_amd->hdr.patch_id; 231 uci->cpu_sig.rev = mc_amd->hdr.patch_id;
231 c->microcode = mc_amd->hdr.patch_id; 232 c->microcode = mc_amd->hdr.patch_id;
diff --git a/arch/x86/kernel/microcode_amd_early.c b/arch/x86/kernel/microcode_amd_early.c
index 1ac6e9aee766..1d14ffee5749 100644
--- a/arch/x86/kernel/microcode_amd_early.c
+++ b/arch/x86/kernel/microcode_amd_early.c
@@ -82,7 +82,7 @@ static struct cpio_data __init find_ucode_in_initrd(void)
82 * load_microcode_amd() to save equivalent cpu table and microcode patches in 82 * load_microcode_amd() to save equivalent cpu table and microcode patches in
83 * kernel heap memory. 83 * kernel heap memory.
84 */ 84 */
85static void __cpuinit apply_ucode_in_initrd(void *ucode, size_t size) 85static void apply_ucode_in_initrd(void *ucode, size_t size)
86{ 86{
87 struct equiv_cpu_entry *eq; 87 struct equiv_cpu_entry *eq;
88 u32 *header; 88 u32 *header;
@@ -206,7 +206,7 @@ u8 amd_bsp_mpb[MPB_MAX_SIZE];
206 * save_microcode_in_initrd_amd() BSP's patch is copied to amd_bsp_mpb, which 206 * save_microcode_in_initrd_amd() BSP's patch is copied to amd_bsp_mpb, which
207 * is used upon resume from suspend. 207 * is used upon resume from suspend.
208 */ 208 */
209void __cpuinit load_ucode_amd_ap(void) 209void load_ucode_amd_ap(void)
210{ 210{
211 struct microcode_amd *mc; 211 struct microcode_amd *mc;
212 unsigned long *initrd; 212 unsigned long *initrd;
@@ -238,7 +238,7 @@ static void __init collect_cpu_sig_on_bsp(void *arg)
238 uci->cpu_sig.sig = cpuid_eax(0x00000001); 238 uci->cpu_sig.sig = cpuid_eax(0x00000001);
239} 239}
240#else 240#else
241static void __cpuinit collect_cpu_info_amd_early(struct cpuinfo_x86 *c, 241static void collect_cpu_info_amd_early(struct cpuinfo_x86 *c,
242 struct ucode_cpu_info *uci) 242 struct ucode_cpu_info *uci)
243{ 243{
244 u32 rev, eax; 244 u32 rev, eax;
@@ -252,7 +252,7 @@ static void __cpuinit collect_cpu_info_amd_early(struct cpuinfo_x86 *c,
252 c->x86 = ((eax >> 8) & 0xf) + ((eax >> 20) & 0xff); 252 c->x86 = ((eax >> 8) & 0xf) + ((eax >> 20) & 0xff);
253} 253}
254 254
255void __cpuinit load_ucode_amd_ap(void) 255void load_ucode_amd_ap(void)
256{ 256{
257 unsigned int cpu = smp_processor_id(); 257 unsigned int cpu = smp_processor_id();
258 258
diff --git a/arch/x86/kernel/microcode_core.c b/arch/x86/kernel/microcode_core.c
index 22db92bbdf1a..15c987698b0f 100644
--- a/arch/x86/kernel/microcode_core.c
+++ b/arch/x86/kernel/microcode_core.c
@@ -468,7 +468,7 @@ static struct syscore_ops mc_syscore_ops = {
468 .resume = mc_bp_resume, 468 .resume = mc_bp_resume,
469}; 469};
470 470
471static __cpuinit int 471static int
472mc_cpu_callback(struct notifier_block *nb, unsigned long action, void *hcpu) 472mc_cpu_callback(struct notifier_block *nb, unsigned long action, void *hcpu)
473{ 473{
474 unsigned int cpu = (unsigned long)hcpu; 474 unsigned int cpu = (unsigned long)hcpu;
diff --git a/arch/x86/kernel/microcode_core_early.c b/arch/x86/kernel/microcode_core_early.c
index 86119f63db0c..be7f8514f577 100644
--- a/arch/x86/kernel/microcode_core_early.c
+++ b/arch/x86/kernel/microcode_core_early.c
@@ -41,7 +41,7 @@
41 * 41 *
42 * x86_vendor() gets vendor information directly through cpuid. 42 * x86_vendor() gets vendor information directly through cpuid.
43 */ 43 */
44static int __cpuinit x86_vendor(void) 44static int x86_vendor(void)
45{ 45{
46 u32 eax = 0x00000000; 46 u32 eax = 0x00000000;
47 u32 ebx, ecx = 0, edx; 47 u32 ebx, ecx = 0, edx;
@@ -57,7 +57,7 @@ static int __cpuinit x86_vendor(void)
57 return X86_VENDOR_UNKNOWN; 57 return X86_VENDOR_UNKNOWN;
58} 58}
59 59
60static int __cpuinit x86_family(void) 60static int x86_family(void)
61{ 61{
62 u32 eax = 0x00000001; 62 u32 eax = 0x00000001;
63 u32 ebx, ecx = 0, edx; 63 u32 ebx, ecx = 0, edx;
@@ -96,7 +96,7 @@ void __init load_ucode_bsp(void)
96 } 96 }
97} 97}
98 98
99void __cpuinit load_ucode_ap(void) 99void load_ucode_ap(void)
100{ 100{
101 int vendor, x86; 101 int vendor, x86;
102 102
diff --git a/arch/x86/kernel/microcode_intel_early.c b/arch/x86/kernel/microcode_intel_early.c
index dabef95506f3..1575deb2e636 100644
--- a/arch/x86/kernel/microcode_intel_early.c
+++ b/arch/x86/kernel/microcode_intel_early.c
@@ -34,7 +34,7 @@ struct mc_saved_data {
34 struct microcode_intel **mc_saved; 34 struct microcode_intel **mc_saved;
35} mc_saved_data; 35} mc_saved_data;
36 36
37static enum ucode_state __cpuinit 37static enum ucode_state
38generic_load_microcode_early(struct microcode_intel **mc_saved_p, 38generic_load_microcode_early(struct microcode_intel **mc_saved_p,
39 unsigned int mc_saved_count, 39 unsigned int mc_saved_count,
40 struct ucode_cpu_info *uci) 40 struct ucode_cpu_info *uci)
@@ -69,7 +69,7 @@ out:
69 return state; 69 return state;
70} 70}
71 71
72static void __cpuinit 72static void
73microcode_pointer(struct microcode_intel **mc_saved, 73microcode_pointer(struct microcode_intel **mc_saved,
74 unsigned long *mc_saved_in_initrd, 74 unsigned long *mc_saved_in_initrd,
75 unsigned long initrd_start, int mc_saved_count) 75 unsigned long initrd_start, int mc_saved_count)
@@ -82,7 +82,7 @@ microcode_pointer(struct microcode_intel **mc_saved,
82} 82}
83 83
84#ifdef CONFIG_X86_32 84#ifdef CONFIG_X86_32
85static void __cpuinit 85static void
86microcode_phys(struct microcode_intel **mc_saved_tmp, 86microcode_phys(struct microcode_intel **mc_saved_tmp,
87 struct mc_saved_data *mc_saved_data) 87 struct mc_saved_data *mc_saved_data)
88{ 88{
@@ -101,7 +101,7 @@ microcode_phys(struct microcode_intel **mc_saved_tmp,
101} 101}
102#endif 102#endif
103 103
104static enum ucode_state __cpuinit 104static enum ucode_state
105load_microcode(struct mc_saved_data *mc_saved_data, 105load_microcode(struct mc_saved_data *mc_saved_data,
106 unsigned long *mc_saved_in_initrd, 106 unsigned long *mc_saved_in_initrd,
107 unsigned long initrd_start, 107 unsigned long initrd_start,
@@ -375,7 +375,7 @@ do { \
375#define native_wrmsr(msr, low, high) \ 375#define native_wrmsr(msr, low, high) \
376 native_write_msr(msr, low, high); 376 native_write_msr(msr, low, high);
377 377
378static int __cpuinit collect_cpu_info_early(struct ucode_cpu_info *uci) 378static int collect_cpu_info_early(struct ucode_cpu_info *uci)
379{ 379{
380 unsigned int val[2]; 380 unsigned int val[2];
381 u8 x86, x86_model; 381 u8 x86, x86_model;
@@ -584,7 +584,7 @@ scan_microcode(unsigned long start, unsigned long end,
584/* 584/*
585 * Print ucode update info. 585 * Print ucode update info.
586 */ 586 */
587static void __cpuinit 587static void
588print_ucode_info(struct ucode_cpu_info *uci, unsigned int date) 588print_ucode_info(struct ucode_cpu_info *uci, unsigned int date)
589{ 589{
590 int cpu = smp_processor_id(); 590 int cpu = smp_processor_id();
@@ -605,7 +605,7 @@ static int current_mc_date;
605/* 605/*
606 * Print early updated ucode info after printk works. This is delayed info dump. 606 * Print early updated ucode info after printk works. This is delayed info dump.
607 */ 607 */
608void __cpuinit show_ucode_info_early(void) 608void show_ucode_info_early(void)
609{ 609{
610 struct ucode_cpu_info uci; 610 struct ucode_cpu_info uci;
611 611
@@ -621,7 +621,7 @@ void __cpuinit show_ucode_info_early(void)
621 * mc_saved_data.mc_saved and delay printing microcode info in 621 * mc_saved_data.mc_saved and delay printing microcode info in
622 * show_ucode_info_early() until printk() works. 622 * show_ucode_info_early() until printk() works.
623 */ 623 */
624static void __cpuinit print_ucode(struct ucode_cpu_info *uci) 624static void print_ucode(struct ucode_cpu_info *uci)
625{ 625{
626 struct microcode_intel *mc_intel; 626 struct microcode_intel *mc_intel;
627 int *delay_ucode_info_p; 627 int *delay_ucode_info_p;
@@ -643,12 +643,12 @@ static void __cpuinit print_ucode(struct ucode_cpu_info *uci)
643 * Flush global tlb. We only do this in x86_64 where paging has been enabled 643 * Flush global tlb. We only do this in x86_64 where paging has been enabled
644 * already and PGE should be enabled as well. 644 * already and PGE should be enabled as well.
645 */ 645 */
646static inline void __cpuinit flush_tlb_early(void) 646static inline void flush_tlb_early(void)
647{ 647{
648 __native_flush_tlb_global_irq_disabled(); 648 __native_flush_tlb_global_irq_disabled();
649} 649}
650 650
651static inline void __cpuinit print_ucode(struct ucode_cpu_info *uci) 651static inline void print_ucode(struct ucode_cpu_info *uci)
652{ 652{
653 struct microcode_intel *mc_intel; 653 struct microcode_intel *mc_intel;
654 654
@@ -660,8 +660,8 @@ static inline void __cpuinit print_ucode(struct ucode_cpu_info *uci)
660} 660}
661#endif 661#endif
662 662
663static int __cpuinit apply_microcode_early(struct mc_saved_data *mc_saved_data, 663static int apply_microcode_early(struct mc_saved_data *mc_saved_data,
664 struct ucode_cpu_info *uci) 664 struct ucode_cpu_info *uci)
665{ 665{
666 struct microcode_intel *mc_intel; 666 struct microcode_intel *mc_intel;
667 unsigned int val[2]; 667 unsigned int val[2];
@@ -763,7 +763,7 @@ load_ucode_intel_bsp(void)
763#endif 763#endif
764} 764}
765 765
766void __cpuinit load_ucode_intel_ap(void) 766void load_ucode_intel_ap(void)
767{ 767{
768 struct mc_saved_data *mc_saved_data_p; 768 struct mc_saved_data *mc_saved_data_p;
769 struct ucode_cpu_info uci; 769 struct ucode_cpu_info uci;
diff --git a/arch/x86/kernel/mmconf-fam10h_64.c b/arch/x86/kernel/mmconf-fam10h_64.c
index ac861b8348e2..f4c886d9165c 100644
--- a/arch/x86/kernel/mmconf-fam10h_64.c
+++ b/arch/x86/kernel/mmconf-fam10h_64.c
@@ -24,14 +24,14 @@ struct pci_hostbridge_probe {
24 u32 device; 24 u32 device;
25}; 25};
26 26
27static u64 __cpuinitdata fam10h_pci_mmconf_base; 27static u64 fam10h_pci_mmconf_base;
28 28
29static struct pci_hostbridge_probe pci_probes[] __cpuinitdata = { 29static struct pci_hostbridge_probe pci_probes[] = {
30 { 0, 0x18, PCI_VENDOR_ID_AMD, 0x1200 }, 30 { 0, 0x18, PCI_VENDOR_ID_AMD, 0x1200 },
31 { 0xff, 0, PCI_VENDOR_ID_AMD, 0x1200 }, 31 { 0xff, 0, PCI_VENDOR_ID_AMD, 0x1200 },
32}; 32};
33 33
34static int __cpuinit cmp_range(const void *x1, const void *x2) 34static int cmp_range(const void *x1, const void *x2)
35{ 35{
36 const struct range *r1 = x1; 36 const struct range *r1 = x1;
37 const struct range *r2 = x2; 37 const struct range *r2 = x2;
@@ -49,7 +49,7 @@ static int __cpuinit cmp_range(const void *x1, const void *x2)
49/* need to avoid (0xfd<<32), (0xfe<<32), and (0xff<<32), ht used space */ 49/* need to avoid (0xfd<<32), (0xfe<<32), and (0xff<<32), ht used space */
50#define FAM10H_PCI_MMCONF_BASE (0xfcULL<<32) 50#define FAM10H_PCI_MMCONF_BASE (0xfcULL<<32)
51#define BASE_VALID(b) ((b) + MMCONF_SIZE <= (0xfdULL<<32) || (b) >= (1ULL<<40)) 51#define BASE_VALID(b) ((b) + MMCONF_SIZE <= (0xfdULL<<32) || (b) >= (1ULL<<40))
52static void __cpuinit get_fam10h_pci_mmconf_base(void) 52static void get_fam10h_pci_mmconf_base(void)
53{ 53{
54 int i; 54 int i;
55 unsigned bus; 55 unsigned bus;
@@ -166,7 +166,7 @@ out:
166 fam10h_pci_mmconf_base = base; 166 fam10h_pci_mmconf_base = base;
167} 167}
168 168
169void __cpuinit fam10h_check_enable_mmcfg(void) 169void fam10h_check_enable_mmcfg(void)
170{ 170{
171 u64 val; 171 u64 val;
172 u32 address; 172 u32 address;
@@ -230,7 +230,7 @@ static const struct dmi_system_id __initconst mmconf_dmi_table[] = {
230 {} 230 {}
231}; 231};
232 232
233/* Called from a __cpuinit function, but only on the BSP. */ 233/* Called from a non __init function, but only on the BSP. */
234void __ref check_enable_amd_mmconf_dmi(void) 234void __ref check_enable_amd_mmconf_dmi(void)
235{ 235{
236 dmi_check_system(mmconf_dmi_table); 236 dmi_check_system(mmconf_dmi_table);
diff --git a/arch/x86/kernel/msr.c b/arch/x86/kernel/msr.c
index ce130493b802..88458faea2f8 100644
--- a/arch/x86/kernel/msr.c
+++ b/arch/x86/kernel/msr.c
@@ -200,7 +200,7 @@ static const struct file_operations msr_fops = {
200 .compat_ioctl = msr_ioctl, 200 .compat_ioctl = msr_ioctl,
201}; 201};
202 202
203static int __cpuinit msr_device_create(int cpu) 203static int msr_device_create(int cpu)
204{ 204{
205 struct device *dev; 205 struct device *dev;
206 206
@@ -214,8 +214,8 @@ static void msr_device_destroy(int cpu)
214 device_destroy(msr_class, MKDEV(MSR_MAJOR, cpu)); 214 device_destroy(msr_class, MKDEV(MSR_MAJOR, cpu));
215} 215}
216 216
217static int __cpuinit msr_class_cpu_callback(struct notifier_block *nfb, 217static int msr_class_cpu_callback(struct notifier_block *nfb,
218 unsigned long action, void *hcpu) 218 unsigned long action, void *hcpu)
219{ 219{
220 unsigned int cpu = (unsigned long)hcpu; 220 unsigned int cpu = (unsigned long)hcpu;
221 int err = 0; 221 int err = 0;
diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c
index 81a5f5e8f142..83369e5a1d27 100644
--- a/arch/x86/kernel/process.c
+++ b/arch/x86/kernel/process.c
@@ -398,7 +398,7 @@ static void amd_e400_idle(void)
398 default_idle(); 398 default_idle();
399} 399}
400 400
401void __cpuinit select_idle_routine(const struct cpuinfo_x86 *c) 401void select_idle_routine(const struct cpuinfo_x86 *c)
402{ 402{
403#ifdef CONFIG_SMP 403#ifdef CONFIG_SMP
404 if (boot_option_idle_override == IDLE_POLL && smp_num_siblings > 1) 404 if (boot_option_idle_override == IDLE_POLL && smp_num_siblings > 1)
diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c
index e68709da8251..f8ec57815c05 100644
--- a/arch/x86/kernel/setup.c
+++ b/arch/x86/kernel/setup.c
@@ -170,7 +170,7 @@ static struct resource bss_resource = {
170 170
171#ifdef CONFIG_X86_32 171#ifdef CONFIG_X86_32
172/* cpu data as detected by the assembly code in head.S */ 172/* cpu data as detected by the assembly code in head.S */
173struct cpuinfo_x86 new_cpu_data __cpuinitdata = { 173struct cpuinfo_x86 new_cpu_data = {
174 .wp_works_ok = -1, 174 .wp_works_ok = -1,
175}; 175};
176/* common cpu data for all cpus */ 176/* common cpu data for all cpus */
diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
index bfd348e99369..aecc98a93d1b 100644
--- a/arch/x86/kernel/smpboot.c
+++ b/arch/x86/kernel/smpboot.c
@@ -130,7 +130,7 @@ atomic_t init_deasserted;
130 * Report back to the Boot Processor during boot time or to the caller processor 130 * Report back to the Boot Processor during boot time or to the caller processor
131 * during CPU online. 131 * during CPU online.
132 */ 132 */
133static void __cpuinit smp_callin(void) 133static void smp_callin(void)
134{ 134{
135 int cpuid, phys_id; 135 int cpuid, phys_id;
136 unsigned long timeout; 136 unsigned long timeout;
@@ -237,7 +237,7 @@ static int enable_start_cpu0;
237/* 237/*
238 * Activate a secondary processor. 238 * Activate a secondary processor.
239 */ 239 */
240notrace static void __cpuinit start_secondary(void *unused) 240static void notrace start_secondary(void *unused)
241{ 241{
242 /* 242 /*
243 * Don't put *anything* before cpu_init(), SMP booting is too 243 * Don't put *anything* before cpu_init(), SMP booting is too
@@ -300,7 +300,7 @@ void __init smp_store_boot_cpu_info(void)
300 * The bootstrap kernel entry code has set these up. Save them for 300 * The bootstrap kernel entry code has set these up. Save them for
301 * a given CPU 301 * a given CPU
302 */ 302 */
303void __cpuinit smp_store_cpu_info(int id) 303void smp_store_cpu_info(int id)
304{ 304{
305 struct cpuinfo_x86 *c = &cpu_data(id); 305 struct cpuinfo_x86 *c = &cpu_data(id);
306 306
@@ -313,7 +313,7 @@ void __cpuinit smp_store_cpu_info(int id)
313 identify_secondary_cpu(c); 313 identify_secondary_cpu(c);
314} 314}
315 315
316static bool __cpuinit 316static bool
317topology_sane(struct cpuinfo_x86 *c, struct cpuinfo_x86 *o, const char *name) 317topology_sane(struct cpuinfo_x86 *c, struct cpuinfo_x86 *o, const char *name)
318{ 318{
319 int cpu1 = c->cpu_index, cpu2 = o->cpu_index; 319 int cpu1 = c->cpu_index, cpu2 = o->cpu_index;
@@ -330,7 +330,7 @@ do { \
330 cpumask_set_cpu((c2), cpu_##_m##_mask(c1)); \ 330 cpumask_set_cpu((c2), cpu_##_m##_mask(c1)); \
331} while (0) 331} while (0)
332 332
333static bool __cpuinit match_smt(struct cpuinfo_x86 *c, struct cpuinfo_x86 *o) 333static bool match_smt(struct cpuinfo_x86 *c, struct cpuinfo_x86 *o)
334{ 334{
335 if (cpu_has_topoext) { 335 if (cpu_has_topoext) {
336 int cpu1 = c->cpu_index, cpu2 = o->cpu_index; 336 int cpu1 = c->cpu_index, cpu2 = o->cpu_index;
@@ -348,7 +348,7 @@ static bool __cpuinit match_smt(struct cpuinfo_x86 *c, struct cpuinfo_x86 *o)
348 return false; 348 return false;
349} 349}
350 350
351static bool __cpuinit match_llc(struct cpuinfo_x86 *c, struct cpuinfo_x86 *o) 351static bool match_llc(struct cpuinfo_x86 *c, struct cpuinfo_x86 *o)
352{ 352{
353 int cpu1 = c->cpu_index, cpu2 = o->cpu_index; 353 int cpu1 = c->cpu_index, cpu2 = o->cpu_index;
354 354
@@ -359,7 +359,7 @@ static bool __cpuinit match_llc(struct cpuinfo_x86 *c, struct cpuinfo_x86 *o)
359 return false; 359 return false;
360} 360}
361 361
362static bool __cpuinit match_mc(struct cpuinfo_x86 *c, struct cpuinfo_x86 *o) 362static bool match_mc(struct cpuinfo_x86 *c, struct cpuinfo_x86 *o)
363{ 363{
364 if (c->phys_proc_id == o->phys_proc_id) { 364 if (c->phys_proc_id == o->phys_proc_id) {
365 if (cpu_has(c, X86_FEATURE_AMD_DCM)) 365 if (cpu_has(c, X86_FEATURE_AMD_DCM))
@@ -370,7 +370,7 @@ static bool __cpuinit match_mc(struct cpuinfo_x86 *c, struct cpuinfo_x86 *o)
370 return false; 370 return false;
371} 371}
372 372
373void __cpuinit set_cpu_sibling_map(int cpu) 373void set_cpu_sibling_map(int cpu)
374{ 374{
375 bool has_smt = smp_num_siblings > 1; 375 bool has_smt = smp_num_siblings > 1;
376 bool has_mp = has_smt || boot_cpu_data.x86_max_cores > 1; 376 bool has_mp = has_smt || boot_cpu_data.x86_max_cores > 1;
@@ -499,7 +499,7 @@ void __inquire_remote_apic(int apicid)
499 * INIT, INIT, STARTUP sequence will reset the chip hard for us, and this 499 * INIT, INIT, STARTUP sequence will reset the chip hard for us, and this
500 * won't ... remember to clear down the APIC, etc later. 500 * won't ... remember to clear down the APIC, etc later.
501 */ 501 */
502int __cpuinit 502int
503wakeup_secondary_cpu_via_nmi(int apicid, unsigned long start_eip) 503wakeup_secondary_cpu_via_nmi(int apicid, unsigned long start_eip)
504{ 504{
505 unsigned long send_status, accept_status = 0; 505 unsigned long send_status, accept_status = 0;
@@ -533,7 +533,7 @@ wakeup_secondary_cpu_via_nmi(int apicid, unsigned long start_eip)
533 return (send_status | accept_status); 533 return (send_status | accept_status);
534} 534}
535 535
536static int __cpuinit 536static int
537wakeup_secondary_cpu_via_init(int phys_apicid, unsigned long start_eip) 537wakeup_secondary_cpu_via_init(int phys_apicid, unsigned long start_eip)
538{ 538{
539 unsigned long send_status, accept_status = 0; 539 unsigned long send_status, accept_status = 0;
@@ -649,7 +649,7 @@ wakeup_secondary_cpu_via_init(int phys_apicid, unsigned long start_eip)
649} 649}
650 650
651/* reduce the number of lines printed when booting a large cpu count system */ 651/* reduce the number of lines printed when booting a large cpu count system */
652static void __cpuinit announce_cpu(int cpu, int apicid) 652static void announce_cpu(int cpu, int apicid)
653{ 653{
654 static int current_node = -1; 654 static int current_node = -1;
655 int node = early_cpu_to_node(cpu); 655 int node = early_cpu_to_node(cpu);
@@ -691,7 +691,7 @@ static int wakeup_cpu0_nmi(unsigned int cmd, struct pt_regs *regs)
691 * We'll change this code in the future to wake up hard offlined CPU0 if 691 * We'll change this code in the future to wake up hard offlined CPU0 if
692 * real platform and request are available. 692 * real platform and request are available.
693 */ 693 */
694static int __cpuinit 694static int
695wakeup_cpu_via_init_nmi(int cpu, unsigned long start_ip, int apicid, 695wakeup_cpu_via_init_nmi(int cpu, unsigned long start_ip, int apicid,
696 int *cpu0_nmi_registered) 696 int *cpu0_nmi_registered)
697{ 697{
@@ -731,7 +731,7 @@ wakeup_cpu_via_init_nmi(int cpu, unsigned long start_ip, int apicid,
731 * Returns zero if CPU booted OK, else error code from 731 * Returns zero if CPU booted OK, else error code from
732 * ->wakeup_secondary_cpu. 732 * ->wakeup_secondary_cpu.
733 */ 733 */
734static int __cpuinit do_boot_cpu(int apicid, int cpu, struct task_struct *idle) 734static int do_boot_cpu(int apicid, int cpu, struct task_struct *idle)
735{ 735{
736 volatile u32 *trampoline_status = 736 volatile u32 *trampoline_status =
737 (volatile u32 *) __va(real_mode_header->trampoline_status); 737 (volatile u32 *) __va(real_mode_header->trampoline_status);
@@ -872,7 +872,7 @@ static int __cpuinit do_boot_cpu(int apicid, int cpu, struct task_struct *idle)
872 return boot_error; 872 return boot_error;
873} 873}
874 874
875int __cpuinit native_cpu_up(unsigned int cpu, struct task_struct *tidle) 875int native_cpu_up(unsigned int cpu, struct task_struct *tidle)
876{ 876{
877 int apicid = apic->cpu_present_to_apicid(cpu); 877 int apicid = apic->cpu_present_to_apicid(cpu);
878 unsigned long flags; 878 unsigned long flags;
diff --git a/arch/x86/kernel/tboot.c b/arch/x86/kernel/tboot.c
index 3ff42d2f046d..addf7b58f4e8 100644
--- a/arch/x86/kernel/tboot.c
+++ b/arch/x86/kernel/tboot.c
@@ -320,8 +320,8 @@ static int tboot_wait_for_aps(int num_aps)
320 return !(atomic_read((atomic_t *)&tboot->num_in_wfs) == num_aps); 320 return !(atomic_read((atomic_t *)&tboot->num_in_wfs) == num_aps);
321} 321}
322 322
323static int __cpuinit tboot_cpu_callback(struct notifier_block *nfb, 323static int tboot_cpu_callback(struct notifier_block *nfb, unsigned long action,
324 unsigned long action, void *hcpu) 324 void *hcpu)
325{ 325{
326 switch (action) { 326 switch (action) {
327 case CPU_DYING: 327 case CPU_DYING:
@@ -334,7 +334,7 @@ static int __cpuinit tboot_cpu_callback(struct notifier_block *nfb,
334 return NOTIFY_OK; 334 return NOTIFY_OK;
335} 335}
336 336
337static struct notifier_block tboot_cpu_notifier __cpuinitdata = 337static struct notifier_block tboot_cpu_notifier =
338{ 338{
339 .notifier_call = tboot_cpu_callback, 339 .notifier_call = tboot_cpu_callback,
340}; 340};
diff --git a/arch/x86/kernel/tracepoint.c b/arch/x86/kernel/tracepoint.c
index 4e584a8d6edd..1c113db9ed57 100644
--- a/arch/x86/kernel/tracepoint.c
+++ b/arch/x86/kernel/tracepoint.c
@@ -12,10 +12,8 @@ atomic_t trace_idt_ctr = ATOMIC_INIT(0);
12struct desc_ptr trace_idt_descr = { NR_VECTORS * 16 - 1, 12struct desc_ptr trace_idt_descr = { NR_VECTORS * 16 - 1,
13 (unsigned long) trace_idt_table }; 13 (unsigned long) trace_idt_table };
14 14
15#ifndef CONFIG_X86_64 15/* No need to be aligned, but done to keep all IDTs defined the same way. */
16gate_desc trace_idt_table[NR_VECTORS] __page_aligned_data 16gate_desc trace_idt_table[NR_VECTORS] __page_aligned_bss;
17 = { { { { 0, 0 } } }, };
18#endif
19 17
20static int trace_irq_vector_refcount; 18static int trace_irq_vector_refcount;
21static DEFINE_MUTEX(irq_vector_mutex); 19static DEFINE_MUTEX(irq_vector_mutex);
diff --git a/arch/x86/kernel/traps.c b/arch/x86/kernel/traps.c
index b0865e88d3cc..1b23a1c92746 100644
--- a/arch/x86/kernel/traps.c
+++ b/arch/x86/kernel/traps.c
@@ -63,19 +63,19 @@
63#include <asm/x86_init.h> 63#include <asm/x86_init.h>
64#include <asm/pgalloc.h> 64#include <asm/pgalloc.h>
65#include <asm/proto.h> 65#include <asm/proto.h>
66
67/* No need to be aligned, but done to keep all IDTs defined the same way. */
68gate_desc debug_idt_table[NR_VECTORS] __page_aligned_bss;
66#else 69#else
67#include <asm/processor-flags.h> 70#include <asm/processor-flags.h>
68#include <asm/setup.h> 71#include <asm/setup.h>
69 72
70asmlinkage int system_call(void); 73asmlinkage int system_call(void);
71
72/*
73 * The IDT has to be page-aligned to simplify the Pentium
74 * F0 0F bug workaround.
75 */
76gate_desc idt_table[NR_VECTORS] __page_aligned_data = { { { { 0, 0 } } }, };
77#endif 74#endif
78 75
76/* Must be page-aligned because the real IDT is used in a fixmap. */
77gate_desc idt_table[NR_VECTORS] __page_aligned_bss;
78
79DECLARE_BITMAP(used_vectors, NR_VECTORS); 79DECLARE_BITMAP(used_vectors, NR_VECTORS);
80EXPORT_SYMBOL_GPL(used_vectors); 80EXPORT_SYMBOL_GPL(used_vectors);
81 81
diff --git a/arch/x86/kernel/tsc.c b/arch/x86/kernel/tsc.c
index 098b3cfda72e..6ff49247edf8 100644
--- a/arch/x86/kernel/tsc.c
+++ b/arch/x86/kernel/tsc.c
@@ -824,7 +824,7 @@ static void __init check_system_tsc_reliable(void)
824 * Make an educated guess if the TSC is trustworthy and synchronized 824 * Make an educated guess if the TSC is trustworthy and synchronized
825 * over all CPUs. 825 * over all CPUs.
826 */ 826 */
827__cpuinit int unsynchronized_tsc(void) 827int unsynchronized_tsc(void)
828{ 828{
829 if (!cpu_has_tsc || tsc_unstable) 829 if (!cpu_has_tsc || tsc_unstable)
830 return 1; 830 return 1;
@@ -1020,7 +1020,7 @@ void __init tsc_init(void)
1020 * been calibrated. This assumes that CONSTANT_TSC applies to all 1020 * been calibrated. This assumes that CONSTANT_TSC applies to all
1021 * cpus in the socket - this should be a safe assumption. 1021 * cpus in the socket - this should be a safe assumption.
1022 */ 1022 */
1023unsigned long __cpuinit calibrate_delay_is_known(void) 1023unsigned long calibrate_delay_is_known(void)
1024{ 1024{
1025 int i, cpu = smp_processor_id(); 1025 int i, cpu = smp_processor_id();
1026 1026
diff --git a/arch/x86/kernel/tsc_sync.c b/arch/x86/kernel/tsc_sync.c
index fc25e60a5884..adfdf56a3714 100644
--- a/arch/x86/kernel/tsc_sync.c
+++ b/arch/x86/kernel/tsc_sync.c
@@ -25,24 +25,24 @@
25 * Entry/exit counters that make sure that both CPUs 25 * Entry/exit counters that make sure that both CPUs
26 * run the measurement code at once: 26 * run the measurement code at once:
27 */ 27 */
28static __cpuinitdata atomic_t start_count; 28static atomic_t start_count;
29static __cpuinitdata atomic_t stop_count; 29static atomic_t stop_count;
30 30
31/* 31/*
32 * We use a raw spinlock in this exceptional case, because 32 * We use a raw spinlock in this exceptional case, because
33 * we want to have the fastest, inlined, non-debug version 33 * we want to have the fastest, inlined, non-debug version
34 * of a critical section, to be able to prove TSC time-warps: 34 * of a critical section, to be able to prove TSC time-warps:
35 */ 35 */
36static __cpuinitdata arch_spinlock_t sync_lock = __ARCH_SPIN_LOCK_UNLOCKED; 36static arch_spinlock_t sync_lock = __ARCH_SPIN_LOCK_UNLOCKED;
37 37
38static __cpuinitdata cycles_t last_tsc; 38static cycles_t last_tsc;
39static __cpuinitdata cycles_t max_warp; 39static cycles_t max_warp;
40static __cpuinitdata int nr_warps; 40static int nr_warps;
41 41
42/* 42/*
43 * TSC-warp measurement loop running on both CPUs: 43 * TSC-warp measurement loop running on both CPUs:
44 */ 44 */
45static __cpuinit void check_tsc_warp(unsigned int timeout) 45static void check_tsc_warp(unsigned int timeout)
46{ 46{
47 cycles_t start, now, prev, end; 47 cycles_t start, now, prev, end;
48 int i; 48 int i;
@@ -121,7 +121,7 @@ static inline unsigned int loop_timeout(int cpu)
121 * Source CPU calls into this - it waits for the freshly booted 121 * Source CPU calls into this - it waits for the freshly booted
122 * target CPU to arrive and then starts the measurement: 122 * target CPU to arrive and then starts the measurement:
123 */ 123 */
124void __cpuinit check_tsc_sync_source(int cpu) 124void check_tsc_sync_source(int cpu)
125{ 125{
126 int cpus = 2; 126 int cpus = 2;
127 127
@@ -187,7 +187,7 @@ void __cpuinit check_tsc_sync_source(int cpu)
187/* 187/*
188 * Freshly booted CPUs call into this: 188 * Freshly booted CPUs call into this:
189 */ 189 */
190void __cpuinit check_tsc_sync_target(void) 190void check_tsc_sync_target(void)
191{ 191{
192 int cpus = 2; 192 int cpus = 2;
193 193
diff --git a/arch/x86/kernel/vsyscall_64.c b/arch/x86/kernel/vsyscall_64.c
index 9a907a67be8f..1f96f9347ed9 100644
--- a/arch/x86/kernel/vsyscall_64.c
+++ b/arch/x86/kernel/vsyscall_64.c
@@ -331,7 +331,7 @@ sigsegv:
331 * Assume __initcall executes before all user space. Hopefully kmod 331 * Assume __initcall executes before all user space. Hopefully kmod
332 * doesn't violate that. We'll find out if it does. 332 * doesn't violate that. We'll find out if it does.
333 */ 333 */
334static void __cpuinit vsyscall_set_cpu(int cpu) 334static void vsyscall_set_cpu(int cpu)
335{ 335{
336 unsigned long d; 336 unsigned long d;
337 unsigned long node = 0; 337 unsigned long node = 0;
@@ -353,13 +353,13 @@ static void __cpuinit vsyscall_set_cpu(int cpu)
353 write_gdt_entry(get_cpu_gdt_table(cpu), GDT_ENTRY_PER_CPU, &d, DESCTYPE_S); 353 write_gdt_entry(get_cpu_gdt_table(cpu), GDT_ENTRY_PER_CPU, &d, DESCTYPE_S);
354} 354}
355 355
356static void __cpuinit cpu_vsyscall_init(void *arg) 356static void cpu_vsyscall_init(void *arg)
357{ 357{
358 /* preemption should be already off */ 358 /* preemption should be already off */
359 vsyscall_set_cpu(raw_smp_processor_id()); 359 vsyscall_set_cpu(raw_smp_processor_id());
360} 360}
361 361
362static int __cpuinit 362static int
363cpu_vsyscall_notifier(struct notifier_block *n, unsigned long action, void *arg) 363cpu_vsyscall_notifier(struct notifier_block *n, unsigned long action, void *arg)
364{ 364{
365 long cpu = (long)arg; 365 long cpu = (long)arg;
diff --git a/arch/x86/kernel/x86_init.c b/arch/x86/kernel/x86_init.c
index 45a14dbbddaf..5f24c71accaa 100644
--- a/arch/x86/kernel/x86_init.c
+++ b/arch/x86/kernel/x86_init.c
@@ -25,7 +25,7 @@
25#include <asm/iommu.h> 25#include <asm/iommu.h>
26#include <asm/mach_traps.h> 26#include <asm/mach_traps.h>
27 27
28void __cpuinit x86_init_noop(void) { } 28void x86_init_noop(void) { }
29void __init x86_init_uint_noop(unsigned int unused) { } 29void __init x86_init_uint_noop(unsigned int unused) { }
30int __init iommu_init_noop(void) { return 0; } 30int __init iommu_init_noop(void) { return 0; }
31void iommu_shutdown_noop(void) { } 31void iommu_shutdown_noop(void) { }
@@ -85,7 +85,7 @@ struct x86_init_ops x86_init __initdata = {
85 }, 85 },
86}; 86};
87 87
88struct x86_cpuinit_ops x86_cpuinit __cpuinitdata = { 88struct x86_cpuinit_ops x86_cpuinit = {
89 .early_percpu_clock_init = x86_init_noop, 89 .early_percpu_clock_init = x86_init_noop,
90 .setup_percpu_clockev = setup_secondary_APIC_clock, 90 .setup_percpu_clockev = setup_secondary_APIC_clock,
91}; 91};
diff --git a/arch/x86/kernel/xsave.c b/arch/x86/kernel/xsave.c
index d6c28acdf99c..422fd8223470 100644
--- a/arch/x86/kernel/xsave.c
+++ b/arch/x86/kernel/xsave.c
@@ -573,7 +573,7 @@ static void __init xstate_enable_boot_cpu(void)
573 * This is somewhat obfuscated due to the lack of powerful enough 573 * This is somewhat obfuscated due to the lack of powerful enough
574 * overrides for the section checks. 574 * overrides for the section checks.
575 */ 575 */
576void __cpuinit xsave_init(void) 576void xsave_init(void)
577{ 577{
578 static __refdata void (*next_func)(void) = xstate_enable_boot_cpu; 578 static __refdata void (*next_func)(void) = xstate_enable_boot_cpu;
579 void (*this_func)(void); 579 void (*this_func)(void);
@@ -594,7 +594,7 @@ static inline void __init eager_fpu_init_bp(void)
594 setup_init_fpu_buf(); 594 setup_init_fpu_buf();
595} 595}
596 596
597void __cpuinit eager_fpu_init(void) 597void eager_fpu_init(void)
598{ 598{
599 static __refdata void (*boot_func)(void) = eager_fpu_init_bp; 599 static __refdata void (*boot_func)(void) = eager_fpu_init_bp;
600 600
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index 0d094da49541..9e9285ae9b94 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -2811,6 +2811,13 @@ exit:
2811static bool page_fault_can_be_fast(struct kvm_vcpu *vcpu, u32 error_code) 2811static bool page_fault_can_be_fast(struct kvm_vcpu *vcpu, u32 error_code)
2812{ 2812{
2813 /* 2813 /*
2814 * Do not fix the mmio spte with invalid generation number which
2815 * need to be updated by slow page fault path.
2816 */
2817 if (unlikely(error_code & PFERR_RSVD_MASK))
2818 return false;
2819
2820 /*
2814 * #PF can be fast only if the shadow page table is present and it 2821 * #PF can be fast only if the shadow page table is present and it
2815 * is caused by write-protect, that means we just need change the 2822 * is caused by write-protect, that means we just need change the
2816 * W bit of the spte which can be done out of mmu-lock. 2823 * W bit of the spte which can be done out of mmu-lock.
diff --git a/arch/x86/mm/mmio-mod.c b/arch/x86/mm/mmio-mod.c
index dc0b727742f4..0057a7accfb1 100644
--- a/arch/x86/mm/mmio-mod.c
+++ b/arch/x86/mm/mmio-mod.c
@@ -410,9 +410,7 @@ out:
410 pr_warning("multiple CPUs still online, may miss events.\n"); 410 pr_warning("multiple CPUs still online, may miss events.\n");
411} 411}
412 412
413/* __ref because leave_uniprocessor calls cpu_up which is __cpuinit, 413static void leave_uniprocessor(void)
414 but this whole function is ifdefed CONFIG_HOTPLUG_CPU */
415static void __ref leave_uniprocessor(void)
416{ 414{
417 int cpu; 415 int cpu;
418 int err; 416 int err;
diff --git a/arch/x86/mm/numa.c b/arch/x86/mm/numa.c
index a71c4e207679..8bf93bae1f13 100644
--- a/arch/x86/mm/numa.c
+++ b/arch/x86/mm/numa.c
@@ -60,7 +60,7 @@ s16 __apicid_to_node[MAX_LOCAL_APIC] = {
60 [0 ... MAX_LOCAL_APIC-1] = NUMA_NO_NODE 60 [0 ... MAX_LOCAL_APIC-1] = NUMA_NO_NODE
61}; 61};
62 62
63int __cpuinit numa_cpu_node(int cpu) 63int numa_cpu_node(int cpu)
64{ 64{
65 int apicid = early_per_cpu(x86_cpu_to_apicid, cpu); 65 int apicid = early_per_cpu(x86_cpu_to_apicid, cpu);
66 66
@@ -691,12 +691,12 @@ void __init init_cpu_to_node(void)
691#ifndef CONFIG_DEBUG_PER_CPU_MAPS 691#ifndef CONFIG_DEBUG_PER_CPU_MAPS
692 692
693# ifndef CONFIG_NUMA_EMU 693# ifndef CONFIG_NUMA_EMU
694void __cpuinit numa_add_cpu(int cpu) 694void numa_add_cpu(int cpu)
695{ 695{
696 cpumask_set_cpu(cpu, node_to_cpumask_map[early_cpu_to_node(cpu)]); 696 cpumask_set_cpu(cpu, node_to_cpumask_map[early_cpu_to_node(cpu)]);
697} 697}
698 698
699void __cpuinit numa_remove_cpu(int cpu) 699void numa_remove_cpu(int cpu)
700{ 700{
701 cpumask_clear_cpu(cpu, node_to_cpumask_map[early_cpu_to_node(cpu)]); 701 cpumask_clear_cpu(cpu, node_to_cpumask_map[early_cpu_to_node(cpu)]);
702} 702}
@@ -763,17 +763,17 @@ void debug_cpumask_set_cpu(int cpu, int node, bool enable)
763} 763}
764 764
765# ifndef CONFIG_NUMA_EMU 765# ifndef CONFIG_NUMA_EMU
766static void __cpuinit numa_set_cpumask(int cpu, bool enable) 766static void numa_set_cpumask(int cpu, bool enable)
767{ 767{
768 debug_cpumask_set_cpu(cpu, early_cpu_to_node(cpu), enable); 768 debug_cpumask_set_cpu(cpu, early_cpu_to_node(cpu), enable);
769} 769}
770 770
771void __cpuinit numa_add_cpu(int cpu) 771void numa_add_cpu(int cpu)
772{ 772{
773 numa_set_cpumask(cpu, true); 773 numa_set_cpumask(cpu, true);
774} 774}
775 775
776void __cpuinit numa_remove_cpu(int cpu) 776void numa_remove_cpu(int cpu)
777{ 777{
778 numa_set_cpumask(cpu, false); 778 numa_set_cpumask(cpu, false);
779} 779}
diff --git a/arch/x86/mm/numa_emulation.c b/arch/x86/mm/numa_emulation.c
index dbbbb47260cc..a8f90ce3dedf 100644
--- a/arch/x86/mm/numa_emulation.c
+++ b/arch/x86/mm/numa_emulation.c
@@ -10,7 +10,7 @@
10 10
11#include "numa_internal.h" 11#include "numa_internal.h"
12 12
13static int emu_nid_to_phys[MAX_NUMNODES] __cpuinitdata; 13static int emu_nid_to_phys[MAX_NUMNODES];
14static char *emu_cmdline __initdata; 14static char *emu_cmdline __initdata;
15 15
16void __init numa_emu_cmdline(char *str) 16void __init numa_emu_cmdline(char *str)
@@ -444,7 +444,7 @@ no_emu:
444} 444}
445 445
446#ifndef CONFIG_DEBUG_PER_CPU_MAPS 446#ifndef CONFIG_DEBUG_PER_CPU_MAPS
447void __cpuinit numa_add_cpu(int cpu) 447void numa_add_cpu(int cpu)
448{ 448{
449 int physnid, nid; 449 int physnid, nid;
450 450
@@ -462,7 +462,7 @@ void __cpuinit numa_add_cpu(int cpu)
462 cpumask_set_cpu(cpu, node_to_cpumask_map[nid]); 462 cpumask_set_cpu(cpu, node_to_cpumask_map[nid]);
463} 463}
464 464
465void __cpuinit numa_remove_cpu(int cpu) 465void numa_remove_cpu(int cpu)
466{ 466{
467 int i; 467 int i;
468 468
@@ -470,7 +470,7 @@ void __cpuinit numa_remove_cpu(int cpu)
470 cpumask_clear_cpu(cpu, node_to_cpumask_map[i]); 470 cpumask_clear_cpu(cpu, node_to_cpumask_map[i]);
471} 471}
472#else /* !CONFIG_DEBUG_PER_CPU_MAPS */ 472#else /* !CONFIG_DEBUG_PER_CPU_MAPS */
473static void __cpuinit numa_set_cpumask(int cpu, bool enable) 473static void numa_set_cpumask(int cpu, bool enable)
474{ 474{
475 int nid, physnid; 475 int nid, physnid;
476 476
@@ -490,12 +490,12 @@ static void __cpuinit numa_set_cpumask(int cpu, bool enable)
490 } 490 }
491} 491}
492 492
493void __cpuinit numa_add_cpu(int cpu) 493void numa_add_cpu(int cpu)
494{ 494{
495 numa_set_cpumask(cpu, true); 495 numa_set_cpumask(cpu, true);
496} 496}
497 497
498void __cpuinit numa_remove_cpu(int cpu) 498void numa_remove_cpu(int cpu)
499{ 499{
500 numa_set_cpumask(cpu, false); 500 numa_set_cpumask(cpu, false);
501} 501}
diff --git a/arch/x86/mm/setup_nx.c b/arch/x86/mm/setup_nx.c
index 410531d3c292..90555bf60aa4 100644
--- a/arch/x86/mm/setup_nx.c
+++ b/arch/x86/mm/setup_nx.c
@@ -5,7 +5,7 @@
5#include <asm/pgtable.h> 5#include <asm/pgtable.h>
6#include <asm/proto.h> 6#include <asm/proto.h>
7 7
8static int disable_nx __cpuinitdata; 8static int disable_nx;
9 9
10/* 10/*
11 * noexec = on|off 11 * noexec = on|off
@@ -29,7 +29,7 @@ static int __init noexec_setup(char *str)
29} 29}
30early_param("noexec", noexec_setup); 30early_param("noexec", noexec_setup);
31 31
32void __cpuinit x86_configure_nx(void) 32void x86_configure_nx(void)
33{ 33{
34 if (cpu_has_nx && !disable_nx) 34 if (cpu_has_nx && !disable_nx)
35 __supported_pte_mask |= _PAGE_NX; 35 __supported_pte_mask |= _PAGE_NX;
diff --git a/arch/x86/pci/amd_bus.c b/arch/x86/pci/amd_bus.c
index e9e6ed5cdf94..a48be98e9ded 100644
--- a/arch/x86/pci/amd_bus.c
+++ b/arch/x86/pci/amd_bus.c
@@ -312,7 +312,7 @@ static int __init early_fill_mp_bus_info(void)
312 312
313#define ENABLE_CF8_EXT_CFG (1ULL << 46) 313#define ENABLE_CF8_EXT_CFG (1ULL << 46)
314 314
315static void __cpuinit enable_pci_io_ecs(void *unused) 315static void enable_pci_io_ecs(void *unused)
316{ 316{
317 u64 reg; 317 u64 reg;
318 rdmsrl(MSR_AMD64_NB_CFG, reg); 318 rdmsrl(MSR_AMD64_NB_CFG, reg);
@@ -322,8 +322,8 @@ static void __cpuinit enable_pci_io_ecs(void *unused)
322 } 322 }
323} 323}
324 324
325static int __cpuinit amd_cpu_notify(struct notifier_block *self, 325static int amd_cpu_notify(struct notifier_block *self, unsigned long action,
326 unsigned long action, void *hcpu) 326 void *hcpu)
327{ 327{
328 int cpu = (long)hcpu; 328 int cpu = (long)hcpu;
329 switch (action) { 329 switch (action) {
@@ -337,7 +337,7 @@ static int __cpuinit amd_cpu_notify(struct notifier_block *self,
337 return NOTIFY_OK; 337 return NOTIFY_OK;
338} 338}
339 339
340static struct notifier_block __cpuinitdata amd_cpu_notifier = { 340static struct notifier_block amd_cpu_notifier = {
341 .notifier_call = amd_cpu_notify, 341 .notifier_call = amd_cpu_notify,
342}; 342};
343 343
diff --git a/arch/x86/platform/ce4100/ce4100.c b/arch/x86/platform/ce4100/ce4100.c
index f8ab4945892e..8244f5ec2f4c 100644
--- a/arch/x86/platform/ce4100/ce4100.c
+++ b/arch/x86/platform/ce4100/ce4100.c
@@ -12,8 +12,10 @@
12#include <linux/kernel.h> 12#include <linux/kernel.h>
13#include <linux/irq.h> 13#include <linux/irq.h>
14#include <linux/module.h> 14#include <linux/module.h>
15#include <linux/reboot.h>
15#include <linux/serial_reg.h> 16#include <linux/serial_reg.h>
16#include <linux/serial_8250.h> 17#include <linux/serial_8250.h>
18#include <linux/reboot.h>
17 19
18#include <asm/ce4100.h> 20#include <asm/ce4100.h>
19#include <asm/prom.h> 21#include <asm/prom.h>
@@ -134,7 +136,7 @@ static void __init sdv_arch_setup(void)
134} 136}
135 137
136#ifdef CONFIG_X86_IO_APIC 138#ifdef CONFIG_X86_IO_APIC
137static void __cpuinit sdv_pci_init(void) 139static void sdv_pci_init(void)
138{ 140{
139 x86_of_pci_init(); 141 x86_of_pci_init();
140 /* We can't set this earlier, because we need to calibrate the timer */ 142 /* We can't set this earlier, because we need to calibrate the timer */
diff --git a/arch/x86/platform/efi/efi.c b/arch/x86/platform/efi/efi.c
index c8d5577044bb..90f6ed127096 100644
--- a/arch/x86/platform/efi/efi.c
+++ b/arch/x86/platform/efi/efi.c
@@ -931,13 +931,6 @@ void __init efi_enter_virtual_mode(void)
931 va = efi_ioremap(md->phys_addr, size, 931 va = efi_ioremap(md->phys_addr, size,
932 md->type, md->attribute); 932 md->type, md->attribute);
933 933
934 if (!(md->attribute & EFI_MEMORY_RUNTIME)) {
935 if (!va)
936 pr_err("ioremap of 0x%llX failed!\n",
937 (unsigned long long)md->phys_addr);
938 continue;
939 }
940
941 md->virt_addr = (u64) (unsigned long) va; 934 md->virt_addr = (u64) (unsigned long) va;
942 935
943 if (!va) { 936 if (!va) {
diff --git a/arch/x86/platform/mrst/mrst.c b/arch/x86/platform/mrst/mrst.c
index a0a0a4389bbd..47fe66fe61f1 100644
--- a/arch/x86/platform/mrst/mrst.c
+++ b/arch/x86/platform/mrst/mrst.c
@@ -65,7 +65,7 @@
65 * lapic (always-on,ARAT) ------ 150 65 * lapic (always-on,ARAT) ------ 150
66 */ 66 */
67 67
68__cpuinitdata enum mrst_timer_options mrst_timer_options; 68enum mrst_timer_options mrst_timer_options;
69 69
70static u32 sfi_mtimer_usage[SFI_MTMR_MAX_NUM]; 70static u32 sfi_mtimer_usage[SFI_MTMR_MAX_NUM];
71static struct sfi_timer_table_entry sfi_mtimer_array[SFI_MTMR_MAX_NUM]; 71static struct sfi_timer_table_entry sfi_mtimer_array[SFI_MTMR_MAX_NUM];
@@ -248,7 +248,7 @@ static void __init mrst_time_init(void)
248 apbt_time_init(); 248 apbt_time_init();
249} 249}
250 250
251static void __cpuinit mrst_arch_setup(void) 251static void mrst_arch_setup(void)
252{ 252{
253 if (boot_cpu_data.x86 == 6 && boot_cpu_data.x86_model == 0x27) 253 if (boot_cpu_data.x86 == 6 && boot_cpu_data.x86_model == 0x27)
254 __mrst_cpu_chip = MRST_CPU_CHIP_PENWELL; 254 __mrst_cpu_chip = MRST_CPU_CHIP_PENWELL;
diff --git a/arch/x86/um/signal.c b/arch/x86/um/signal.c
index ae7319db18ee..5e04a1c899fa 100644
--- a/arch/x86/um/signal.c
+++ b/arch/x86/um/signal.c
@@ -508,7 +508,6 @@ int setup_signal_stack_si(unsigned long stack_top, int sig,
508{ 508{
509 struct rt_sigframe __user *frame; 509 struct rt_sigframe __user *frame;
510 int err = 0; 510 int err = 0;
511 struct task_struct *me = current;
512 511
513 frame = (struct rt_sigframe __user *) 512 frame = (struct rt_sigframe __user *)
514 round_down(stack_top - sizeof(struct rt_sigframe), 16); 513 round_down(stack_top - sizeof(struct rt_sigframe), 16);
diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c
index 2fa02bc50034..193097ef3d7d 100644
--- a/arch/x86/xen/enlighten.c
+++ b/arch/x86/xen/enlighten.c
@@ -1681,8 +1681,8 @@ static void __init init_hvm_pv_info(void)
1681 xen_domain_type = XEN_HVM_DOMAIN; 1681 xen_domain_type = XEN_HVM_DOMAIN;
1682} 1682}
1683 1683
1684static int __cpuinit xen_hvm_cpu_notify(struct notifier_block *self, 1684static int xen_hvm_cpu_notify(struct notifier_block *self, unsigned long action,
1685 unsigned long action, void *hcpu) 1685 void *hcpu)
1686{ 1686{
1687 int cpu = (long)hcpu; 1687 int cpu = (long)hcpu;
1688 switch (action) { 1688 switch (action) {
@@ -1700,7 +1700,7 @@ static int __cpuinit xen_hvm_cpu_notify(struct notifier_block *self,
1700 return NOTIFY_OK; 1700 return NOTIFY_OK;
1701} 1701}
1702 1702
1703static struct notifier_block xen_hvm_cpu_notifier __cpuinitdata = { 1703static struct notifier_block xen_hvm_cpu_notifier = {
1704 .notifier_call = xen_hvm_cpu_notify, 1704 .notifier_call = xen_hvm_cpu_notify,
1705}; 1705};
1706 1706
diff --git a/arch/x86/xen/setup.c b/arch/x86/xen/setup.c
index 94eac5c85cdc..056d11faef21 100644
--- a/arch/x86/xen/setup.c
+++ b/arch/x86/xen/setup.c
@@ -475,7 +475,7 @@ static void __init fiddle_vdso(void)
475#endif 475#endif
476} 476}
477 477
478static int __cpuinit register_callback(unsigned type, const void *func) 478static int register_callback(unsigned type, const void *func)
479{ 479{
480 struct callback_register callback = { 480 struct callback_register callback = {
481 .type = type, 481 .type = type,
@@ -486,7 +486,7 @@ static int __cpuinit register_callback(unsigned type, const void *func)
486 return HYPERVISOR_callback_op(CALLBACKOP_register, &callback); 486 return HYPERVISOR_callback_op(CALLBACKOP_register, &callback);
487} 487}
488 488
489void __cpuinit xen_enable_sysenter(void) 489void xen_enable_sysenter(void)
490{ 490{
491 int ret; 491 int ret;
492 unsigned sysenter_feature; 492 unsigned sysenter_feature;
@@ -505,7 +505,7 @@ void __cpuinit xen_enable_sysenter(void)
505 setup_clear_cpu_cap(sysenter_feature); 505 setup_clear_cpu_cap(sysenter_feature);
506} 506}
507 507
508void __cpuinit xen_enable_syscall(void) 508void xen_enable_syscall(void)
509{ 509{
510#ifdef CONFIG_X86_64 510#ifdef CONFIG_X86_64
511 int ret; 511 int ret;
diff --git a/arch/x86/xen/smp.c b/arch/x86/xen/smp.c
index c1367b29c3b1..ca92754eb846 100644
--- a/arch/x86/xen/smp.c
+++ b/arch/x86/xen/smp.c
@@ -65,7 +65,7 @@ static irqreturn_t xen_reschedule_interrupt(int irq, void *dev_id)
65 return IRQ_HANDLED; 65 return IRQ_HANDLED;
66} 66}
67 67
68static void __cpuinit cpu_bringup(void) 68static void cpu_bringup(void)
69{ 69{
70 int cpu; 70 int cpu;
71 71
@@ -97,7 +97,7 @@ static void __cpuinit cpu_bringup(void)
97 wmb(); /* make sure everything is out */ 97 wmb(); /* make sure everything is out */
98} 98}
99 99
100static void __cpuinit cpu_bringup_and_idle(void) 100static void cpu_bringup_and_idle(void)
101{ 101{
102 cpu_bringup(); 102 cpu_bringup();
103 cpu_startup_entry(CPUHP_ONLINE); 103 cpu_startup_entry(CPUHP_ONLINE);
@@ -326,7 +326,7 @@ static void __init xen_smp_prepare_cpus(unsigned int max_cpus)
326 set_cpu_present(cpu, true); 326 set_cpu_present(cpu, true);
327} 327}
328 328
329static int __cpuinit 329static int
330cpu_initialize_context(unsigned int cpu, struct task_struct *idle) 330cpu_initialize_context(unsigned int cpu, struct task_struct *idle)
331{ 331{
332 struct vcpu_guest_context *ctxt; 332 struct vcpu_guest_context *ctxt;
@@ -397,7 +397,7 @@ cpu_initialize_context(unsigned int cpu, struct task_struct *idle)
397 return 0; 397 return 0;
398} 398}
399 399
400static int __cpuinit xen_cpu_up(unsigned int cpu, struct task_struct *idle) 400static int xen_cpu_up(unsigned int cpu, struct task_struct *idle)
401{ 401{
402 int rc; 402 int rc;
403 403
@@ -470,7 +470,7 @@ static void xen_cpu_die(unsigned int cpu)
470 xen_teardown_timer(cpu); 470 xen_teardown_timer(cpu);
471} 471}
472 472
473static void __cpuinit xen_play_dead(void) /* used only with HOTPLUG_CPU */ 473static void xen_play_dead(void) /* used only with HOTPLUG_CPU */
474{ 474{
475 play_dead_common(); 475 play_dead_common();
476 HYPERVISOR_vcpu_op(VCPUOP_down, smp_processor_id(), NULL); 476 HYPERVISOR_vcpu_op(VCPUOP_down, smp_processor_id(), NULL);
@@ -691,7 +691,7 @@ static void __init xen_hvm_smp_prepare_cpus(unsigned int max_cpus)
691 xen_init_lock_cpu(0); 691 xen_init_lock_cpu(0);
692} 692}
693 693
694static int __cpuinit xen_hvm_cpu_up(unsigned int cpu, struct task_struct *tidle) 694static int xen_hvm_cpu_up(unsigned int cpu, struct task_struct *tidle)
695{ 695{
696 int rc; 696 int rc;
697 rc = native_cpu_up(cpu, tidle); 697 rc = native_cpu_up(cpu, tidle);
diff --git a/arch/x86/xen/spinlock.c b/arch/x86/xen/spinlock.c
index a40f8508e760..cf3caee356b3 100644
--- a/arch/x86/xen/spinlock.c
+++ b/arch/x86/xen/spinlock.c
@@ -361,7 +361,7 @@ static irqreturn_t dummy_handler(int irq, void *dev_id)
361 return IRQ_HANDLED; 361 return IRQ_HANDLED;
362} 362}
363 363
364void __cpuinit xen_init_lock_cpu(int cpu) 364void xen_init_lock_cpu(int cpu)
365{ 365{
366 int irq; 366 int irq;
367 char *name; 367 char *name;
diff --git a/arch/x86/xen/xen-ops.h b/arch/x86/xen/xen-ops.h
index a95b41744ad0..86782c5d7e2a 100644
--- a/arch/x86/xen/xen-ops.h
+++ b/arch/x86/xen/xen-ops.h
@@ -73,7 +73,7 @@ static inline void xen_hvm_smp_init(void) {}
73 73
74#ifdef CONFIG_PARAVIRT_SPINLOCKS 74#ifdef CONFIG_PARAVIRT_SPINLOCKS
75void __init xen_init_spinlocks(void); 75void __init xen_init_spinlocks(void);
76void __cpuinit xen_init_lock_cpu(int cpu); 76void xen_init_lock_cpu(int cpu);
77void xen_uninit_lock_cpu(int cpu); 77void xen_uninit_lock_cpu(int cpu);
78#else 78#else
79static inline void xen_init_spinlocks(void) 79static inline void xen_init_spinlocks(void)
diff --git a/arch/xtensa/kernel/time.c b/arch/xtensa/kernel/time.c
index bdbb17312526..24bb0c1776ba 100644
--- a/arch/xtensa/kernel/time.c
+++ b/arch/xtensa/kernel/time.c
@@ -162,7 +162,7 @@ irqreturn_t timer_interrupt (int irq, void *dev_id)
162} 162}
163 163
164#ifndef CONFIG_GENERIC_CALIBRATE_DELAY 164#ifndef CONFIG_GENERIC_CALIBRATE_DELAY
165void __cpuinit calibrate_delay(void) 165void calibrate_delay(void)
166{ 166{
167 loops_per_jiffy = CCOUNT_PER_JIFFY; 167 loops_per_jiffy = CCOUNT_PER_JIFFY;
168 printk("Calibrating delay loop (skipped)... " 168 printk("Calibrating delay loop (skipped)... "
diff --git a/block/blk-iopoll.c b/block/blk-iopoll.c
index 58916afbbda5..4b8d9b541112 100644
--- a/block/blk-iopoll.c
+++ b/block/blk-iopoll.c
@@ -189,8 +189,8 @@ void blk_iopoll_init(struct blk_iopoll *iop, int weight, blk_iopoll_fn *poll_fn)
189} 189}
190EXPORT_SYMBOL(blk_iopoll_init); 190EXPORT_SYMBOL(blk_iopoll_init);
191 191
192static int __cpuinit blk_iopoll_cpu_notify(struct notifier_block *self, 192static int blk_iopoll_cpu_notify(struct notifier_block *self,
193 unsigned long action, void *hcpu) 193 unsigned long action, void *hcpu)
194{ 194{
195 /* 195 /*
196 * If a CPU goes away, splice its entries to the current CPU 196 * If a CPU goes away, splice its entries to the current CPU
@@ -209,7 +209,7 @@ static int __cpuinit blk_iopoll_cpu_notify(struct notifier_block *self,
209 return NOTIFY_OK; 209 return NOTIFY_OK;
210} 210}
211 211
212static struct notifier_block __cpuinitdata blk_iopoll_cpu_notifier = { 212static struct notifier_block blk_iopoll_cpu_notifier = {
213 .notifier_call = blk_iopoll_cpu_notify, 213 .notifier_call = blk_iopoll_cpu_notify,
214}; 214};
215 215
diff --git a/block/blk-softirq.c b/block/blk-softirq.c
index 467c8de88642..ec9e60636f43 100644
--- a/block/blk-softirq.c
+++ b/block/blk-softirq.c
@@ -78,8 +78,8 @@ static int raise_blk_irq(int cpu, struct request *rq)
78} 78}
79#endif 79#endif
80 80
81static int __cpuinit blk_cpu_notify(struct notifier_block *self, 81static int blk_cpu_notify(struct notifier_block *self, unsigned long action,
82 unsigned long action, void *hcpu) 82 void *hcpu)
83{ 83{
84 /* 84 /*
85 * If a CPU goes away, splice its entries to the current CPU 85 * If a CPU goes away, splice its entries to the current CPU
@@ -98,7 +98,7 @@ static int __cpuinit blk_cpu_notify(struct notifier_block *self,
98 return NOTIFY_OK; 98 return NOTIFY_OK;
99} 99}
100 100
101static struct notifier_block __cpuinitdata blk_cpu_notifier = { 101static struct notifier_block blk_cpu_notifier = {
102 .notifier_call = blk_cpu_notify, 102 .notifier_call = blk_cpu_notify,
103}; 103};
104 104
diff --git a/crypto/Kconfig b/crypto/Kconfig
index 69ce573f1224..aca01164f002 100644
--- a/crypto/Kconfig
+++ b/crypto/Kconfig
@@ -376,25 +376,6 @@ config CRYPTO_CRC32_PCLMUL
376 which will enable any routine to use the CRC-32-IEEE 802.3 checksum 376 which will enable any routine to use the CRC-32-IEEE 802.3 checksum
377 and gain better performance as compared with the table implementation. 377 and gain better performance as compared with the table implementation.
378 378
379config CRYPTO_CRCT10DIF
380 tristate "CRCT10DIF algorithm"
381 select CRYPTO_HASH
382 help
383 CRC T10 Data Integrity Field computation is being cast as
384 a crypto transform. This allows for faster crc t10 diff
385 transforms to be used if they are available.
386
387config CRYPTO_CRCT10DIF_PCLMUL
388 tristate "CRCT10DIF PCLMULQDQ hardware acceleration"
389 depends on X86 && 64BIT && CRC_T10DIF
390 select CRYPTO_HASH
391 help
392 For x86_64 processors with SSE4.2 and PCLMULQDQ supported,
393 CRC T10 DIF PCLMULQDQ computation can be hardware
394 accelerated PCLMULQDQ instruction. This option will create
395 'crct10dif-plcmul' module, which is faster when computing the
396 crct10dif checksum as compared with the generic table implementation.
397
398config CRYPTO_GHASH 379config CRYPTO_GHASH
399 tristate "GHASH digest algorithm" 380 tristate "GHASH digest algorithm"
400 select CRYPTO_GF128MUL 381 select CRYPTO_GF128MUL
diff --git a/crypto/Makefile b/crypto/Makefile
index 2d5ed08a239f..2ba0df2f908f 100644
--- a/crypto/Makefile
+++ b/crypto/Makefile
@@ -83,7 +83,6 @@ obj-$(CONFIG_CRYPTO_ZLIB) += zlib.o
83obj-$(CONFIG_CRYPTO_MICHAEL_MIC) += michael_mic.o 83obj-$(CONFIG_CRYPTO_MICHAEL_MIC) += michael_mic.o
84obj-$(CONFIG_CRYPTO_CRC32C) += crc32c.o 84obj-$(CONFIG_CRYPTO_CRC32C) += crc32c.o
85obj-$(CONFIG_CRYPTO_CRC32) += crc32.o 85obj-$(CONFIG_CRYPTO_CRC32) += crc32.o
86obj-$(CONFIG_CRYPTO_CRCT10DIF) += crct10dif.o
87obj-$(CONFIG_CRYPTO_AUTHENC) += authenc.o authencesn.o 86obj-$(CONFIG_CRYPTO_AUTHENC) += authenc.o authencesn.o
88obj-$(CONFIG_CRYPTO_LZO) += lzo.o 87obj-$(CONFIG_CRYPTO_LZO) += lzo.o
89obj-$(CONFIG_CRYPTO_LZ4) += lz4.o 88obj-$(CONFIG_CRYPTO_LZ4) += lz4.o
diff --git a/crypto/crct10dif.c b/crypto/crct10dif.c
deleted file mode 100644
index 92aca96d6b98..000000000000
--- a/crypto/crct10dif.c
+++ /dev/null
@@ -1,178 +0,0 @@
1/*
2 * Cryptographic API.
3 *
4 * T10 Data Integrity Field CRC16 Crypto Transform
5 *
6 * Copyright (c) 2007 Oracle Corporation. All rights reserved.
7 * Written by Martin K. Petersen <martin.petersen@oracle.com>
8 * Copyright (C) 2013 Intel Corporation
9 * Author: Tim Chen <tim.c.chen@linux.intel.com>
10 *
11 * This program is free software; you can redistribute it and/or modify it
12 * under the terms of the GNU General Public License as published by the Free
13 * Software Foundation; either version 2 of the License, or (at your option)
14 * any later version.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
17 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
18 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
19 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
20 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
21 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
22 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
23 * SOFTWARE.
24 *
25 */
26
27#include <linux/types.h>
28#include <linux/module.h>
29#include <linux/crc-t10dif.h>
30#include <crypto/internal/hash.h>
31#include <linux/init.h>
32#include <linux/string.h>
33#include <linux/kernel.h>
34
35struct chksum_desc_ctx {
36 __u16 crc;
37};
38
39/* Table generated using the following polynomium:
40 * x^16 + x^15 + x^11 + x^9 + x^8 + x^7 + x^5 + x^4 + x^2 + x + 1
41 * gt: 0x8bb7
42 */
43static const __u16 t10_dif_crc_table[256] = {
44 0x0000, 0x8BB7, 0x9CD9, 0x176E, 0xB205, 0x39B2, 0x2EDC, 0xA56B,
45 0xEFBD, 0x640A, 0x7364, 0xF8D3, 0x5DB8, 0xD60F, 0xC161, 0x4AD6,
46 0x54CD, 0xDF7A, 0xC814, 0x43A3, 0xE6C8, 0x6D7F, 0x7A11, 0xF1A6,
47 0xBB70, 0x30C7, 0x27A9, 0xAC1E, 0x0975, 0x82C2, 0x95AC, 0x1E1B,
48 0xA99A, 0x222D, 0x3543, 0xBEF4, 0x1B9F, 0x9028, 0x8746, 0x0CF1,
49 0x4627, 0xCD90, 0xDAFE, 0x5149, 0xF422, 0x7F95, 0x68FB, 0xE34C,
50 0xFD57, 0x76E0, 0x618E, 0xEA39, 0x4F52, 0xC4E5, 0xD38B, 0x583C,
51 0x12EA, 0x995D, 0x8E33, 0x0584, 0xA0EF, 0x2B58, 0x3C36, 0xB781,
52 0xD883, 0x5334, 0x445A, 0xCFED, 0x6A86, 0xE131, 0xF65F, 0x7DE8,
53 0x373E, 0xBC89, 0xABE7, 0x2050, 0x853B, 0x0E8C, 0x19E2, 0x9255,
54 0x8C4E, 0x07F9, 0x1097, 0x9B20, 0x3E4B, 0xB5FC, 0xA292, 0x2925,
55 0x63F3, 0xE844, 0xFF2A, 0x749D, 0xD1F6, 0x5A41, 0x4D2F, 0xC698,
56 0x7119, 0xFAAE, 0xEDC0, 0x6677, 0xC31C, 0x48AB, 0x5FC5, 0xD472,
57 0x9EA4, 0x1513, 0x027D, 0x89CA, 0x2CA1, 0xA716, 0xB078, 0x3BCF,
58 0x25D4, 0xAE63, 0xB90D, 0x32BA, 0x97D1, 0x1C66, 0x0B08, 0x80BF,
59 0xCA69, 0x41DE, 0x56B0, 0xDD07, 0x786C, 0xF3DB, 0xE4B5, 0x6F02,
60 0x3AB1, 0xB106, 0xA668, 0x2DDF, 0x88B4, 0x0303, 0x146D, 0x9FDA,
61 0xD50C, 0x5EBB, 0x49D5, 0xC262, 0x6709, 0xECBE, 0xFBD0, 0x7067,
62 0x6E7C, 0xE5CB, 0xF2A5, 0x7912, 0xDC79, 0x57CE, 0x40A0, 0xCB17,
63 0x81C1, 0x0A76, 0x1D18, 0x96AF, 0x33C4, 0xB873, 0xAF1D, 0x24AA,
64 0x932B, 0x189C, 0x0FF2, 0x8445, 0x212E, 0xAA99, 0xBDF7, 0x3640,
65 0x7C96, 0xF721, 0xE04F, 0x6BF8, 0xCE93, 0x4524, 0x524A, 0xD9FD,
66 0xC7E6, 0x4C51, 0x5B3F, 0xD088, 0x75E3, 0xFE54, 0xE93A, 0x628D,
67 0x285B, 0xA3EC, 0xB482, 0x3F35, 0x9A5E, 0x11E9, 0x0687, 0x8D30,
68 0xE232, 0x6985, 0x7EEB, 0xF55C, 0x5037, 0xDB80, 0xCCEE, 0x4759,
69 0x0D8F, 0x8638, 0x9156, 0x1AE1, 0xBF8A, 0x343D, 0x2353, 0xA8E4,
70 0xB6FF, 0x3D48, 0x2A26, 0xA191, 0x04FA, 0x8F4D, 0x9823, 0x1394,
71 0x5942, 0xD2F5, 0xC59B, 0x4E2C, 0xEB47, 0x60F0, 0x779E, 0xFC29,
72 0x4BA8, 0xC01F, 0xD771, 0x5CC6, 0xF9AD, 0x721A, 0x6574, 0xEEC3,
73 0xA415, 0x2FA2, 0x38CC, 0xB37B, 0x1610, 0x9DA7, 0x8AC9, 0x017E,
74 0x1F65, 0x94D2, 0x83BC, 0x080B, 0xAD60, 0x26D7, 0x31B9, 0xBA0E,
75 0xF0D8, 0x7B6F, 0x6C01, 0xE7B6, 0x42DD, 0xC96A, 0xDE04, 0x55B3
76};
77
78__u16 crc_t10dif_generic(__u16 crc, const unsigned char *buffer, size_t len)
79{
80 unsigned int i;
81
82 for (i = 0 ; i < len ; i++)
83 crc = (crc << 8) ^ t10_dif_crc_table[((crc >> 8) ^ buffer[i]) & 0xff];
84
85 return crc;
86}
87EXPORT_SYMBOL(crc_t10dif_generic);
88
89/*
90 * Steps through buffer one byte at at time, calculates reflected
91 * crc using table.
92 */
93
94static int chksum_init(struct shash_desc *desc)
95{
96 struct chksum_desc_ctx *ctx = shash_desc_ctx(desc);
97
98 ctx->crc = 0;
99
100 return 0;
101}
102
103static int chksum_update(struct shash_desc *desc, const u8 *data,
104 unsigned int length)
105{
106 struct chksum_desc_ctx *ctx = shash_desc_ctx(desc);
107
108 ctx->crc = crc_t10dif_generic(ctx->crc, data, length);
109 return 0;
110}
111
112static int chksum_final(struct shash_desc *desc, u8 *out)
113{
114 struct chksum_desc_ctx *ctx = shash_desc_ctx(desc);
115
116 *(__u16 *)out = ctx->crc;
117 return 0;
118}
119
120static int __chksum_finup(__u16 *crcp, const u8 *data, unsigned int len,
121 u8 *out)
122{
123 *(__u16 *)out = crc_t10dif_generic(*crcp, data, len);
124 return 0;
125}
126
127static int chksum_finup(struct shash_desc *desc, const u8 *data,
128 unsigned int len, u8 *out)
129{
130 struct chksum_desc_ctx *ctx = shash_desc_ctx(desc);
131
132 return __chksum_finup(&ctx->crc, data, len, out);
133}
134
135static int chksum_digest(struct shash_desc *desc, const u8 *data,
136 unsigned int length, u8 *out)
137{
138 struct chksum_desc_ctx *ctx = shash_desc_ctx(desc);
139
140 return __chksum_finup(&ctx->crc, data, length, out);
141}
142
143static struct shash_alg alg = {
144 .digestsize = CRC_T10DIF_DIGEST_SIZE,
145 .init = chksum_init,
146 .update = chksum_update,
147 .final = chksum_final,
148 .finup = chksum_finup,
149 .digest = chksum_digest,
150 .descsize = sizeof(struct chksum_desc_ctx),
151 .base = {
152 .cra_name = "crct10dif",
153 .cra_driver_name = "crct10dif-generic",
154 .cra_priority = 100,
155 .cra_blocksize = CRC_T10DIF_BLOCK_SIZE,
156 .cra_module = THIS_MODULE,
157 }
158};
159
160static int __init crct10dif_mod_init(void)
161{
162 int ret;
163
164 ret = crypto_register_shash(&alg);
165 return ret;
166}
167
168static void __exit crct10dif_mod_fini(void)
169{
170 crypto_unregister_shash(&alg);
171}
172
173module_init(crct10dif_mod_init);
174module_exit(crct10dif_mod_fini);
175
176MODULE_AUTHOR("Tim Chen <tim.c.chen@linux.intel.com>");
177MODULE_DESCRIPTION("T10 DIF CRC calculation.");
178MODULE_LICENSE("GPL");
diff --git a/crypto/tcrypt.c b/crypto/tcrypt.c
index 25a5934f0e50..66d254ce0d11 100644
--- a/crypto/tcrypt.c
+++ b/crypto/tcrypt.c
@@ -1174,10 +1174,6 @@ static int do_test(int m)
1174 ret += tcrypt_test("ghash"); 1174 ret += tcrypt_test("ghash");
1175 break; 1175 break;
1176 1176
1177 case 47:
1178 ret += tcrypt_test("crct10dif");
1179 break;
1180
1181 case 100: 1177 case 100:
1182 ret += tcrypt_test("hmac(md5)"); 1178 ret += tcrypt_test("hmac(md5)");
1183 break; 1179 break;
@@ -1502,10 +1498,6 @@ static int do_test(int m)
1502 test_hash_speed("crc32c", sec, generic_hash_speed_template); 1498 test_hash_speed("crc32c", sec, generic_hash_speed_template);
1503 if (mode > 300 && mode < 400) break; 1499 if (mode > 300 && mode < 400) break;
1504 1500
1505 case 320:
1506 test_hash_speed("crct10dif", sec, generic_hash_speed_template);
1507 if (mode > 300 && mode < 400) break;
1508
1509 case 399: 1501 case 399:
1510 break; 1502 break;
1511 1503
diff --git a/crypto/testmgr.c b/crypto/testmgr.c
index 2f00607039e2..ecddf921a9db 100644
--- a/crypto/testmgr.c
+++ b/crypto/testmgr.c
@@ -2046,16 +2046,6 @@ static const struct alg_test_desc alg_test_descs[] = {
2046 } 2046 }
2047 } 2047 }
2048 }, { 2048 }, {
2049 .alg = "crct10dif",
2050 .test = alg_test_hash,
2051 .fips_allowed = 1,
2052 .suite = {
2053 .hash = {
2054 .vecs = crct10dif_tv_template,
2055 .count = CRCT10DIF_TEST_VECTORS
2056 }
2057 }
2058 }, {
2059 .alg = "cryptd(__driver-cbc-aes-aesni)", 2049 .alg = "cryptd(__driver-cbc-aes-aesni)",
2060 .test = alg_test_null, 2050 .test = alg_test_null,
2061 .fips_allowed = 1, 2051 .fips_allowed = 1,
diff --git a/crypto/testmgr.h b/crypto/testmgr.h
index 7d44aa3d6b44..1e701bc075b9 100644
--- a/crypto/testmgr.h
+++ b/crypto/testmgr.h
@@ -450,39 +450,6 @@ static struct hash_testvec rmd320_tv_template[] = {
450 } 450 }
451}; 451};
452 452
453#define CRCT10DIF_TEST_VECTORS 3
454static struct hash_testvec crct10dif_tv_template[] = {
455 {
456 .plaintext = "abc",
457 .psize = 3,
458#ifdef __LITTLE_ENDIAN
459 .digest = "\x3b\x44",
460#else
461 .digest = "\x44\x3b",
462#endif
463 }, {
464 .plaintext = "1234567890123456789012345678901234567890"
465 "123456789012345678901234567890123456789",
466 .psize = 79,
467#ifdef __LITTLE_ENDIAN
468 .digest = "\x70\x4b",
469#else
470 .digest = "\x4b\x70",
471#endif
472 }, {
473 .plaintext =
474 "abcddddddddddddddddddddddddddddddddddddddddddddddddddddd",
475 .psize = 56,
476#ifdef __LITTLE_ENDIAN
477 .digest = "\xe3\x9c",
478#else
479 .digest = "\x9c\xe3",
480#endif
481 .np = 2,
482 .tap = { 28, 28 }
483 }
484};
485
486/* 453/*
487 * SHA1 test vectors from from FIPS PUB 180-1 454 * SHA1 test vectors from from FIPS PUB 180-1
488 * Long vector from CAVS 5.0 455 * Long vector from CAVS 5.0
diff --git a/drivers/accessibility/braille/braille_console.c b/drivers/accessibility/braille/braille_console.c
index d21167bfc865..dc34a5b8bcee 100644
--- a/drivers/accessibility/braille/braille_console.c
+++ b/drivers/accessibility/braille/braille_console.c
@@ -359,6 +359,9 @@ int braille_register_console(struct console *console, int index,
359 char *console_options, char *braille_options) 359 char *console_options, char *braille_options)
360{ 360{
361 int ret; 361 int ret;
362
363 if (!(console->flags & CON_BRL))
364 return 0;
362 if (!console_options) 365 if (!console_options)
363 /* Only support VisioBraille for now */ 366 /* Only support VisioBraille for now */
364 console_options = "57600o8"; 367 console_options = "57600o8";
@@ -374,15 +377,17 @@ int braille_register_console(struct console *console, int index,
374 braille_co = console; 377 braille_co = console;
375 register_keyboard_notifier(&keyboard_notifier_block); 378 register_keyboard_notifier(&keyboard_notifier_block);
376 register_vt_notifier(&vt_notifier_block); 379 register_vt_notifier(&vt_notifier_block);
377 return 0; 380 return 1;
378} 381}
379 382
380int braille_unregister_console(struct console *console) 383int braille_unregister_console(struct console *console)
381{ 384{
382 if (braille_co != console) 385 if (braille_co != console)
383 return -EINVAL; 386 return -EINVAL;
387 if (!(console->flags & CON_BRL))
388 return 0;
384 unregister_keyboard_notifier(&keyboard_notifier_block); 389 unregister_keyboard_notifier(&keyboard_notifier_block);
385 unregister_vt_notifier(&vt_notifier_block); 390 unregister_vt_notifier(&vt_notifier_block);
386 braille_co = NULL; 391 braille_co = NULL;
387 return 0; 392 return 1;
388} 393}
diff --git a/drivers/acpi/acpi_memhotplug.c b/drivers/acpi/acpi_memhotplug.c
index c711d1144044..999adb5499c7 100644
--- a/drivers/acpi/acpi_memhotplug.c
+++ b/drivers/acpi/acpi_memhotplug.c
@@ -323,6 +323,7 @@ static int acpi_memory_device_add(struct acpi_device *device,
323 /* Get the range from the _CRS */ 323 /* Get the range from the _CRS */
324 result = acpi_memory_get_device_resources(mem_device); 324 result = acpi_memory_get_device_resources(mem_device);
325 if (result) { 325 if (result) {
326 device->driver_data = NULL;
326 kfree(mem_device); 327 kfree(mem_device);
327 return result; 328 return result;
328 } 329 }
diff --git a/drivers/acpi/acpi_processor.c b/drivers/acpi/acpi_processor.c
index e9b01e35ac37..5a74a9c1e42c 100644
--- a/drivers/acpi/acpi_processor.c
+++ b/drivers/acpi/acpi_processor.c
@@ -340,7 +340,7 @@ static int acpi_processor_get_info(struct acpi_device *device)
340 */ 340 */
341static DEFINE_PER_CPU(void *, processor_device_array); 341static DEFINE_PER_CPU(void *, processor_device_array);
342 342
343static int __cpuinit acpi_processor_add(struct acpi_device *device, 343static int acpi_processor_add(struct acpi_device *device,
344 const struct acpi_device_id *id) 344 const struct acpi_device_id *id)
345{ 345{
346 struct acpi_processor *pr; 346 struct acpi_processor *pr;
@@ -451,7 +451,6 @@ static void acpi_processor_remove(struct acpi_device *device)
451 /* Clean up. */ 451 /* Clean up. */
452 per_cpu(processor_device_array, pr->id) = NULL; 452 per_cpu(processor_device_array, pr->id) = NULL;
453 per_cpu(processors, pr->id) = NULL; 453 per_cpu(processors, pr->id) = NULL;
454 try_offline_node(cpu_to_node(pr->id));
455 454
456 /* Remove the CPU. */ 455 /* Remove the CPU. */
457 get_online_cpus(); 456 get_online_cpus();
@@ -459,6 +458,8 @@ static void acpi_processor_remove(struct acpi_device *device)
459 acpi_unmap_lsapic(pr->id); 458 acpi_unmap_lsapic(pr->id);
460 put_online_cpus(); 459 put_online_cpus();
461 460
461 try_offline_node(cpu_to_node(pr->id));
462
462 out: 463 out:
463 free_cpumask_var(pr->throttling.shared_cpu_map); 464 free_cpumask_var(pr->throttling.shared_cpu_map);
464 kfree(pr); 465 kfree(pr);
diff --git a/drivers/acpi/acpica/aclocal.h b/drivers/acpi/acpica/aclocal.h
index dfed26545ba2..d4a4901637cd 100644
--- a/drivers/acpi/acpica/aclocal.h
+++ b/drivers/acpi/acpica/aclocal.h
@@ -931,19 +931,6 @@ struct acpi_bit_register_info {
931 931
932/* Structs and definitions for _OSI support and I/O port validation */ 932/* Structs and definitions for _OSI support and I/O port validation */
933 933
934#define ACPI_OSI_WIN_2000 0x01
935#define ACPI_OSI_WIN_XP 0x02
936#define ACPI_OSI_WIN_XP_SP1 0x03
937#define ACPI_OSI_WINSRV_2003 0x04
938#define ACPI_OSI_WIN_XP_SP2 0x05
939#define ACPI_OSI_WINSRV_2003_SP1 0x06
940#define ACPI_OSI_WIN_VISTA 0x07
941#define ACPI_OSI_WINSRV_2008 0x08
942#define ACPI_OSI_WIN_VISTA_SP1 0x09
943#define ACPI_OSI_WIN_VISTA_SP2 0x0A
944#define ACPI_OSI_WIN_7 0x0B
945#define ACPI_OSI_WIN_8 0x0C
946
947#define ACPI_ALWAYS_ILLEGAL 0x00 934#define ACPI_ALWAYS_ILLEGAL 0x00
948 935
949struct acpi_interface_info { 936struct acpi_interface_info {
diff --git a/drivers/acpi/battery.c b/drivers/acpi/battery.c
index 082b4dd252a8..d405fbad406a 100644
--- a/drivers/acpi/battery.c
+++ b/drivers/acpi/battery.c
@@ -117,6 +117,7 @@ struct acpi_battery {
117 struct acpi_device *device; 117 struct acpi_device *device;
118 struct notifier_block pm_nb; 118 struct notifier_block pm_nb;
119 unsigned long update_time; 119 unsigned long update_time;
120 int revision;
120 int rate_now; 121 int rate_now;
121 int capacity_now; 122 int capacity_now;
122 int voltage_now; 123 int voltage_now;
@@ -359,6 +360,7 @@ static struct acpi_offsets info_offsets[] = {
359}; 360};
360 361
361static struct acpi_offsets extended_info_offsets[] = { 362static struct acpi_offsets extended_info_offsets[] = {
363 {offsetof(struct acpi_battery, revision), 0},
362 {offsetof(struct acpi_battery, power_unit), 0}, 364 {offsetof(struct acpi_battery, power_unit), 0},
363 {offsetof(struct acpi_battery, design_capacity), 0}, 365 {offsetof(struct acpi_battery, design_capacity), 0},
364 {offsetof(struct acpi_battery, full_charge_capacity), 0}, 366 {offsetof(struct acpi_battery, full_charge_capacity), 0},
diff --git a/drivers/acpi/glue.c b/drivers/acpi/glue.c
index f68095756fb7..408f6b2a5fa8 100644
--- a/drivers/acpi/glue.c
+++ b/drivers/acpi/glue.c
@@ -31,6 +31,7 @@ static LIST_HEAD(bus_type_list);
31static DECLARE_RWSEM(bus_type_sem); 31static DECLARE_RWSEM(bus_type_sem);
32 32
33#define PHYSICAL_NODE_STRING "physical_node" 33#define PHYSICAL_NODE_STRING "physical_node"
34#define PHYSICAL_NODE_NAME_SIZE (sizeof(PHYSICAL_NODE_STRING) + 10)
34 35
35int register_acpi_bus_type(struct acpi_bus_type *type) 36int register_acpi_bus_type(struct acpi_bus_type *type)
36{ 37{
@@ -78,41 +79,108 @@ static struct acpi_bus_type *acpi_get_bus_type(struct device *dev)
78 return ret; 79 return ret;
79} 80}
80 81
81static acpi_status do_acpi_find_child(acpi_handle handle, u32 lvl_not_used, 82static acpi_status acpi_dev_present(acpi_handle handle, u32 lvl_not_used,
82 void *addr_p, void **ret_p) 83 void *not_used, void **ret_p)
83{ 84{
84 unsigned long long addr, sta; 85 struct acpi_device *adev = NULL;
85 acpi_status status;
86 86
87 status = acpi_evaluate_integer(handle, METHOD_NAME__ADR, NULL, &addr); 87 acpi_bus_get_device(handle, &adev);
88 if (ACPI_SUCCESS(status) && addr == *((u64 *)addr_p)) { 88 if (adev) {
89 *ret_p = handle; 89 *ret_p = handle;
90 status = acpi_bus_get_status_handle(handle, &sta); 90 return AE_CTRL_TERMINATE;
91 if (ACPI_SUCCESS(status) && (sta & ACPI_STA_DEVICE_ENABLED))
92 return AE_CTRL_TERMINATE;
93 } 91 }
94 return AE_OK; 92 return AE_OK;
95} 93}
96 94
97acpi_handle acpi_get_child(acpi_handle parent, u64 address) 95static bool acpi_extra_checks_passed(acpi_handle handle, bool is_bridge)
98{ 96{
99 void *ret = NULL; 97 unsigned long long sta;
98 acpi_status status;
99
100 status = acpi_bus_get_status_handle(handle, &sta);
101 if (ACPI_FAILURE(status) || !(sta & ACPI_STA_DEVICE_ENABLED))
102 return false;
103
104 if (is_bridge) {
105 void *test = NULL;
106
107 /* Check if this object has at least one child device. */
108 acpi_walk_namespace(ACPI_TYPE_DEVICE, handle, 1,
109 acpi_dev_present, NULL, NULL, &test);
110 return !!test;
111 }
112 return true;
113}
114
115struct find_child_context {
116 u64 addr;
117 bool is_bridge;
118 acpi_handle ret;
119 bool ret_checked;
120};
121
122static acpi_status do_find_child(acpi_handle handle, u32 lvl_not_used,
123 void *data, void **not_used)
124{
125 struct find_child_context *context = data;
126 unsigned long long addr;
127 acpi_status status;
100 128
101 if (!parent) 129 status = acpi_evaluate_integer(handle, METHOD_NAME__ADR, NULL, &addr);
102 return NULL; 130 if (ACPI_FAILURE(status) || addr != context->addr)
131 return AE_OK;
103 132
104 acpi_walk_namespace(ACPI_TYPE_DEVICE, parent, 1, NULL, 133 if (!context->ret) {
105 do_acpi_find_child, &address, &ret); 134 /* This is the first matching object. Save its handle. */
106 return (acpi_handle)ret; 135 context->ret = handle;
136 return AE_OK;
137 }
138 /*
139 * There is more than one matching object with the same _ADR value.
140 * That really is unexpected, so we are kind of beyond the scope of the
141 * spec here. We have to choose which one to return, though.
142 *
143 * First, check if the previously found object is good enough and return
144 * its handle if so. Second, check the same for the object that we've
145 * just found.
146 */
147 if (!context->ret_checked) {
148 if (acpi_extra_checks_passed(context->ret, context->is_bridge))
149 return AE_CTRL_TERMINATE;
150 else
151 context->ret_checked = true;
152 }
153 if (acpi_extra_checks_passed(handle, context->is_bridge)) {
154 context->ret = handle;
155 return AE_CTRL_TERMINATE;
156 }
157 return AE_OK;
107} 158}
108EXPORT_SYMBOL(acpi_get_child); 159
160acpi_handle acpi_find_child(acpi_handle parent, u64 addr, bool is_bridge)
161{
162 if (parent) {
163 struct find_child_context context = {
164 .addr = addr,
165 .is_bridge = is_bridge,
166 };
167
168 acpi_walk_namespace(ACPI_TYPE_DEVICE, parent, 1, do_find_child,
169 NULL, &context, NULL);
170 return context.ret;
171 }
172 return NULL;
173}
174EXPORT_SYMBOL_GPL(acpi_find_child);
109 175
110int acpi_bind_one(struct device *dev, acpi_handle handle) 176int acpi_bind_one(struct device *dev, acpi_handle handle)
111{ 177{
112 struct acpi_device *acpi_dev; 178 struct acpi_device *acpi_dev;
113 acpi_status status; 179 acpi_status status;
114 struct acpi_device_physical_node *physical_node, *pn; 180 struct acpi_device_physical_node *physical_node, *pn;
115 char physical_node_name[sizeof(PHYSICAL_NODE_STRING) + 2]; 181 char physical_node_name[PHYSICAL_NODE_NAME_SIZE];
182 struct list_head *physnode_list;
183 unsigned int node_id;
116 int retval = -EINVAL; 184 int retval = -EINVAL;
117 185
118 if (ACPI_HANDLE(dev)) { 186 if (ACPI_HANDLE(dev)) {
@@ -139,25 +207,27 @@ int acpi_bind_one(struct device *dev, acpi_handle handle)
139 207
140 mutex_lock(&acpi_dev->physical_node_lock); 208 mutex_lock(&acpi_dev->physical_node_lock);
141 209
142 /* Sanity check. */ 210 /*
143 list_for_each_entry(pn, &acpi_dev->physical_node_list, node) 211 * Keep the list sorted by node_id so that the IDs of removed nodes can
212 * be recycled easily.
213 */
214 physnode_list = &acpi_dev->physical_node_list;
215 node_id = 0;
216 list_for_each_entry(pn, &acpi_dev->physical_node_list, node) {
217 /* Sanity check. */
144 if (pn->dev == dev) { 218 if (pn->dev == dev) {
145 dev_warn(dev, "Already associated with ACPI node\n"); 219 dev_warn(dev, "Already associated with ACPI node\n");
146 goto err_free; 220 goto err_free;
147 } 221 }
148 222 if (pn->node_id == node_id) {
149 /* allocate physical node id according to physical_node_id_bitmap */ 223 physnode_list = &pn->node;
150 physical_node->node_id = 224 node_id++;
151 find_first_zero_bit(acpi_dev->physical_node_id_bitmap, 225 }
152 ACPI_MAX_PHYSICAL_NODE);
153 if (physical_node->node_id >= ACPI_MAX_PHYSICAL_NODE) {
154 retval = -ENOSPC;
155 goto err_free;
156 } 226 }
157 227
158 set_bit(physical_node->node_id, acpi_dev->physical_node_id_bitmap); 228 physical_node->node_id = node_id;
159 physical_node->dev = dev; 229 physical_node->dev = dev;
160 list_add_tail(&physical_node->node, &acpi_dev->physical_node_list); 230 list_add(&physical_node->node, physnode_list);
161 acpi_dev->physical_node_count++; 231 acpi_dev->physical_node_count++;
162 232
163 mutex_unlock(&acpi_dev->physical_node_lock); 233 mutex_unlock(&acpi_dev->physical_node_lock);
@@ -208,7 +278,7 @@ int acpi_unbind_one(struct device *dev)
208 278
209 mutex_lock(&acpi_dev->physical_node_lock); 279 mutex_lock(&acpi_dev->physical_node_lock);
210 list_for_each_safe(node, next, &acpi_dev->physical_node_list) { 280 list_for_each_safe(node, next, &acpi_dev->physical_node_list) {
211 char physical_node_name[sizeof(PHYSICAL_NODE_STRING) + 2]; 281 char physical_node_name[PHYSICAL_NODE_NAME_SIZE];
212 282
213 entry = list_entry(node, struct acpi_device_physical_node, 283 entry = list_entry(node, struct acpi_device_physical_node,
214 node); 284 node);
@@ -216,7 +286,6 @@ int acpi_unbind_one(struct device *dev)
216 continue; 286 continue;
217 287
218 list_del(node); 288 list_del(node);
219 clear_bit(entry->node_id, acpi_dev->physical_node_id_bitmap);
220 289
221 acpi_dev->physical_node_count--; 290 acpi_dev->physical_node_count--;
222 291
diff --git a/drivers/acpi/internal.h b/drivers/acpi/internal.h
index 3a50a34fe176..5da44e81dd4d 100644
--- a/drivers/acpi/internal.h
+++ b/drivers/acpi/internal.h
@@ -164,4 +164,13 @@ struct platform_device;
164int acpi_create_platform_device(struct acpi_device *adev, 164int acpi_create_platform_device(struct acpi_device *adev,
165 const struct acpi_device_id *id); 165 const struct acpi_device_id *id);
166 166
167/*--------------------------------------------------------------------------
168 Video
169 -------------------------------------------------------------------------- */
170#if defined(CONFIG_ACPI_VIDEO) || defined(CONFIG_ACPI_VIDEO_MODULE)
171bool acpi_video_backlight_quirks(void);
172#else
173static inline bool acpi_video_backlight_quirks(void) { return false; }
174#endif
175
167#endif /* _ACPI_INTERNAL_H_ */ 176#endif /* _ACPI_INTERNAL_H_ */
diff --git a/drivers/acpi/proc.c b/drivers/acpi/proc.c
index aa1227a7e3f2..04a13784dd20 100644
--- a/drivers/acpi/proc.c
+++ b/drivers/acpi/proc.c
@@ -311,6 +311,8 @@ acpi_system_wakeup_device_seq_show(struct seq_file *seq, void *offset)
311 dev->pnp.bus_id, 311 dev->pnp.bus_id,
312 (u32) dev->wakeup.sleep_state); 312 (u32) dev->wakeup.sleep_state);
313 313
314 mutex_lock(&dev->physical_node_lock);
315
314 if (!dev->physical_node_count) { 316 if (!dev->physical_node_count) {
315 seq_printf(seq, "%c%-8s\n", 317 seq_printf(seq, "%c%-8s\n",
316 dev->wakeup.flags.run_wake ? '*' : ' ', 318 dev->wakeup.flags.run_wake ? '*' : ' ',
@@ -338,6 +340,8 @@ acpi_system_wakeup_device_seq_show(struct seq_file *seq, void *offset)
338 put_device(ldev); 340 put_device(ldev);
339 } 341 }
340 } 342 }
343
344 mutex_unlock(&dev->physical_node_lock);
341 } 345 }
342 mutex_unlock(&acpi_device_lock); 346 mutex_unlock(&acpi_device_lock);
343 return 0; 347 return 0;
@@ -347,12 +351,16 @@ static void physical_device_enable_wakeup(struct acpi_device *adev)
347{ 351{
348 struct acpi_device_physical_node *entry; 352 struct acpi_device_physical_node *entry;
349 353
354 mutex_lock(&adev->physical_node_lock);
355
350 list_for_each_entry(entry, 356 list_for_each_entry(entry,
351 &adev->physical_node_list, node) 357 &adev->physical_node_list, node)
352 if (entry->dev && device_can_wakeup(entry->dev)) { 358 if (entry->dev && device_can_wakeup(entry->dev)) {
353 bool enable = !device_may_wakeup(entry->dev); 359 bool enable = !device_may_wakeup(entry->dev);
354 device_set_wakeup_enable(entry->dev, enable); 360 device_set_wakeup_enable(entry->dev, enable);
355 } 361 }
362
363 mutex_unlock(&adev->physical_node_lock);
356} 364}
357 365
358static ssize_t 366static ssize_t
diff --git a/drivers/acpi/processor_core.c b/drivers/acpi/processor_core.c
index 164d49569aeb..a5e9f4a5b281 100644
--- a/drivers/acpi/processor_core.c
+++ b/drivers/acpi/processor_core.c
@@ -253,7 +253,7 @@ static bool __init processor_physically_present(acpi_handle handle)
253 return true; 253 return true;
254} 254}
255 255
256static void __cpuinit acpi_set_pdc_bits(u32 *buf) 256static void acpi_set_pdc_bits(u32 *buf)
257{ 257{
258 buf[0] = ACPI_PDC_REVISION_ID; 258 buf[0] = ACPI_PDC_REVISION_ID;
259 buf[1] = 1; 259 buf[1] = 1;
@@ -265,7 +265,7 @@ static void __cpuinit acpi_set_pdc_bits(u32 *buf)
265 arch_acpi_set_pdc_bits(buf); 265 arch_acpi_set_pdc_bits(buf);
266} 266}
267 267
268static struct acpi_object_list *__cpuinit acpi_processor_alloc_pdc(void) 268static struct acpi_object_list *acpi_processor_alloc_pdc(void)
269{ 269{
270 struct acpi_object_list *obj_list; 270 struct acpi_object_list *obj_list;
271 union acpi_object *obj; 271 union acpi_object *obj;
@@ -308,7 +308,7 @@ static struct acpi_object_list *__cpuinit acpi_processor_alloc_pdc(void)
308 * _PDC is required for a BIOS-OS handshake for most of the newer 308 * _PDC is required for a BIOS-OS handshake for most of the newer
309 * ACPI processor features. 309 * ACPI processor features.
310 */ 310 */
311static int __cpuinit 311static int
312acpi_processor_eval_pdc(acpi_handle handle, struct acpi_object_list *pdc_in) 312acpi_processor_eval_pdc(acpi_handle handle, struct acpi_object_list *pdc_in)
313{ 313{
314 acpi_status status = AE_OK; 314 acpi_status status = AE_OK;
@@ -336,7 +336,7 @@ acpi_processor_eval_pdc(acpi_handle handle, struct acpi_object_list *pdc_in)
336 return status; 336 return status;
337} 337}
338 338
339void __cpuinit acpi_processor_set_pdc(acpi_handle handle) 339void acpi_processor_set_pdc(acpi_handle handle)
340{ 340{
341 struct acpi_object_list *obj_list; 341 struct acpi_object_list *obj_list;
342 342
diff --git a/drivers/acpi/processor_driver.c b/drivers/acpi/processor_driver.c
index 823be116619e..870eaf5fa547 100644
--- a/drivers/acpi/processor_driver.c
+++ b/drivers/acpi/processor_driver.c
@@ -118,9 +118,9 @@ static void acpi_processor_notify(acpi_handle handle, u32 event, void *data)
118 return; 118 return;
119} 119}
120 120
121static __cpuinit int __acpi_processor_start(struct acpi_device *device); 121static int __acpi_processor_start(struct acpi_device *device);
122 122
123static int __cpuinit acpi_cpu_soft_notify(struct notifier_block *nfb, 123static int acpi_cpu_soft_notify(struct notifier_block *nfb,
124 unsigned long action, void *hcpu) 124 unsigned long action, void *hcpu)
125{ 125{
126 unsigned int cpu = (unsigned long)hcpu; 126 unsigned int cpu = (unsigned long)hcpu;
@@ -162,7 +162,7 @@ static struct notifier_block __refdata acpi_cpu_notifier =
162 .notifier_call = acpi_cpu_soft_notify, 162 .notifier_call = acpi_cpu_soft_notify,
163}; 163};
164 164
165static __cpuinit int __acpi_processor_start(struct acpi_device *device) 165static int __acpi_processor_start(struct acpi_device *device)
166{ 166{
167 struct acpi_processor *pr = acpi_driver_data(device); 167 struct acpi_processor *pr = acpi_driver_data(device);
168 acpi_status status; 168 acpi_status status;
@@ -226,7 +226,7 @@ static __cpuinit int __acpi_processor_start(struct acpi_device *device)
226 return result; 226 return result;
227} 227}
228 228
229static int __cpuinit acpi_processor_start(struct device *dev) 229static int acpi_processor_start(struct device *dev)
230{ 230{
231 struct acpi_device *device; 231 struct acpi_device *device;
232 232
diff --git a/drivers/acpi/processor_idle.c b/drivers/acpi/processor_idle.c
index 0461ccc92c54..f98dd00b51a9 100644
--- a/drivers/acpi/processor_idle.c
+++ b/drivers/acpi/processor_idle.c
@@ -96,9 +96,7 @@ static int set_max_cstate(const struct dmi_system_id *id)
96 return 0; 96 return 0;
97} 97}
98 98
99/* Actually this shouldn't be __cpuinitdata, would be better to fix the 99static struct dmi_system_id processor_power_dmi_table[] = {
100 callers to only run once -AK */
101static struct dmi_system_id __cpuinitdata processor_power_dmi_table[] = {
102 { set_max_cstate, "Clevo 5600D", { 100 { set_max_cstate, "Clevo 5600D", {
103 DMI_MATCH(DMI_BIOS_VENDOR,"Phoenix Technologies LTD"), 101 DMI_MATCH(DMI_BIOS_VENDOR,"Phoenix Technologies LTD"),
104 DMI_MATCH(DMI_BIOS_VERSION,"SHE845M0.86C.0013.D.0302131307")}, 102 DMI_MATCH(DMI_BIOS_VERSION,"SHE845M0.86C.0013.D.0302131307")},
@@ -1165,7 +1163,7 @@ int acpi_processor_cst_has_changed(struct acpi_processor *pr)
1165 1163
1166static int acpi_processor_registered; 1164static int acpi_processor_registered;
1167 1165
1168int __cpuinit acpi_processor_power_init(struct acpi_processor *pr) 1166int acpi_processor_power_init(struct acpi_processor *pr)
1169{ 1167{
1170 acpi_status status = 0; 1168 acpi_status status = 0;
1171 int retval; 1169 int retval;
diff --git a/drivers/acpi/scan.c b/drivers/acpi/scan.c
index 10985573aaa7..8a46c924effd 100644
--- a/drivers/acpi/scan.c
+++ b/drivers/acpi/scan.c
@@ -352,10 +352,12 @@ static void acpi_scan_bus_device_check(acpi_handle handle, u32 ost_source)
352 mutex_lock(&acpi_scan_lock); 352 mutex_lock(&acpi_scan_lock);
353 lock_device_hotplug(); 353 lock_device_hotplug();
354 354
355 acpi_bus_get_device(handle, &device); 355 if (ost_source != ACPI_NOTIFY_BUS_CHECK) {
356 if (device) { 356 acpi_bus_get_device(handle, &device);
357 dev_warn(&device->dev, "Attempt to re-insert\n"); 357 if (device) {
358 goto out; 358 dev_warn(&device->dev, "Attempt to re-insert\n");
359 goto out;
360 }
359 } 361 }
360 acpi_evaluate_hotplug_ost(handle, ost_source, 362 acpi_evaluate_hotplug_ost(handle, ost_source,
361 ACPI_OST_SC_INSERT_IN_PROGRESS, NULL); 363 ACPI_OST_SC_INSERT_IN_PROGRESS, NULL);
@@ -1981,6 +1983,9 @@ static acpi_status acpi_bus_device_attach(acpi_handle handle, u32 lvl_not_used,
1981 if (acpi_bus_get_device(handle, &device)) 1983 if (acpi_bus_get_device(handle, &device))
1982 return AE_CTRL_DEPTH; 1984 return AE_CTRL_DEPTH;
1983 1985
1986 if (device->handler)
1987 return AE_OK;
1988
1984 ret = acpi_scan_attach_handler(device); 1989 ret = acpi_scan_attach_handler(device);
1985 if (ret) 1990 if (ret)
1986 return ret > 0 ? AE_OK : AE_CTRL_DEPTH; 1991 return ret > 0 ? AE_OK : AE_CTRL_DEPTH;
diff --git a/drivers/acpi/video.c b/drivers/acpi/video.c
index 5d7075d25700..e1284b8dc6ee 100644
--- a/drivers/acpi/video.c
+++ b/drivers/acpi/video.c
@@ -44,6 +44,8 @@
44#include <linux/suspend.h> 44#include <linux/suspend.h>
45#include <acpi/video.h> 45#include <acpi/video.h>
46 46
47#include "internal.h"
48
47#define PREFIX "ACPI: " 49#define PREFIX "ACPI: "
48 50
49#define ACPI_VIDEO_BUS_NAME "Video Bus" 51#define ACPI_VIDEO_BUS_NAME "Video Bus"
@@ -450,6 +452,14 @@ static struct dmi_system_id video_dmi_table[] __initdata = {
450 }, 452 },
451 { 453 {
452 .callback = video_ignore_initial_backlight, 454 .callback = video_ignore_initial_backlight,
455 .ident = "Fujitsu E753",
456 .matches = {
457 DMI_MATCH(DMI_BOARD_VENDOR, "FUJITSU"),
458 DMI_MATCH(DMI_PRODUCT_NAME, "LIFEBOOK E753"),
459 },
460 },
461 {
462 .callback = video_ignore_initial_backlight,
453 .ident = "HP Pavilion dm4", 463 .ident = "HP Pavilion dm4",
454 .matches = { 464 .matches = {
455 DMI_MATCH(DMI_BOARD_VENDOR, "Hewlett-Packard"), 465 DMI_MATCH(DMI_BOARD_VENDOR, "Hewlett-Packard"),
@@ -679,7 +689,7 @@ static int acpi_video_bqc_quirk(struct acpi_video_device *device,
679 * Some systems always report current brightness level as maximum 689 * Some systems always report current brightness level as maximum
680 * through _BQC, we need to test another value for them. 690 * through _BQC, we need to test another value for them.
681 */ 691 */
682 test_level = current_level == max_level ? br->levels[2] : max_level; 692 test_level = current_level == max_level ? br->levels[3] : max_level;
683 693
684 result = acpi_video_device_lcd_set_level(device, test_level); 694 result = acpi_video_device_lcd_set_level(device, test_level);
685 if (result) 695 if (result)
@@ -898,6 +908,9 @@ static void acpi_video_device_find_cap(struct acpi_video_device *device)
898 device->cap._DDC = 1; 908 device->cap._DDC = 1;
899 } 909 }
900 910
911 if (acpi_video_init_brightness(device))
912 return;
913
901 if (acpi_video_backlight_support()) { 914 if (acpi_video_backlight_support()) {
902 struct backlight_properties props; 915 struct backlight_properties props;
903 struct pci_dev *pdev; 916 struct pci_dev *pdev;
@@ -907,9 +920,6 @@ static void acpi_video_device_find_cap(struct acpi_video_device *device)
907 static int count = 0; 920 static int count = 0;
908 char *name; 921 char *name;
909 922
910 result = acpi_video_init_brightness(device);
911 if (result)
912 return;
913 name = kasprintf(GFP_KERNEL, "acpi_video%d", count); 923 name = kasprintf(GFP_KERNEL, "acpi_video%d", count);
914 if (!name) 924 if (!name)
915 return; 925 return;
@@ -969,6 +979,11 @@ static void acpi_video_device_find_cap(struct acpi_video_device *device)
969 if (result) 979 if (result)
970 printk(KERN_ERR PREFIX "Create sysfs link\n"); 980 printk(KERN_ERR PREFIX "Create sysfs link\n");
971 981
982 } else {
983 /* Remove the brightness object. */
984 kfree(device->brightness->levels);
985 kfree(device->brightness);
986 device->brightness = NULL;
972 } 987 }
973} 988}
974 989
@@ -1532,14 +1547,20 @@ static int acpi_video_bus_put_devices(struct acpi_video_bus *video)
1532 1547
1533/* acpi_video interface */ 1548/* acpi_video interface */
1534 1549
1550/*
1551 * Win8 requires setting bit2 of _DOS to let firmware know it shouldn't
1552 * preform any automatic brightness change on receiving a notification.
1553 */
1535static int acpi_video_bus_start_devices(struct acpi_video_bus *video) 1554static int acpi_video_bus_start_devices(struct acpi_video_bus *video)
1536{ 1555{
1537 return acpi_video_bus_DOS(video, 0, 0); 1556 return acpi_video_bus_DOS(video, 0,
1557 acpi_video_backlight_quirks() ? 1 : 0);
1538} 1558}
1539 1559
1540static int acpi_video_bus_stop_devices(struct acpi_video_bus *video) 1560static int acpi_video_bus_stop_devices(struct acpi_video_bus *video)
1541{ 1561{
1542 return acpi_video_bus_DOS(video, 0, 1); 1562 return acpi_video_bus_DOS(video, 0,
1563 acpi_video_backlight_quirks() ? 0 : 1);
1543} 1564}
1544 1565
1545static void acpi_video_bus_notify(struct acpi_device *device, u32 event) 1566static void acpi_video_bus_notify(struct acpi_device *device, u32 event)
diff --git a/drivers/acpi/video_detect.c b/drivers/acpi/video_detect.c
index e6bd910bc6ed..c3397748ba46 100644
--- a/drivers/acpi/video_detect.c
+++ b/drivers/acpi/video_detect.c
@@ -38,6 +38,8 @@
38#include <linux/dmi.h> 38#include <linux/dmi.h>
39#include <linux/pci.h> 39#include <linux/pci.h>
40 40
41#include "internal.h"
42
41#define PREFIX "ACPI: " 43#define PREFIX "ACPI: "
42 44
43ACPI_MODULE_NAME("video"); 45ACPI_MODULE_NAME("video");
@@ -234,6 +236,12 @@ static void acpi_video_caps_check(void)
234 acpi_video_get_capabilities(NULL); 236 acpi_video_get_capabilities(NULL);
235} 237}
236 238
239bool acpi_video_backlight_quirks(void)
240{
241 return acpi_gbl_osi_data >= ACPI_OSI_WIN_8;
242}
243EXPORT_SYMBOL(acpi_video_backlight_quirks);
244
237/* Promote the vendor interface instead of the generic video module. 245/* Promote the vendor interface instead of the generic video module.
238 * This function allow DMI blacklists to be implemented by externals 246 * This function allow DMI blacklists to be implemented by externals
239 * platform drivers instead of putting a big blacklist in video_detect.c 247 * platform drivers instead of putting a big blacklist in video_detect.c
diff --git a/drivers/ata/Kconfig b/drivers/ata/Kconfig
index 80dc988f01e4..4e737728aee2 100644
--- a/drivers/ata/Kconfig
+++ b/drivers/ata/Kconfig
@@ -97,6 +97,15 @@ config SATA_AHCI_PLATFORM
97 97
98 If unsure, say N. 98 If unsure, say N.
99 99
100config AHCI_IMX
101 tristate "Freescale i.MX AHCI SATA support"
102 depends on SATA_AHCI_PLATFORM && MFD_SYSCON
103 help
104 This option enables support for the Freescale i.MX SoC's
105 onboard AHCI SATA.
106
107 If unsure, say N.
108
100config SATA_FSL 109config SATA_FSL
101 tristate "Freescale 3.0Gbps SATA support" 110 tristate "Freescale 3.0Gbps SATA support"
102 depends on FSL_SOC 111 depends on FSL_SOC
@@ -107,7 +116,7 @@ config SATA_FSL
107 If unsure, say N. 116 If unsure, say N.
108 117
109config SATA_INIC162X 118config SATA_INIC162X
110 tristate "Initio 162x SATA support" 119 tristate "Initio 162x SATA support (Very Experimental)"
111 depends on PCI 120 depends on PCI
112 help 121 help
113 This option enables support for Initio 162x Serial ATA. 122 This option enables support for Initio 162x Serial ATA.
diff --git a/drivers/ata/Makefile b/drivers/ata/Makefile
index c04d0fd038a3..46518c622460 100644
--- a/drivers/ata/Makefile
+++ b/drivers/ata/Makefile
@@ -10,6 +10,7 @@ obj-$(CONFIG_SATA_INIC162X) += sata_inic162x.o
10obj-$(CONFIG_SATA_SIL24) += sata_sil24.o 10obj-$(CONFIG_SATA_SIL24) += sata_sil24.o
11obj-$(CONFIG_SATA_DWC) += sata_dwc_460ex.o 11obj-$(CONFIG_SATA_DWC) += sata_dwc_460ex.o
12obj-$(CONFIG_SATA_HIGHBANK) += sata_highbank.o libahci.o 12obj-$(CONFIG_SATA_HIGHBANK) += sata_highbank.o libahci.o
13obj-$(CONFIG_AHCI_IMX) += ahci_imx.o
13 14
14# SFF w/ custom DMA 15# SFF w/ custom DMA
15obj-$(CONFIG_PDC_ADMA) += pdc_adma.o 16obj-$(CONFIG_PDC_ADMA) += pdc_adma.o
diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c
index 5064f3ea20f1..db4380d70031 100644
--- a/drivers/ata/ahci.c
+++ b/drivers/ata/ahci.c
@@ -1146,11 +1146,18 @@ int ahci_host_activate(struct ata_host *host, int irq, unsigned int n_msis)
1146 return rc; 1146 return rc;
1147 1147
1148 for (i = 0; i < host->n_ports; i++) { 1148 for (i = 0; i < host->n_ports; i++) {
1149 const char* desc;
1149 struct ahci_port_priv *pp = host->ports[i]->private_data; 1150 struct ahci_port_priv *pp = host->ports[i]->private_data;
1150 1151
1152 /* pp is NULL for dummy ports */
1153 if (pp)
1154 desc = pp->irq_desc;
1155 else
1156 desc = dev_driver_string(host->dev);
1157
1151 rc = devm_request_threaded_irq(host->dev, 1158 rc = devm_request_threaded_irq(host->dev,
1152 irq + i, ahci_hw_interrupt, ahci_thread_fn, IRQF_SHARED, 1159 irq + i, ahci_hw_interrupt, ahci_thread_fn, IRQF_SHARED,
1153 pp->irq_desc, host->ports[i]); 1160 desc, host->ports[i]);
1154 if (rc) 1161 if (rc)
1155 goto out_free_irqs; 1162 goto out_free_irqs;
1156 } 1163 }
diff --git a/drivers/ata/ahci_imx.c b/drivers/ata/ahci_imx.c
new file mode 100644
index 000000000000..58debb0acc3a
--- /dev/null
+++ b/drivers/ata/ahci_imx.c
@@ -0,0 +1,236 @@
1/*
2 * Freescale IMX AHCI SATA platform driver
3 * Copyright 2013 Freescale Semiconductor, Inc.
4 *
5 * based on the AHCI SATA platform driver by Jeff Garzik and Anton Vorontsov
6 *
7 * This program is free software; you can redistribute it and/or modify it
8 * under the terms and conditions of the GNU General Public License,
9 * version 2, as published by the Free Software Foundation.
10 *
11 * This program is distributed in the hope it will be useful, but WITHOUT
12 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
14 * more details.
15 *
16 * You should have received a copy of the GNU General Public License along with
17 * this program. If not, see <http://www.gnu.org/licenses/>.
18 */
19
20#include <linux/kernel.h>
21#include <linux/module.h>
22#include <linux/platform_device.h>
23#include <linux/regmap.h>
24#include <linux/ahci_platform.h>
25#include <linux/of_device.h>
26#include <linux/mfd/syscon.h>
27#include <linux/mfd/syscon/imx6q-iomuxc-gpr.h>
28#include "ahci.h"
29
30enum {
31 HOST_TIMER1MS = 0xe0, /* Timer 1-ms */
32};
33
34struct imx_ahci_priv {
35 struct platform_device *ahci_pdev;
36 struct clk *sata_ref_clk;
37 struct clk *ahb_clk;
38 struct regmap *gpr;
39};
40
41static int imx6q_sata_init(struct device *dev, void __iomem *mmio)
42{
43 int ret = 0;
44 unsigned int reg_val;
45 struct imx_ahci_priv *imxpriv = dev_get_drvdata(dev->parent);
46
47 imxpriv->gpr =
48 syscon_regmap_lookup_by_compatible("fsl,imx6q-iomuxc-gpr");
49 if (IS_ERR(imxpriv->gpr)) {
50 dev_err(dev, "failed to find fsl,imx6q-iomux-gpr regmap\n");
51 return PTR_ERR(imxpriv->gpr);
52 }
53
54 ret = clk_prepare_enable(imxpriv->sata_ref_clk);
55 if (ret < 0) {
56 dev_err(dev, "prepare-enable sata_ref clock err:%d\n", ret);
57 return ret;
58 }
59
60 /*
61 * set PHY Paremeters, two steps to configure the GPR13,
62 * one write for rest of parameters, mask of first write
63 * is 0x07fffffd, and the other one write for setting
64 * the mpll_clk_en.
65 */
66 regmap_update_bits(imxpriv->gpr, 0x34, IMX6Q_GPR13_SATA_RX_EQ_VAL_MASK
67 | IMX6Q_GPR13_SATA_RX_LOS_LVL_MASK
68 | IMX6Q_GPR13_SATA_RX_DPLL_MODE_MASK
69 | IMX6Q_GPR13_SATA_SPD_MODE_MASK
70 | IMX6Q_GPR13_SATA_MPLL_SS_EN
71 | IMX6Q_GPR13_SATA_TX_ATTEN_MASK
72 | IMX6Q_GPR13_SATA_TX_BOOST_MASK
73 | IMX6Q_GPR13_SATA_TX_LVL_MASK
74 | IMX6Q_GPR13_SATA_TX_EDGE_RATE
75 , IMX6Q_GPR13_SATA_RX_EQ_VAL_3_0_DB
76 | IMX6Q_GPR13_SATA_RX_LOS_LVL_SATA2M
77 | IMX6Q_GPR13_SATA_RX_DPLL_MODE_2P_4F
78 | IMX6Q_GPR13_SATA_SPD_MODE_3P0G
79 | IMX6Q_GPR13_SATA_MPLL_SS_EN
80 | IMX6Q_GPR13_SATA_TX_ATTEN_9_16
81 | IMX6Q_GPR13_SATA_TX_BOOST_3_33_DB
82 | IMX6Q_GPR13_SATA_TX_LVL_1_025_V);
83 regmap_update_bits(imxpriv->gpr, 0x34, IMX6Q_GPR13_SATA_MPLL_CLK_EN,
84 IMX6Q_GPR13_SATA_MPLL_CLK_EN);
85 usleep_range(100, 200);
86
87 /*
88 * Configure the HWINIT bits of the HOST_CAP and HOST_PORTS_IMPL,
89 * and IP vendor specific register HOST_TIMER1MS.
90 * Configure CAP_SSS (support stagered spin up).
91 * Implement the port0.
92 * Get the ahb clock rate, and configure the TIMER1MS register.
93 */
94 reg_val = readl(mmio + HOST_CAP);
95 if (!(reg_val & HOST_CAP_SSS)) {
96 reg_val |= HOST_CAP_SSS;
97 writel(reg_val, mmio + HOST_CAP);
98 }
99 reg_val = readl(mmio + HOST_PORTS_IMPL);
100 if (!(reg_val & 0x1)) {
101 reg_val |= 0x1;
102 writel(reg_val, mmio + HOST_PORTS_IMPL);
103 }
104
105 reg_val = clk_get_rate(imxpriv->ahb_clk) / 1000;
106 writel(reg_val, mmio + HOST_TIMER1MS);
107
108 return 0;
109}
110
111static void imx6q_sata_exit(struct device *dev)
112{
113 struct imx_ahci_priv *imxpriv = dev_get_drvdata(dev->parent);
114
115 regmap_update_bits(imxpriv->gpr, 0x34, IMX6Q_GPR13_SATA_MPLL_CLK_EN,
116 !IMX6Q_GPR13_SATA_MPLL_CLK_EN);
117 clk_disable_unprepare(imxpriv->sata_ref_clk);
118}
119
120static struct ahci_platform_data imx6q_sata_pdata = {
121 .init = imx6q_sata_init,
122 .exit = imx6q_sata_exit,
123};
124
125static const struct of_device_id imx_ahci_of_match[] = {
126 { .compatible = "fsl,imx6q-ahci", .data = &imx6q_sata_pdata},
127 {},
128};
129MODULE_DEVICE_TABLE(of, imx_ahci_of_match);
130
131static int imx_ahci_probe(struct platform_device *pdev)
132{
133 struct device *dev = &pdev->dev;
134 struct resource *mem, *irq, res[2];
135 const struct of_device_id *of_id;
136 const struct ahci_platform_data *pdata = NULL;
137 struct imx_ahci_priv *imxpriv;
138 struct device *ahci_dev;
139 struct platform_device *ahci_pdev;
140 int ret;
141
142 imxpriv = devm_kzalloc(dev, sizeof(*imxpriv), GFP_KERNEL);
143 if (!imxpriv) {
144 dev_err(dev, "can't alloc ahci_host_priv\n");
145 return -ENOMEM;
146 }
147
148 ahci_pdev = platform_device_alloc("ahci", -1);
149 if (!ahci_pdev)
150 return -ENODEV;
151
152 ahci_dev = &ahci_pdev->dev;
153 ahci_dev->parent = dev;
154
155 imxpriv->ahb_clk = devm_clk_get(dev, "ahb");
156 if (IS_ERR(imxpriv->ahb_clk)) {
157 dev_err(dev, "can't get ahb clock.\n");
158 ret = PTR_ERR(imxpriv->ahb_clk);
159 goto err_out;
160 }
161
162 imxpriv->sata_ref_clk = devm_clk_get(dev, "sata_ref");
163 if (IS_ERR(imxpriv->sata_ref_clk)) {
164 dev_err(dev, "can't get sata_ref clock.\n");
165 ret = PTR_ERR(imxpriv->sata_ref_clk);
166 goto err_out;
167 }
168
169 imxpriv->ahci_pdev = ahci_pdev;
170 platform_set_drvdata(pdev, imxpriv);
171
172 of_id = of_match_device(imx_ahci_of_match, dev);
173 if (of_id) {
174 pdata = of_id->data;
175 } else {
176 ret = -EINVAL;
177 goto err_out;
178 }
179
180 mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
181 irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
182 if (!mem || !irq) {
183 dev_err(dev, "no mmio/irq resource\n");
184 ret = -ENOMEM;
185 goto err_out;
186 }
187
188 res[0] = *mem;
189 res[1] = *irq;
190
191 ahci_dev->coherent_dma_mask = DMA_BIT_MASK(32);
192 ahci_dev->dma_mask = &ahci_dev->coherent_dma_mask;
193 ahci_dev->of_node = dev->of_node;
194
195 ret = platform_device_add_resources(ahci_pdev, res, 2);
196 if (ret)
197 goto err_out;
198
199 ret = platform_device_add_data(ahci_pdev, pdata, sizeof(*pdata));
200 if (ret)
201 goto err_out;
202
203 ret = platform_device_add(ahci_pdev);
204 if (ret) {
205err_out:
206 platform_device_put(ahci_pdev);
207 return ret;
208 }
209
210 return 0;
211}
212
213static int imx_ahci_remove(struct platform_device *pdev)
214{
215 struct imx_ahci_priv *imxpriv = platform_get_drvdata(pdev);
216 struct platform_device *ahci_pdev = imxpriv->ahci_pdev;
217
218 platform_device_unregister(ahci_pdev);
219 return 0;
220}
221
222static struct platform_driver imx_ahci_driver = {
223 .probe = imx_ahci_probe,
224 .remove = imx_ahci_remove,
225 .driver = {
226 .name = "ahci-imx",
227 .owner = THIS_MODULE,
228 .of_match_table = imx_ahci_of_match,
229 },
230};
231module_platform_driver(imx_ahci_driver);
232
233MODULE_DESCRIPTION("Freescale i.MX AHCI SATA platform driver");
234MODULE_AUTHOR("Richard Zhu <Hong-Xing.Zhu@freescale.com>");
235MODULE_LICENSE("GPL");
236MODULE_ALIAS("ahci:imx");
diff --git a/drivers/ata/ata_piix.c b/drivers/ata/ata_piix.c
index b52a10c8eeb9..513ad7ed0c99 100644
--- a/drivers/ata/ata_piix.c
+++ b/drivers/ata/ata_piix.c
@@ -330,7 +330,7 @@ static const struct pci_device_id piix_pci_tbl[] = {
330 /* SATA Controller IDE (Wellsburg) */ 330 /* SATA Controller IDE (Wellsburg) */
331 { 0x8086, 0x8d00, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_sata_snb }, 331 { 0x8086, 0x8d00, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_sata_snb },
332 /* SATA Controller IDE (Wellsburg) */ 332 /* SATA Controller IDE (Wellsburg) */
333 { 0x8086, 0x8d08, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata }, 333 { 0x8086, 0x8d08, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata_snb },
334 /* SATA Controller IDE (Wellsburg) */ 334 /* SATA Controller IDE (Wellsburg) */
335 { 0x8086, 0x8d60, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_sata_snb }, 335 { 0x8086, 0x8d60, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_sata_snb },
336 /* SATA Controller IDE (Wellsburg) */ 336 /* SATA Controller IDE (Wellsburg) */
diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c
index 83c08907e042..b1e880a3c3da 100644
--- a/drivers/ata/libata-scsi.c
+++ b/drivers/ata/libata-scsi.c
@@ -206,8 +206,10 @@ static ssize_t ata_scsi_park_store(struct device *device,
206 unsigned long flags; 206 unsigned long flags;
207 int rc; 207 int rc;
208 208
209 rc = strict_strtol(buf, 10, &input); 209 rc = kstrtol(buf, 10, &input);
210 if (rc || input < -2) 210 if (rc)
211 return rc;
212 if (input < -2)
211 return -EINVAL; 213 return -EINVAL;
212 if (input > ATA_TMOUT_MAX_PARK) { 214 if (input > ATA_TMOUT_MAX_PARK) {
213 rc = -EOVERFLOW; 215 rc = -EOVERFLOW;
diff --git a/drivers/ata/pata_imx.c b/drivers/ata/pata_imx.c
index 4ec7c04b3f82..26386f0b89a8 100644
--- a/drivers/ata/pata_imx.c
+++ b/drivers/ata/pata_imx.c
@@ -237,6 +237,7 @@ static const struct of_device_id imx_pata_dt_ids[] = {
237 /* sentinel */ 237 /* sentinel */
238 } 238 }
239}; 239};
240MODULE_DEVICE_TABLE(of, imx_pata_dt_ids);
240 241
241static struct platform_driver pata_imx_driver = { 242static struct platform_driver pata_imx_driver = {
242 .probe = pata_imx_probe, 243 .probe = pata_imx_probe,
diff --git a/drivers/ata/sata_inic162x.c b/drivers/ata/sata_inic162x.c
index e45131748248..5c54d957370a 100644
--- a/drivers/ata/sata_inic162x.c
+++ b/drivers/ata/sata_inic162x.c
@@ -6,6 +6,18 @@
6 * 6 *
7 * This file is released under GPL v2. 7 * This file is released under GPL v2.
8 * 8 *
9 * **** WARNING ****
10 *
11 * This driver never worked properly and unfortunately data corruption is
12 * relatively common. There isn't anyone working on the driver and there's
13 * no support from the vendor. Do not use this driver in any production
14 * environment.
15 *
16 * http://thread.gmane.org/gmane.linux.debian.devel.bugs.rc/378525/focus=54491
17 * https://bugzilla.kernel.org/show_bug.cgi?id=60565
18 *
19 * *****************
20 *
9 * This controller is eccentric and easily locks up if something isn't 21 * This controller is eccentric and easily locks up if something isn't
10 * right. Documentation is available at initio's website but it only 22 * right. Documentation is available at initio's website but it only
11 * documents registers (not programming model). 23 * documents registers (not programming model).
@@ -807,6 +819,8 @@ static int inic_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
807 819
808 ata_print_version_once(&pdev->dev, DRV_VERSION); 820 ata_print_version_once(&pdev->dev, DRV_VERSION);
809 821
822 dev_alert(&pdev->dev, "inic162x support is broken with common data corruption issues and will be disabled by default, contact linux-ide@vger.kernel.org if in production use\n");
823
810 /* alloc host */ 824 /* alloc host */
811 host = ata_host_alloc_pinfo(&pdev->dev, ppi, NR_PORTS); 825 host = ata_host_alloc_pinfo(&pdev->dev, ppi, NR_PORTS);
812 hpriv = devm_kzalloc(&pdev->dev, sizeof(*hpriv), GFP_KERNEL); 826 hpriv = devm_kzalloc(&pdev->dev, sizeof(*hpriv), GFP_KERNEL);
diff --git a/drivers/base/core.c b/drivers/base/core.c
index dc3ea237f086..8856d74545d9 100644
--- a/drivers/base/core.c
+++ b/drivers/base/core.c
@@ -528,9 +528,12 @@ static int device_add_attrs(struct device *dev)
528 int error; 528 int error;
529 529
530 if (class) { 530 if (class) {
531 error = device_add_attributes(dev, class->dev_attrs); 531 error = device_add_groups(dev, class->dev_groups);
532 if (error) 532 if (error)
533 return error; 533 return error;
534 error = device_add_attributes(dev, class->dev_attrs);
535 if (error)
536 goto err_remove_class_groups;
534 error = device_add_bin_attributes(dev, class->dev_bin_attrs); 537 error = device_add_bin_attributes(dev, class->dev_bin_attrs);
535 if (error) 538 if (error)
536 goto err_remove_class_attrs; 539 goto err_remove_class_attrs;
@@ -563,6 +566,9 @@ static int device_add_attrs(struct device *dev)
563 err_remove_class_attrs: 566 err_remove_class_attrs:
564 if (class) 567 if (class)
565 device_remove_attributes(dev, class->dev_attrs); 568 device_remove_attributes(dev, class->dev_attrs);
569 err_remove_class_groups:
570 if (class)
571 device_remove_groups(dev, class->dev_groups);
566 572
567 return error; 573 return error;
568} 574}
@@ -581,6 +587,7 @@ static void device_remove_attrs(struct device *dev)
581 if (class) { 587 if (class) {
582 device_remove_attributes(dev, class->dev_attrs); 588 device_remove_attributes(dev, class->dev_attrs);
583 device_remove_bin_attributes(dev, class->dev_bin_attrs); 589 device_remove_bin_attributes(dev, class->dev_bin_attrs);
590 device_remove_groups(dev, class->dev_groups);
584 } 591 }
585} 592}
586 593
@@ -1667,34 +1674,11 @@ static void device_create_release(struct device *dev)
1667 kfree(dev); 1674 kfree(dev);
1668} 1675}
1669 1676
1670/** 1677static struct device *
1671 * device_create_vargs - creates a device and registers it with sysfs 1678device_create_groups_vargs(struct class *class, struct device *parent,
1672 * @class: pointer to the struct class that this device should be registered to 1679 dev_t devt, void *drvdata,
1673 * @parent: pointer to the parent struct device of this new device, if any 1680 const struct attribute_group **groups,
1674 * @devt: the dev_t for the char device to be added 1681 const char *fmt, va_list args)
1675 * @drvdata: the data to be added to the device for callbacks
1676 * @fmt: string for the device's name
1677 * @args: va_list for the device's name
1678 *
1679 * This function can be used by char device classes. A struct device
1680 * will be created in sysfs, registered to the specified class.
1681 *
1682 * A "dev" file will be created, showing the dev_t for the device, if
1683 * the dev_t is not 0,0.
1684 * If a pointer to a parent struct device is passed in, the newly created
1685 * struct device will be a child of that device in sysfs.
1686 * The pointer to the struct device will be returned from the call.
1687 * Any further sysfs files that might be required can be created using this
1688 * pointer.
1689 *
1690 * Returns &struct device pointer on success, or ERR_PTR() on error.
1691 *
1692 * Note: the struct class passed to this function must have previously
1693 * been created with a call to class_create().
1694 */
1695struct device *device_create_vargs(struct class *class, struct device *parent,
1696 dev_t devt, void *drvdata, const char *fmt,
1697 va_list args)
1698{ 1682{
1699 struct device *dev = NULL; 1683 struct device *dev = NULL;
1700 int retval = -ENODEV; 1684 int retval = -ENODEV;
@@ -1711,6 +1695,7 @@ struct device *device_create_vargs(struct class *class, struct device *parent,
1711 dev->devt = devt; 1695 dev->devt = devt;
1712 dev->class = class; 1696 dev->class = class;
1713 dev->parent = parent; 1697 dev->parent = parent;
1698 dev->groups = groups;
1714 dev->release = device_create_release; 1699 dev->release = device_create_release;
1715 dev_set_drvdata(dev, drvdata); 1700 dev_set_drvdata(dev, drvdata);
1716 1701
@@ -1728,6 +1713,39 @@ error:
1728 put_device(dev); 1713 put_device(dev);
1729 return ERR_PTR(retval); 1714 return ERR_PTR(retval);
1730} 1715}
1716
1717/**
1718 * device_create_vargs - creates a device and registers it with sysfs
1719 * @class: pointer to the struct class that this device should be registered to
1720 * @parent: pointer to the parent struct device of this new device, if any
1721 * @devt: the dev_t for the char device to be added
1722 * @drvdata: the data to be added to the device for callbacks
1723 * @fmt: string for the device's name
1724 * @args: va_list for the device's name
1725 *
1726 * This function can be used by char device classes. A struct device
1727 * will be created in sysfs, registered to the specified class.
1728 *
1729 * A "dev" file will be created, showing the dev_t for the device, if
1730 * the dev_t is not 0,0.
1731 * If a pointer to a parent struct device is passed in, the newly created
1732 * struct device will be a child of that device in sysfs.
1733 * The pointer to the struct device will be returned from the call.
1734 * Any further sysfs files that might be required can be created using this
1735 * pointer.
1736 *
1737 * Returns &struct device pointer on success, or ERR_PTR() on error.
1738 *
1739 * Note: the struct class passed to this function must have previously
1740 * been created with a call to class_create().
1741 */
1742struct device *device_create_vargs(struct class *class, struct device *parent,
1743 dev_t devt, void *drvdata, const char *fmt,
1744 va_list args)
1745{
1746 return device_create_groups_vargs(class, parent, devt, drvdata, NULL,
1747 fmt, args);
1748}
1731EXPORT_SYMBOL_GPL(device_create_vargs); 1749EXPORT_SYMBOL_GPL(device_create_vargs);
1732 1750
1733/** 1751/**
@@ -1767,6 +1785,50 @@ struct device *device_create(struct class *class, struct device *parent,
1767} 1785}
1768EXPORT_SYMBOL_GPL(device_create); 1786EXPORT_SYMBOL_GPL(device_create);
1769 1787
1788/**
1789 * device_create_with_groups - creates a device and registers it with sysfs
1790 * @class: pointer to the struct class that this device should be registered to
1791 * @parent: pointer to the parent struct device of this new device, if any
1792 * @devt: the dev_t for the char device to be added
1793 * @drvdata: the data to be added to the device for callbacks
1794 * @groups: NULL-terminated list of attribute groups to be created
1795 * @fmt: string for the device's name
1796 *
1797 * This function can be used by char device classes. A struct device
1798 * will be created in sysfs, registered to the specified class.
1799 * Additional attributes specified in the groups parameter will also
1800 * be created automatically.
1801 *
1802 * A "dev" file will be created, showing the dev_t for the device, if
1803 * the dev_t is not 0,0.
1804 * If a pointer to a parent struct device is passed in, the newly created
1805 * struct device will be a child of that device in sysfs.
1806 * The pointer to the struct device will be returned from the call.
1807 * Any further sysfs files that might be required can be created using this
1808 * pointer.
1809 *
1810 * Returns &struct device pointer on success, or ERR_PTR() on error.
1811 *
1812 * Note: the struct class passed to this function must have previously
1813 * been created with a call to class_create().
1814 */
1815struct device *device_create_with_groups(struct class *class,
1816 struct device *parent, dev_t devt,
1817 void *drvdata,
1818 const struct attribute_group **groups,
1819 const char *fmt, ...)
1820{
1821 va_list vargs;
1822 struct device *dev;
1823
1824 va_start(vargs, fmt);
1825 dev = device_create_groups_vargs(class, parent, devt, drvdata, groups,
1826 fmt, vargs);
1827 va_end(vargs);
1828 return dev;
1829}
1830EXPORT_SYMBOL_GPL(device_create_with_groups);
1831
1770static int __match_devt(struct device *dev, const void *data) 1832static int __match_devt(struct device *dev, const void *data)
1771{ 1833{
1772 const dev_t *devt = data; 1834 const dev_t *devt = data;
diff --git a/drivers/base/cpu.c b/drivers/base/cpu.c
index a16d20e389f0..4c358bc44c72 100644
--- a/drivers/base/cpu.c
+++ b/drivers/base/cpu.c
@@ -278,7 +278,7 @@ static void cpu_device_release(struct device *dev)
278 * 278 *
279 * Initialize and register the CPU device. 279 * Initialize and register the CPU device.
280 */ 280 */
281int __cpuinit register_cpu(struct cpu *cpu, int num) 281int register_cpu(struct cpu *cpu, int num)
282{ 282{
283 int error; 283 int error;
284 284
diff --git a/drivers/base/platform.c b/drivers/base/platform.c
index 15789875128e..3c3197a8de41 100644
--- a/drivers/base/platform.c
+++ b/drivers/base/platform.c
@@ -522,6 +522,7 @@ static void platform_drv_shutdown(struct device *_dev)
522/** 522/**
523 * __platform_driver_register - register a driver for platform-level devices 523 * __platform_driver_register - register a driver for platform-level devices
524 * @drv: platform driver structure 524 * @drv: platform driver structure
525 * @owner: owning module/driver
525 */ 526 */
526int __platform_driver_register(struct platform_driver *drv, 527int __platform_driver_register(struct platform_driver *drv,
527 struct module *owner) 528 struct module *owner)
diff --git a/drivers/base/regmap/regcache.c b/drivers/base/regmap/regcache.c
index e69102696533..3455f833e473 100644
--- a/drivers/base/regmap/regcache.c
+++ b/drivers/base/regmap/regcache.c
@@ -719,7 +719,8 @@ static int regcache_sync_block_raw(struct regmap *map, void *block,
719 } 719 }
720 } 720 }
721 721
722 return regcache_sync_block_raw_flush(map, &data, base, regtmp); 722 return regcache_sync_block_raw_flush(map, &data, base, regtmp +
723 map->reg_stride);
723} 724}
724 725
725int regcache_sync_block(struct regmap *map, void *block, 726int regcache_sync_block(struct regmap *map, void *block,
diff --git a/drivers/base/regmap/regmap.c b/drivers/base/regmap/regmap.c
index 95920583e31e..e0d0c7d8a5c5 100644
--- a/drivers/base/regmap/regmap.c
+++ b/drivers/base/regmap/regmap.c
@@ -1853,7 +1853,7 @@ int regmap_async_complete(struct regmap *map)
1853 int ret; 1853 int ret;
1854 1854
1855 /* Nothing to do with no async support */ 1855 /* Nothing to do with no async support */
1856 if (!map->bus->async_write) 1856 if (!map->bus || !map->bus->async_write)
1857 return 0; 1857 return 0;
1858 1858
1859 trace_regmap_async_complete_start(map->dev); 1859 trace_regmap_async_complete_start(map->dev);
diff --git a/drivers/base/topology.c b/drivers/base/topology.c
index ae989c57cd5e..2f5919ed91ab 100644
--- a/drivers/base/topology.c
+++ b/drivers/base/topology.c
@@ -143,22 +143,22 @@ static struct attribute_group topology_attr_group = {
143}; 143};
144 144
145/* Add/Remove cpu_topology interface for CPU device */ 145/* Add/Remove cpu_topology interface for CPU device */
146static int __cpuinit topology_add_dev(unsigned int cpu) 146static int topology_add_dev(unsigned int cpu)
147{ 147{
148 struct device *dev = get_cpu_device(cpu); 148 struct device *dev = get_cpu_device(cpu);
149 149
150 return sysfs_create_group(&dev->kobj, &topology_attr_group); 150 return sysfs_create_group(&dev->kobj, &topology_attr_group);
151} 151}
152 152
153static void __cpuinit topology_remove_dev(unsigned int cpu) 153static void topology_remove_dev(unsigned int cpu)
154{ 154{
155 struct device *dev = get_cpu_device(cpu); 155 struct device *dev = get_cpu_device(cpu);
156 156
157 sysfs_remove_group(&dev->kobj, &topology_attr_group); 157 sysfs_remove_group(&dev->kobj, &topology_attr_group);
158} 158}
159 159
160static int __cpuinit topology_cpu_callback(struct notifier_block *nfb, 160static int topology_cpu_callback(struct notifier_block *nfb,
161 unsigned long action, void *hcpu) 161 unsigned long action, void *hcpu)
162{ 162{
163 unsigned int cpu = (unsigned long)hcpu; 163 unsigned int cpu = (unsigned long)hcpu;
164 int rc = 0; 164 int rc = 0;
@@ -178,7 +178,7 @@ static int __cpuinit topology_cpu_callback(struct notifier_block *nfb,
178 return notifier_from_errno(rc); 178 return notifier_from_errno(rc);
179} 179}
180 180
181static int __cpuinit topology_sysfs_init(void) 181static int topology_sysfs_init(void)
182{ 182{
183 int cpu; 183 int cpu;
184 int rc; 184 int rc;
diff --git a/drivers/block/Kconfig b/drivers/block/Kconfig
index b81ddfea1da0..e07a5fd58ad7 100644
--- a/drivers/block/Kconfig
+++ b/drivers/block/Kconfig
@@ -532,11 +532,11 @@ config BLK_DEV_RBD
532 If unsure, say N. 532 If unsure, say N.
533 533
534config BLK_DEV_RSXX 534config BLK_DEV_RSXX
535 tristate "IBM FlashSystem 70/80 PCIe SSD Device Driver" 535 tristate "IBM Flash Adapter 900GB Full Height PCIe Device Driver"
536 depends on PCI 536 depends on PCI
537 help 537 help
538 Device driver for IBM's high speed PCIe SSD 538 Device driver for IBM's high speed PCIe SSD
539 storage devices: FlashSystem-70 and FlashSystem-80. 539 storage device: Flash Adapter 900GB Full Height.
540 540
541 To compile this driver as a module, choose M here: the 541 To compile this driver as a module, choose M here: the
542 module will be called rsxx. 542 module will be called rsxx.
diff --git a/drivers/block/drbd/drbd_actlog.c b/drivers/block/drbd/drbd_actlog.c
index 6608076dc39e..28c73ca320a8 100644
--- a/drivers/block/drbd/drbd_actlog.c
+++ b/drivers/block/drbd/drbd_actlog.c
@@ -659,6 +659,27 @@ void drbd_al_shrink(struct drbd_conf *mdev)
659 wake_up(&mdev->al_wait); 659 wake_up(&mdev->al_wait);
660} 660}
661 661
662int drbd_initialize_al(struct drbd_conf *mdev, void *buffer)
663{
664 struct al_transaction_on_disk *al = buffer;
665 struct drbd_md *md = &mdev->ldev->md;
666 sector_t al_base = md->md_offset + md->al_offset;
667 int al_size_4k = md->al_stripes * md->al_stripe_size_4k;
668 int i;
669
670 memset(al, 0, 4096);
671 al->magic = cpu_to_be32(DRBD_AL_MAGIC);
672 al->transaction_type = cpu_to_be16(AL_TR_INITIALIZED);
673 al->crc32c = cpu_to_be32(crc32c(0, al, 4096));
674
675 for (i = 0; i < al_size_4k; i++) {
676 int err = drbd_md_sync_page_io(mdev, mdev->ldev, al_base + i * 8, WRITE);
677 if (err)
678 return err;
679 }
680 return 0;
681}
682
662static int w_update_odbm(struct drbd_work *w, int unused) 683static int w_update_odbm(struct drbd_work *w, int unused)
663{ 684{
664 struct update_odbm_work *udw = container_of(w, struct update_odbm_work, w); 685 struct update_odbm_work *udw = container_of(w, struct update_odbm_work, w);
diff --git a/drivers/block/drbd/drbd_int.h b/drivers/block/drbd/drbd_int.h
index f943aacfdad8..2d7f608d181c 100644
--- a/drivers/block/drbd/drbd_int.h
+++ b/drivers/block/drbd/drbd_int.h
@@ -832,6 +832,7 @@ struct drbd_tconn { /* is a resource from the config file */
832 unsigned susp_nod:1; /* IO suspended because no data */ 832 unsigned susp_nod:1; /* IO suspended because no data */
833 unsigned susp_fen:1; /* IO suspended because fence peer handler runs */ 833 unsigned susp_fen:1; /* IO suspended because fence peer handler runs */
834 struct mutex cstate_mutex; /* Protects graceful disconnects */ 834 struct mutex cstate_mutex; /* Protects graceful disconnects */
835 unsigned int connect_cnt; /* Inc each time a connection is established */
835 836
836 unsigned long flags; 837 unsigned long flags;
837 struct net_conf *net_conf; /* content protected by rcu */ 838 struct net_conf *net_conf; /* content protected by rcu */
@@ -1132,6 +1133,7 @@ extern void drbd_mdev_cleanup(struct drbd_conf *mdev);
1132void drbd_print_uuids(struct drbd_conf *mdev, const char *text); 1133void drbd_print_uuids(struct drbd_conf *mdev, const char *text);
1133 1134
1134extern void conn_md_sync(struct drbd_tconn *tconn); 1135extern void conn_md_sync(struct drbd_tconn *tconn);
1136extern void drbd_md_write(struct drbd_conf *mdev, void *buffer);
1135extern void drbd_md_sync(struct drbd_conf *mdev); 1137extern void drbd_md_sync(struct drbd_conf *mdev);
1136extern int drbd_md_read(struct drbd_conf *mdev, struct drbd_backing_dev *bdev); 1138extern int drbd_md_read(struct drbd_conf *mdev, struct drbd_backing_dev *bdev);
1137extern void drbd_uuid_set(struct drbd_conf *mdev, int idx, u64 val) __must_hold(local); 1139extern void drbd_uuid_set(struct drbd_conf *mdev, int idx, u64 val) __must_hold(local);
@@ -1466,8 +1468,16 @@ extern void drbd_suspend_io(struct drbd_conf *mdev);
1466extern void drbd_resume_io(struct drbd_conf *mdev); 1468extern void drbd_resume_io(struct drbd_conf *mdev);
1467extern char *ppsize(char *buf, unsigned long long size); 1469extern char *ppsize(char *buf, unsigned long long size);
1468extern sector_t drbd_new_dev_size(struct drbd_conf *, struct drbd_backing_dev *, sector_t, int); 1470extern sector_t drbd_new_dev_size(struct drbd_conf *, struct drbd_backing_dev *, sector_t, int);
1469enum determine_dev_size { dev_size_error = -1, unchanged = 0, shrunk = 1, grew = 2 }; 1471enum determine_dev_size {
1470extern enum determine_dev_size drbd_determine_dev_size(struct drbd_conf *, enum dds_flags) __must_hold(local); 1472 DS_ERROR_SHRINK = -3,
1473 DS_ERROR_SPACE_MD = -2,
1474 DS_ERROR = -1,
1475 DS_UNCHANGED = 0,
1476 DS_SHRUNK = 1,
1477 DS_GREW = 2
1478};
1479extern enum determine_dev_size
1480drbd_determine_dev_size(struct drbd_conf *, enum dds_flags, struct resize_parms *) __must_hold(local);
1471extern void resync_after_online_grow(struct drbd_conf *); 1481extern void resync_after_online_grow(struct drbd_conf *);
1472extern void drbd_reconsider_max_bio_size(struct drbd_conf *mdev); 1482extern void drbd_reconsider_max_bio_size(struct drbd_conf *mdev);
1473extern enum drbd_state_rv drbd_set_role(struct drbd_conf *mdev, 1483extern enum drbd_state_rv drbd_set_role(struct drbd_conf *mdev,
@@ -1633,6 +1643,7 @@ extern int __drbd_set_out_of_sync(struct drbd_conf *mdev, sector_t sector,
1633#define drbd_set_out_of_sync(mdev, sector, size) \ 1643#define drbd_set_out_of_sync(mdev, sector, size) \
1634 __drbd_set_out_of_sync(mdev, sector, size, __FILE__, __LINE__) 1644 __drbd_set_out_of_sync(mdev, sector, size, __FILE__, __LINE__)
1635extern void drbd_al_shrink(struct drbd_conf *mdev); 1645extern void drbd_al_shrink(struct drbd_conf *mdev);
1646extern int drbd_initialize_al(struct drbd_conf *, void *);
1636 1647
1637/* drbd_nl.c */ 1648/* drbd_nl.c */
1638/* state info broadcast */ 1649/* state info broadcast */
diff --git a/drivers/block/drbd/drbd_main.c b/drivers/block/drbd/drbd_main.c
index a5dca6affcbb..55635edf563b 100644
--- a/drivers/block/drbd/drbd_main.c
+++ b/drivers/block/drbd/drbd_main.c
@@ -2762,8 +2762,6 @@ int __init drbd_init(void)
2762 /* 2762 /*
2763 * allocate all necessary structs 2763 * allocate all necessary structs
2764 */ 2764 */
2765 err = -ENOMEM;
2766
2767 init_waitqueue_head(&drbd_pp_wait); 2765 init_waitqueue_head(&drbd_pp_wait);
2768 2766
2769 drbd_proc = NULL; /* play safe for drbd_cleanup */ 2767 drbd_proc = NULL; /* play safe for drbd_cleanup */
@@ -2773,6 +2771,7 @@ int __init drbd_init(void)
2773 if (err) 2771 if (err)
2774 goto fail; 2772 goto fail;
2775 2773
2774 err = -ENOMEM;
2776 drbd_proc = proc_create_data("drbd", S_IFREG | S_IRUGO , NULL, &drbd_proc_fops, NULL); 2775 drbd_proc = proc_create_data("drbd", S_IFREG | S_IRUGO , NULL, &drbd_proc_fops, NULL);
2777 if (!drbd_proc) { 2776 if (!drbd_proc) {
2778 printk(KERN_ERR "drbd: unable to register proc file\n"); 2777 printk(KERN_ERR "drbd: unable to register proc file\n");
@@ -2803,7 +2802,6 @@ int __init drbd_init(void)
2803fail: 2802fail:
2804 drbd_cleanup(); 2803 drbd_cleanup();
2805 if (err == -ENOMEM) 2804 if (err == -ENOMEM)
2806 /* currently always the case */
2807 printk(KERN_ERR "drbd: ran out of memory\n"); 2805 printk(KERN_ERR "drbd: ran out of memory\n");
2808 else 2806 else
2809 printk(KERN_ERR "drbd: initialization failure\n"); 2807 printk(KERN_ERR "drbd: initialization failure\n");
@@ -2881,34 +2879,14 @@ struct meta_data_on_disk {
2881 u8 reserved_u8[4096 - (7*8 + 10*4)]; 2879 u8 reserved_u8[4096 - (7*8 + 10*4)];
2882} __packed; 2880} __packed;
2883 2881
2884/** 2882
2885 * drbd_md_sync() - Writes the meta data super block if the MD_DIRTY flag bit is set 2883
2886 * @mdev: DRBD device. 2884void drbd_md_write(struct drbd_conf *mdev, void *b)
2887 */
2888void drbd_md_sync(struct drbd_conf *mdev)
2889{ 2885{
2890 struct meta_data_on_disk *buffer; 2886 struct meta_data_on_disk *buffer = b;
2891 sector_t sector; 2887 sector_t sector;
2892 int i; 2888 int i;
2893 2889
2894 /* Don't accidentally change the DRBD meta data layout. */
2895 BUILD_BUG_ON(UI_SIZE != 4);
2896 BUILD_BUG_ON(sizeof(struct meta_data_on_disk) != 4096);
2897
2898 del_timer(&mdev->md_sync_timer);
2899 /* timer may be rearmed by drbd_md_mark_dirty() now. */
2900 if (!test_and_clear_bit(MD_DIRTY, &mdev->flags))
2901 return;
2902
2903 /* We use here D_FAILED and not D_ATTACHING because we try to write
2904 * metadata even if we detach due to a disk failure! */
2905 if (!get_ldev_if_state(mdev, D_FAILED))
2906 return;
2907
2908 buffer = drbd_md_get_buffer(mdev);
2909 if (!buffer)
2910 goto out;
2911
2912 memset(buffer, 0, sizeof(*buffer)); 2890 memset(buffer, 0, sizeof(*buffer));
2913 2891
2914 buffer->la_size_sect = cpu_to_be64(drbd_get_capacity(mdev->this_bdev)); 2892 buffer->la_size_sect = cpu_to_be64(drbd_get_capacity(mdev->this_bdev));
@@ -2937,6 +2915,35 @@ void drbd_md_sync(struct drbd_conf *mdev)
2937 dev_err(DEV, "meta data update failed!\n"); 2915 dev_err(DEV, "meta data update failed!\n");
2938 drbd_chk_io_error(mdev, 1, DRBD_META_IO_ERROR); 2916 drbd_chk_io_error(mdev, 1, DRBD_META_IO_ERROR);
2939 } 2917 }
2918}
2919
2920/**
2921 * drbd_md_sync() - Writes the meta data super block if the MD_DIRTY flag bit is set
2922 * @mdev: DRBD device.
2923 */
2924void drbd_md_sync(struct drbd_conf *mdev)
2925{
2926 struct meta_data_on_disk *buffer;
2927
2928 /* Don't accidentally change the DRBD meta data layout. */
2929 BUILD_BUG_ON(UI_SIZE != 4);
2930 BUILD_BUG_ON(sizeof(struct meta_data_on_disk) != 4096);
2931
2932 del_timer(&mdev->md_sync_timer);
2933 /* timer may be rearmed by drbd_md_mark_dirty() now. */
2934 if (!test_and_clear_bit(MD_DIRTY, &mdev->flags))
2935 return;
2936
2937 /* We use here D_FAILED and not D_ATTACHING because we try to write
2938 * metadata even if we detach due to a disk failure! */
2939 if (!get_ldev_if_state(mdev, D_FAILED))
2940 return;
2941
2942 buffer = drbd_md_get_buffer(mdev);
2943 if (!buffer)
2944 goto out;
2945
2946 drbd_md_write(mdev, buffer);
2940 2947
2941 /* Update mdev->ldev->md.la_size_sect, 2948 /* Update mdev->ldev->md.la_size_sect,
2942 * since we updated it on metadata. */ 2949 * since we updated it on metadata. */
diff --git a/drivers/block/drbd/drbd_nl.c b/drivers/block/drbd/drbd_nl.c
index 9e3f441e7e84..8cc1e640f485 100644
--- a/drivers/block/drbd/drbd_nl.c
+++ b/drivers/block/drbd/drbd_nl.c
@@ -417,6 +417,7 @@ static enum drbd_fencing_p highest_fencing_policy(struct drbd_tconn *tconn)
417 417
418bool conn_try_outdate_peer(struct drbd_tconn *tconn) 418bool conn_try_outdate_peer(struct drbd_tconn *tconn)
419{ 419{
420 unsigned int connect_cnt;
420 union drbd_state mask = { }; 421 union drbd_state mask = { };
421 union drbd_state val = { }; 422 union drbd_state val = { };
422 enum drbd_fencing_p fp; 423 enum drbd_fencing_p fp;
@@ -428,6 +429,10 @@ bool conn_try_outdate_peer(struct drbd_tconn *tconn)
428 return false; 429 return false;
429 } 430 }
430 431
432 spin_lock_irq(&tconn->req_lock);
433 connect_cnt = tconn->connect_cnt;
434 spin_unlock_irq(&tconn->req_lock);
435
431 fp = highest_fencing_policy(tconn); 436 fp = highest_fencing_policy(tconn);
432 switch (fp) { 437 switch (fp) {
433 case FP_NOT_AVAIL: 438 case FP_NOT_AVAIL:
@@ -492,8 +497,14 @@ bool conn_try_outdate_peer(struct drbd_tconn *tconn)
492 here, because we might were able to re-establish the connection in the 497 here, because we might were able to re-establish the connection in the
493 meantime. */ 498 meantime. */
494 spin_lock_irq(&tconn->req_lock); 499 spin_lock_irq(&tconn->req_lock);
495 if (tconn->cstate < C_WF_REPORT_PARAMS && !test_bit(STATE_SENT, &tconn->flags)) 500 if (tconn->cstate < C_WF_REPORT_PARAMS && !test_bit(STATE_SENT, &tconn->flags)) {
496 _conn_request_state(tconn, mask, val, CS_VERBOSE); 501 if (tconn->connect_cnt != connect_cnt)
502 /* In case the connection was established and droped
503 while the fence-peer handler was running, ignore it */
504 conn_info(tconn, "Ignoring fence-peer exit code\n");
505 else
506 _conn_request_state(tconn, mask, val, CS_VERBOSE);
507 }
497 spin_unlock_irq(&tconn->req_lock); 508 spin_unlock_irq(&tconn->req_lock);
498 509
499 return conn_highest_pdsk(tconn) <= D_OUTDATED; 510 return conn_highest_pdsk(tconn) <= D_OUTDATED;
@@ -816,15 +827,20 @@ void drbd_resume_io(struct drbd_conf *mdev)
816 * Returns 0 on success, negative return values indicate errors. 827 * Returns 0 on success, negative return values indicate errors.
817 * You should call drbd_md_sync() after calling this function. 828 * You should call drbd_md_sync() after calling this function.
818 */ 829 */
819enum determine_dev_size drbd_determine_dev_size(struct drbd_conf *mdev, enum dds_flags flags) __must_hold(local) 830enum determine_dev_size
831drbd_determine_dev_size(struct drbd_conf *mdev, enum dds_flags flags, struct resize_parms *rs) __must_hold(local)
820{ 832{
821 sector_t prev_first_sect, prev_size; /* previous meta location */ 833 sector_t prev_first_sect, prev_size; /* previous meta location */
822 sector_t la_size_sect, u_size; 834 sector_t la_size_sect, u_size;
835 struct drbd_md *md = &mdev->ldev->md;
836 u32 prev_al_stripe_size_4k;
837 u32 prev_al_stripes;
823 sector_t size; 838 sector_t size;
824 char ppb[10]; 839 char ppb[10];
840 void *buffer;
825 841
826 int md_moved, la_size_changed; 842 int md_moved, la_size_changed;
827 enum determine_dev_size rv = unchanged; 843 enum determine_dev_size rv = DS_UNCHANGED;
828 844
829 /* race: 845 /* race:
830 * application request passes inc_ap_bio, 846 * application request passes inc_ap_bio,
@@ -836,6 +852,11 @@ enum determine_dev_size drbd_determine_dev_size(struct drbd_conf *mdev, enum dds
836 * still lock the act_log to not trigger ASSERTs there. 852 * still lock the act_log to not trigger ASSERTs there.
837 */ 853 */
838 drbd_suspend_io(mdev); 854 drbd_suspend_io(mdev);
855 buffer = drbd_md_get_buffer(mdev); /* Lock meta-data IO */
856 if (!buffer) {
857 drbd_resume_io(mdev);
858 return DS_ERROR;
859 }
839 860
840 /* no wait necessary anymore, actually we could assert that */ 861 /* no wait necessary anymore, actually we could assert that */
841 wait_event(mdev->al_wait, lc_try_lock(mdev->act_log)); 862 wait_event(mdev->al_wait, lc_try_lock(mdev->act_log));
@@ -844,7 +865,17 @@ enum determine_dev_size drbd_determine_dev_size(struct drbd_conf *mdev, enum dds
844 prev_size = mdev->ldev->md.md_size_sect; 865 prev_size = mdev->ldev->md.md_size_sect;
845 la_size_sect = mdev->ldev->md.la_size_sect; 866 la_size_sect = mdev->ldev->md.la_size_sect;
846 867
847 /* TODO: should only be some assert here, not (re)init... */ 868 if (rs) {
869 /* rs is non NULL if we should change the AL layout only */
870
871 prev_al_stripes = md->al_stripes;
872 prev_al_stripe_size_4k = md->al_stripe_size_4k;
873
874 md->al_stripes = rs->al_stripes;
875 md->al_stripe_size_4k = rs->al_stripe_size / 4;
876 md->al_size_4k = (u64)rs->al_stripes * rs->al_stripe_size / 4;
877 }
878
848 drbd_md_set_sector_offsets(mdev, mdev->ldev); 879 drbd_md_set_sector_offsets(mdev, mdev->ldev);
849 880
850 rcu_read_lock(); 881 rcu_read_lock();
@@ -852,6 +883,21 @@ enum determine_dev_size drbd_determine_dev_size(struct drbd_conf *mdev, enum dds
852 rcu_read_unlock(); 883 rcu_read_unlock();
853 size = drbd_new_dev_size(mdev, mdev->ldev, u_size, flags & DDSF_FORCED); 884 size = drbd_new_dev_size(mdev, mdev->ldev, u_size, flags & DDSF_FORCED);
854 885
886 if (size < la_size_sect) {
887 if (rs && u_size == 0) {
888 /* Remove "rs &&" later. This check should always be active, but
889 right now the receiver expects the permissive behavior */
890 dev_warn(DEV, "Implicit shrink not allowed. "
891 "Use --size=%llus for explicit shrink.\n",
892 (unsigned long long)size);
893 rv = DS_ERROR_SHRINK;
894 }
895 if (u_size > size)
896 rv = DS_ERROR_SPACE_MD;
897 if (rv != DS_UNCHANGED)
898 goto err_out;
899 }
900
855 if (drbd_get_capacity(mdev->this_bdev) != size || 901 if (drbd_get_capacity(mdev->this_bdev) != size ||
856 drbd_bm_capacity(mdev) != size) { 902 drbd_bm_capacity(mdev) != size) {
857 int err; 903 int err;
@@ -867,7 +913,7 @@ enum determine_dev_size drbd_determine_dev_size(struct drbd_conf *mdev, enum dds
867 "Leaving size unchanged at size = %lu KB\n", 913 "Leaving size unchanged at size = %lu KB\n",
868 (unsigned long)size); 914 (unsigned long)size);
869 } 915 }
870 rv = dev_size_error; 916 rv = DS_ERROR;
871 } 917 }
872 /* racy, see comments above. */ 918 /* racy, see comments above. */
873 drbd_set_my_capacity(mdev, size); 919 drbd_set_my_capacity(mdev, size);
@@ -875,38 +921,57 @@ enum determine_dev_size drbd_determine_dev_size(struct drbd_conf *mdev, enum dds
875 dev_info(DEV, "size = %s (%llu KB)\n", ppsize(ppb, size>>1), 921 dev_info(DEV, "size = %s (%llu KB)\n", ppsize(ppb, size>>1),
876 (unsigned long long)size>>1); 922 (unsigned long long)size>>1);
877 } 923 }
878 if (rv == dev_size_error) 924 if (rv <= DS_ERROR)
879 goto out; 925 goto err_out;
880 926
881 la_size_changed = (la_size_sect != mdev->ldev->md.la_size_sect); 927 la_size_changed = (la_size_sect != mdev->ldev->md.la_size_sect);
882 928
883 md_moved = prev_first_sect != drbd_md_first_sector(mdev->ldev) 929 md_moved = prev_first_sect != drbd_md_first_sector(mdev->ldev)
884 || prev_size != mdev->ldev->md.md_size_sect; 930 || prev_size != mdev->ldev->md.md_size_sect;
885 931
886 if (la_size_changed || md_moved) { 932 if (la_size_changed || md_moved || rs) {
887 int err; 933 u32 prev_flags;
888 934
889 drbd_al_shrink(mdev); /* All extents inactive. */ 935 drbd_al_shrink(mdev); /* All extents inactive. */
936
937 prev_flags = md->flags;
938 md->flags &= ~MDF_PRIMARY_IND;
939 drbd_md_write(mdev, buffer);
940
890 dev_info(DEV, "Writing the whole bitmap, %s\n", 941 dev_info(DEV, "Writing the whole bitmap, %s\n",
891 la_size_changed && md_moved ? "size changed and md moved" : 942 la_size_changed && md_moved ? "size changed and md moved" :
892 la_size_changed ? "size changed" : "md moved"); 943 la_size_changed ? "size changed" : "md moved");
893 /* next line implicitly does drbd_suspend_io()+drbd_resume_io() */ 944 /* next line implicitly does drbd_suspend_io()+drbd_resume_io() */
894 err = drbd_bitmap_io(mdev, md_moved ? &drbd_bm_write_all : &drbd_bm_write, 945 drbd_bitmap_io(mdev, md_moved ? &drbd_bm_write_all : &drbd_bm_write,
895 "size changed", BM_LOCKED_MASK); 946 "size changed", BM_LOCKED_MASK);
896 if (err) { 947 drbd_initialize_al(mdev, buffer);
897 rv = dev_size_error; 948
898 goto out; 949 md->flags = prev_flags;
899 } 950 drbd_md_write(mdev, buffer);
900 drbd_md_mark_dirty(mdev); 951
952 if (rs)
953 dev_info(DEV, "Changed AL layout to al-stripes = %d, al-stripe-size-kB = %d\n",
954 md->al_stripes, md->al_stripe_size_4k * 4);
901 } 955 }
902 956
903 if (size > la_size_sect) 957 if (size > la_size_sect)
904 rv = grew; 958 rv = DS_GREW;
905 if (size < la_size_sect) 959 if (size < la_size_sect)
906 rv = shrunk; 960 rv = DS_SHRUNK;
907out: 961
962 if (0) {
963 err_out:
964 if (rs) {
965 md->al_stripes = prev_al_stripes;
966 md->al_stripe_size_4k = prev_al_stripe_size_4k;
967 md->al_size_4k = (u64)prev_al_stripes * prev_al_stripe_size_4k;
968
969 drbd_md_set_sector_offsets(mdev, mdev->ldev);
970 }
971 }
908 lc_unlock(mdev->act_log); 972 lc_unlock(mdev->act_log);
909 wake_up(&mdev->al_wait); 973 wake_up(&mdev->al_wait);
974 drbd_md_put_buffer(mdev);
910 drbd_resume_io(mdev); 975 drbd_resume_io(mdev);
911 976
912 return rv; 977 return rv;
@@ -1607,11 +1672,11 @@ int drbd_adm_attach(struct sk_buff *skb, struct genl_info *info)
1607 !drbd_md_test_flag(mdev->ldev, MDF_CONNECTED_IND)) 1672 !drbd_md_test_flag(mdev->ldev, MDF_CONNECTED_IND))
1608 set_bit(USE_DEGR_WFC_T, &mdev->flags); 1673 set_bit(USE_DEGR_WFC_T, &mdev->flags);
1609 1674
1610 dd = drbd_determine_dev_size(mdev, 0); 1675 dd = drbd_determine_dev_size(mdev, 0, NULL);
1611 if (dd == dev_size_error) { 1676 if (dd <= DS_ERROR) {
1612 retcode = ERR_NOMEM_BITMAP; 1677 retcode = ERR_NOMEM_BITMAP;
1613 goto force_diskless_dec; 1678 goto force_diskless_dec;
1614 } else if (dd == grew) 1679 } else if (dd == DS_GREW)
1615 set_bit(RESYNC_AFTER_NEG, &mdev->flags); 1680 set_bit(RESYNC_AFTER_NEG, &mdev->flags);
1616 1681
1617 if (drbd_md_test_flag(mdev->ldev, MDF_FULL_SYNC) || 1682 if (drbd_md_test_flag(mdev->ldev, MDF_FULL_SYNC) ||
@@ -2305,6 +2370,7 @@ int drbd_adm_resize(struct sk_buff *skb, struct genl_info *info)
2305 struct drbd_conf *mdev; 2370 struct drbd_conf *mdev;
2306 enum drbd_ret_code retcode; 2371 enum drbd_ret_code retcode;
2307 enum determine_dev_size dd; 2372 enum determine_dev_size dd;
2373 bool change_al_layout = false;
2308 enum dds_flags ddsf; 2374 enum dds_flags ddsf;
2309 sector_t u_size; 2375 sector_t u_size;
2310 int err; 2376 int err;
@@ -2315,31 +2381,33 @@ int drbd_adm_resize(struct sk_buff *skb, struct genl_info *info)
2315 if (retcode != NO_ERROR) 2381 if (retcode != NO_ERROR)
2316 goto fail; 2382 goto fail;
2317 2383
2384 mdev = adm_ctx.mdev;
2385 if (!get_ldev(mdev)) {
2386 retcode = ERR_NO_DISK;
2387 goto fail;
2388 }
2389
2318 memset(&rs, 0, sizeof(struct resize_parms)); 2390 memset(&rs, 0, sizeof(struct resize_parms));
2391 rs.al_stripes = mdev->ldev->md.al_stripes;
2392 rs.al_stripe_size = mdev->ldev->md.al_stripe_size_4k * 4;
2319 if (info->attrs[DRBD_NLA_RESIZE_PARMS]) { 2393 if (info->attrs[DRBD_NLA_RESIZE_PARMS]) {
2320 err = resize_parms_from_attrs(&rs, info); 2394 err = resize_parms_from_attrs(&rs, info);
2321 if (err) { 2395 if (err) {
2322 retcode = ERR_MANDATORY_TAG; 2396 retcode = ERR_MANDATORY_TAG;
2323 drbd_msg_put_info(from_attrs_err_to_txt(err)); 2397 drbd_msg_put_info(from_attrs_err_to_txt(err));
2324 goto fail; 2398 goto fail_ldev;
2325 } 2399 }
2326 } 2400 }
2327 2401
2328 mdev = adm_ctx.mdev;
2329 if (mdev->state.conn > C_CONNECTED) { 2402 if (mdev->state.conn > C_CONNECTED) {
2330 retcode = ERR_RESIZE_RESYNC; 2403 retcode = ERR_RESIZE_RESYNC;
2331 goto fail; 2404 goto fail_ldev;
2332 } 2405 }
2333 2406
2334 if (mdev->state.role == R_SECONDARY && 2407 if (mdev->state.role == R_SECONDARY &&
2335 mdev->state.peer == R_SECONDARY) { 2408 mdev->state.peer == R_SECONDARY) {
2336 retcode = ERR_NO_PRIMARY; 2409 retcode = ERR_NO_PRIMARY;
2337 goto fail; 2410 goto fail_ldev;
2338 }
2339
2340 if (!get_ldev(mdev)) {
2341 retcode = ERR_NO_DISK;
2342 goto fail;
2343 } 2411 }
2344 2412
2345 if (rs.no_resync && mdev->tconn->agreed_pro_version < 93) { 2413 if (rs.no_resync && mdev->tconn->agreed_pro_version < 93) {
@@ -2358,6 +2426,28 @@ int drbd_adm_resize(struct sk_buff *skb, struct genl_info *info)
2358 } 2426 }
2359 } 2427 }
2360 2428
2429 if (mdev->ldev->md.al_stripes != rs.al_stripes ||
2430 mdev->ldev->md.al_stripe_size_4k != rs.al_stripe_size / 4) {
2431 u32 al_size_k = rs.al_stripes * rs.al_stripe_size;
2432
2433 if (al_size_k > (16 * 1024 * 1024)) {
2434 retcode = ERR_MD_LAYOUT_TOO_BIG;
2435 goto fail_ldev;
2436 }
2437
2438 if (al_size_k < MD_32kB_SECT/2) {
2439 retcode = ERR_MD_LAYOUT_TOO_SMALL;
2440 goto fail_ldev;
2441 }
2442
2443 if (mdev->state.conn != C_CONNECTED) {
2444 retcode = ERR_MD_LAYOUT_CONNECTED;
2445 goto fail_ldev;
2446 }
2447
2448 change_al_layout = true;
2449 }
2450
2361 if (mdev->ldev->known_size != drbd_get_capacity(mdev->ldev->backing_bdev)) 2451 if (mdev->ldev->known_size != drbd_get_capacity(mdev->ldev->backing_bdev))
2362 mdev->ldev->known_size = drbd_get_capacity(mdev->ldev->backing_bdev); 2452 mdev->ldev->known_size = drbd_get_capacity(mdev->ldev->backing_bdev);
2363 2453
@@ -2373,16 +2463,22 @@ int drbd_adm_resize(struct sk_buff *skb, struct genl_info *info)
2373 } 2463 }
2374 2464
2375 ddsf = (rs.resize_force ? DDSF_FORCED : 0) | (rs.no_resync ? DDSF_NO_RESYNC : 0); 2465 ddsf = (rs.resize_force ? DDSF_FORCED : 0) | (rs.no_resync ? DDSF_NO_RESYNC : 0);
2376 dd = drbd_determine_dev_size(mdev, ddsf); 2466 dd = drbd_determine_dev_size(mdev, ddsf, change_al_layout ? &rs : NULL);
2377 drbd_md_sync(mdev); 2467 drbd_md_sync(mdev);
2378 put_ldev(mdev); 2468 put_ldev(mdev);
2379 if (dd == dev_size_error) { 2469 if (dd == DS_ERROR) {
2380 retcode = ERR_NOMEM_BITMAP; 2470 retcode = ERR_NOMEM_BITMAP;
2381 goto fail; 2471 goto fail;
2472 } else if (dd == DS_ERROR_SPACE_MD) {
2473 retcode = ERR_MD_LAYOUT_NO_FIT;
2474 goto fail;
2475 } else if (dd == DS_ERROR_SHRINK) {
2476 retcode = ERR_IMPLICIT_SHRINK;
2477 goto fail;
2382 } 2478 }
2383 2479
2384 if (mdev->state.conn == C_CONNECTED) { 2480 if (mdev->state.conn == C_CONNECTED) {
2385 if (dd == grew) 2481 if (dd == DS_GREW)
2386 set_bit(RESIZE_PENDING, &mdev->flags); 2482 set_bit(RESIZE_PENDING, &mdev->flags);
2387 2483
2388 drbd_send_uuids(mdev); 2484 drbd_send_uuids(mdev);
@@ -2658,7 +2754,6 @@ int nla_put_status_info(struct sk_buff *skb, struct drbd_conf *mdev,
2658 const struct sib_info *sib) 2754 const struct sib_info *sib)
2659{ 2755{
2660 struct state_info *si = NULL; /* for sizeof(si->member); */ 2756 struct state_info *si = NULL; /* for sizeof(si->member); */
2661 struct net_conf *nc;
2662 struct nlattr *nla; 2757 struct nlattr *nla;
2663 int got_ldev; 2758 int got_ldev;
2664 int err = 0; 2759 int err = 0;
@@ -2688,13 +2783,19 @@ int nla_put_status_info(struct sk_buff *skb, struct drbd_conf *mdev,
2688 goto nla_put_failure; 2783 goto nla_put_failure;
2689 2784
2690 rcu_read_lock(); 2785 rcu_read_lock();
2691 if (got_ldev) 2786 if (got_ldev) {
2692 if (disk_conf_to_skb(skb, rcu_dereference(mdev->ldev->disk_conf), exclude_sensitive)) 2787 struct disk_conf *disk_conf;
2693 goto nla_put_failure;
2694 2788
2695 nc = rcu_dereference(mdev->tconn->net_conf); 2789 disk_conf = rcu_dereference(mdev->ldev->disk_conf);
2696 if (nc) 2790 err = disk_conf_to_skb(skb, disk_conf, exclude_sensitive);
2697 err = net_conf_to_skb(skb, nc, exclude_sensitive); 2791 }
2792 if (!err) {
2793 struct net_conf *nc;
2794
2795 nc = rcu_dereference(mdev->tconn->net_conf);
2796 if (nc)
2797 err = net_conf_to_skb(skb, nc, exclude_sensitive);
2798 }
2698 rcu_read_unlock(); 2799 rcu_read_unlock();
2699 if (err) 2800 if (err)
2700 goto nla_put_failure; 2801 goto nla_put_failure;
diff --git a/drivers/block/drbd/drbd_receiver.c b/drivers/block/drbd/drbd_receiver.c
index 4222affff488..cc29cd3bf78b 100644
--- a/drivers/block/drbd/drbd_receiver.c
+++ b/drivers/block/drbd/drbd_receiver.c
@@ -1039,6 +1039,8 @@ randomize:
1039 rcu_read_lock(); 1039 rcu_read_lock();
1040 idr_for_each_entry(&tconn->volumes, mdev, vnr) { 1040 idr_for_each_entry(&tconn->volumes, mdev, vnr) {
1041 kref_get(&mdev->kref); 1041 kref_get(&mdev->kref);
1042 rcu_read_unlock();
1043
1042 /* Prevent a race between resync-handshake and 1044 /* Prevent a race between resync-handshake and
1043 * being promoted to Primary. 1045 * being promoted to Primary.
1044 * 1046 *
@@ -1049,8 +1051,6 @@ randomize:
1049 mutex_lock(mdev->state_mutex); 1051 mutex_lock(mdev->state_mutex);
1050 mutex_unlock(mdev->state_mutex); 1052 mutex_unlock(mdev->state_mutex);
1051 1053
1052 rcu_read_unlock();
1053
1054 if (discard_my_data) 1054 if (discard_my_data)
1055 set_bit(DISCARD_MY_DATA, &mdev->flags); 1055 set_bit(DISCARD_MY_DATA, &mdev->flags);
1056 else 1056 else
@@ -3545,7 +3545,7 @@ static int receive_sizes(struct drbd_tconn *tconn, struct packet_info *pi)
3545{ 3545{
3546 struct drbd_conf *mdev; 3546 struct drbd_conf *mdev;
3547 struct p_sizes *p = pi->data; 3547 struct p_sizes *p = pi->data;
3548 enum determine_dev_size dd = unchanged; 3548 enum determine_dev_size dd = DS_UNCHANGED;
3549 sector_t p_size, p_usize, my_usize; 3549 sector_t p_size, p_usize, my_usize;
3550 int ldsc = 0; /* local disk size changed */ 3550 int ldsc = 0; /* local disk size changed */
3551 enum dds_flags ddsf; 3551 enum dds_flags ddsf;
@@ -3617,9 +3617,9 @@ static int receive_sizes(struct drbd_tconn *tconn, struct packet_info *pi)
3617 3617
3618 ddsf = be16_to_cpu(p->dds_flags); 3618 ddsf = be16_to_cpu(p->dds_flags);
3619 if (get_ldev(mdev)) { 3619 if (get_ldev(mdev)) {
3620 dd = drbd_determine_dev_size(mdev, ddsf); 3620 dd = drbd_determine_dev_size(mdev, ddsf, NULL);
3621 put_ldev(mdev); 3621 put_ldev(mdev);
3622 if (dd == dev_size_error) 3622 if (dd == DS_ERROR)
3623 return -EIO; 3623 return -EIO;
3624 drbd_md_sync(mdev); 3624 drbd_md_sync(mdev);
3625 } else { 3625 } else {
@@ -3647,7 +3647,7 @@ static int receive_sizes(struct drbd_tconn *tconn, struct packet_info *pi)
3647 drbd_send_sizes(mdev, 0, ddsf); 3647 drbd_send_sizes(mdev, 0, ddsf);
3648 } 3648 }
3649 if (test_and_clear_bit(RESIZE_PENDING, &mdev->flags) || 3649 if (test_and_clear_bit(RESIZE_PENDING, &mdev->flags) ||
3650 (dd == grew && mdev->state.conn == C_CONNECTED)) { 3650 (dd == DS_GREW && mdev->state.conn == C_CONNECTED)) {
3651 if (mdev->state.pdsk >= D_INCONSISTENT && 3651 if (mdev->state.pdsk >= D_INCONSISTENT &&
3652 mdev->state.disk >= D_INCONSISTENT) { 3652 mdev->state.disk >= D_INCONSISTENT) {
3653 if (ddsf & DDSF_NO_RESYNC) 3653 if (ddsf & DDSF_NO_RESYNC)
diff --git a/drivers/block/drbd/drbd_state.c b/drivers/block/drbd/drbd_state.c
index 90c5be2b1d30..216d47b7e88b 100644
--- a/drivers/block/drbd/drbd_state.c
+++ b/drivers/block/drbd/drbd_state.c
@@ -1115,8 +1115,10 @@ __drbd_set_state(struct drbd_conf *mdev, union drbd_state ns,
1115 drbd_thread_restart_nowait(&mdev->tconn->receiver); 1115 drbd_thread_restart_nowait(&mdev->tconn->receiver);
1116 1116
1117 /* Resume AL writing if we get a connection */ 1117 /* Resume AL writing if we get a connection */
1118 if (os.conn < C_CONNECTED && ns.conn >= C_CONNECTED) 1118 if (os.conn < C_CONNECTED && ns.conn >= C_CONNECTED) {
1119 drbd_resume_al(mdev); 1119 drbd_resume_al(mdev);
1120 mdev->tconn->connect_cnt++;
1121 }
1120 1122
1121 /* remember last attach time so request_timer_fn() won't 1123 /* remember last attach time so request_timer_fn() won't
1122 * kill newly established sessions while we are still trying to thaw 1124 * kill newly established sessions while we are still trying to thaw
diff --git a/drivers/block/rsxx/core.c b/drivers/block/rsxx/core.c
index 5af21f2db29c..6e85e21445eb 100644
--- a/drivers/block/rsxx/core.c
+++ b/drivers/block/rsxx/core.c
@@ -31,6 +31,8 @@
31#include <linux/slab.h> 31#include <linux/slab.h>
32#include <linux/bitops.h> 32#include <linux/bitops.h>
33#include <linux/delay.h> 33#include <linux/delay.h>
34#include <linux/debugfs.h>
35#include <linux/seq_file.h>
34 36
35#include <linux/genhd.h> 37#include <linux/genhd.h>
36#include <linux/idr.h> 38#include <linux/idr.h>
@@ -39,8 +41,9 @@
39#include "rsxx_cfg.h" 41#include "rsxx_cfg.h"
40 42
41#define NO_LEGACY 0 43#define NO_LEGACY 0
44#define SYNC_START_TIMEOUT (10 * 60) /* 10 minutes */
42 45
43MODULE_DESCRIPTION("IBM FlashSystem 70/80 PCIe SSD Device Driver"); 46MODULE_DESCRIPTION("IBM Flash Adapter 900GB Full Height Device Driver");
44MODULE_AUTHOR("Joshua Morris/Philip Kelleher, IBM"); 47MODULE_AUTHOR("Joshua Morris/Philip Kelleher, IBM");
45MODULE_LICENSE("GPL"); 48MODULE_LICENSE("GPL");
46MODULE_VERSION(DRIVER_VERSION); 49MODULE_VERSION(DRIVER_VERSION);
@@ -49,9 +52,282 @@ static unsigned int force_legacy = NO_LEGACY;
49module_param(force_legacy, uint, 0444); 52module_param(force_legacy, uint, 0444);
50MODULE_PARM_DESC(force_legacy, "Force the use of legacy type PCI interrupts"); 53MODULE_PARM_DESC(force_legacy, "Force the use of legacy type PCI interrupts");
51 54
55static unsigned int sync_start = 1;
56module_param(sync_start, uint, 0444);
57MODULE_PARM_DESC(sync_start, "On by Default: Driver load will not complete "
58 "until the card startup has completed.");
59
52static DEFINE_IDA(rsxx_disk_ida); 60static DEFINE_IDA(rsxx_disk_ida);
53static DEFINE_SPINLOCK(rsxx_ida_lock); 61static DEFINE_SPINLOCK(rsxx_ida_lock);
54 62
63/* --------------------Debugfs Setup ------------------- */
64
65struct rsxx_cram {
66 u32 f_pos;
67 u32 offset;
68 void *i_private;
69};
70
71static int rsxx_attr_pci_regs_show(struct seq_file *m, void *p)
72{
73 struct rsxx_cardinfo *card = m->private;
74
75 seq_printf(m, "HWID 0x%08x\n",
76 ioread32(card->regmap + HWID));
77 seq_printf(m, "SCRATCH 0x%08x\n",
78 ioread32(card->regmap + SCRATCH));
79 seq_printf(m, "IER 0x%08x\n",
80 ioread32(card->regmap + IER));
81 seq_printf(m, "IPR 0x%08x\n",
82 ioread32(card->regmap + IPR));
83 seq_printf(m, "CREG_CMD 0x%08x\n",
84 ioread32(card->regmap + CREG_CMD));
85 seq_printf(m, "CREG_ADD 0x%08x\n",
86 ioread32(card->regmap + CREG_ADD));
87 seq_printf(m, "CREG_CNT 0x%08x\n",
88 ioread32(card->regmap + CREG_CNT));
89 seq_printf(m, "CREG_STAT 0x%08x\n",
90 ioread32(card->regmap + CREG_STAT));
91 seq_printf(m, "CREG_DATA0 0x%08x\n",
92 ioread32(card->regmap + CREG_DATA0));
93 seq_printf(m, "CREG_DATA1 0x%08x\n",
94 ioread32(card->regmap + CREG_DATA1));
95 seq_printf(m, "CREG_DATA2 0x%08x\n",
96 ioread32(card->regmap + CREG_DATA2));
97 seq_printf(m, "CREG_DATA3 0x%08x\n",
98 ioread32(card->regmap + CREG_DATA3));
99 seq_printf(m, "CREG_DATA4 0x%08x\n",
100 ioread32(card->regmap + CREG_DATA4));
101 seq_printf(m, "CREG_DATA5 0x%08x\n",
102 ioread32(card->regmap + CREG_DATA5));
103 seq_printf(m, "CREG_DATA6 0x%08x\n",
104 ioread32(card->regmap + CREG_DATA6));
105 seq_printf(m, "CREG_DATA7 0x%08x\n",
106 ioread32(card->regmap + CREG_DATA7));
107 seq_printf(m, "INTR_COAL 0x%08x\n",
108 ioread32(card->regmap + INTR_COAL));
109 seq_printf(m, "HW_ERROR 0x%08x\n",
110 ioread32(card->regmap + HW_ERROR));
111 seq_printf(m, "DEBUG0 0x%08x\n",
112 ioread32(card->regmap + PCI_DEBUG0));
113 seq_printf(m, "DEBUG1 0x%08x\n",
114 ioread32(card->regmap + PCI_DEBUG1));
115 seq_printf(m, "DEBUG2 0x%08x\n",
116 ioread32(card->regmap + PCI_DEBUG2));
117 seq_printf(m, "DEBUG3 0x%08x\n",
118 ioread32(card->regmap + PCI_DEBUG3));
119 seq_printf(m, "DEBUG4 0x%08x\n",
120 ioread32(card->regmap + PCI_DEBUG4));
121 seq_printf(m, "DEBUG5 0x%08x\n",
122 ioread32(card->regmap + PCI_DEBUG5));
123 seq_printf(m, "DEBUG6 0x%08x\n",
124 ioread32(card->regmap + PCI_DEBUG6));
125 seq_printf(m, "DEBUG7 0x%08x\n",
126 ioread32(card->regmap + PCI_DEBUG7));
127 seq_printf(m, "RECONFIG 0x%08x\n",
128 ioread32(card->regmap + PCI_RECONFIG));
129
130 return 0;
131}
132
133static int rsxx_attr_stats_show(struct seq_file *m, void *p)
134{
135 struct rsxx_cardinfo *card = m->private;
136 int i;
137
138 for (i = 0; i < card->n_targets; i++) {
139 seq_printf(m, "Ctrl %d CRC Errors = %d\n",
140 i, card->ctrl[i].stats.crc_errors);
141 seq_printf(m, "Ctrl %d Hard Errors = %d\n",
142 i, card->ctrl[i].stats.hard_errors);
143 seq_printf(m, "Ctrl %d Soft Errors = %d\n",
144 i, card->ctrl[i].stats.soft_errors);
145 seq_printf(m, "Ctrl %d Writes Issued = %d\n",
146 i, card->ctrl[i].stats.writes_issued);
147 seq_printf(m, "Ctrl %d Writes Failed = %d\n",
148 i, card->ctrl[i].stats.writes_failed);
149 seq_printf(m, "Ctrl %d Reads Issued = %d\n",
150 i, card->ctrl[i].stats.reads_issued);
151 seq_printf(m, "Ctrl %d Reads Failed = %d\n",
152 i, card->ctrl[i].stats.reads_failed);
153 seq_printf(m, "Ctrl %d Reads Retried = %d\n",
154 i, card->ctrl[i].stats.reads_retried);
155 seq_printf(m, "Ctrl %d Discards Issued = %d\n",
156 i, card->ctrl[i].stats.discards_issued);
157 seq_printf(m, "Ctrl %d Discards Failed = %d\n",
158 i, card->ctrl[i].stats.discards_failed);
159 seq_printf(m, "Ctrl %d DMA SW Errors = %d\n",
160 i, card->ctrl[i].stats.dma_sw_err);
161 seq_printf(m, "Ctrl %d DMA HW Faults = %d\n",
162 i, card->ctrl[i].stats.dma_hw_fault);
163 seq_printf(m, "Ctrl %d DMAs Cancelled = %d\n",
164 i, card->ctrl[i].stats.dma_cancelled);
165 seq_printf(m, "Ctrl %d SW Queue Depth = %d\n",
166 i, card->ctrl[i].stats.sw_q_depth);
167 seq_printf(m, "Ctrl %d HW Queue Depth = %d\n",
168 i, atomic_read(&card->ctrl[i].stats.hw_q_depth));
169 }
170
171 return 0;
172}
173
174static int rsxx_attr_stats_open(struct inode *inode, struct file *file)
175{
176 return single_open(file, rsxx_attr_stats_show, inode->i_private);
177}
178
179static int rsxx_attr_pci_regs_open(struct inode *inode, struct file *file)
180{
181 return single_open(file, rsxx_attr_pci_regs_show, inode->i_private);
182}
183
184static ssize_t rsxx_cram_read(struct file *fp, char __user *ubuf,
185 size_t cnt, loff_t *ppos)
186{
187 struct rsxx_cram *info = fp->private_data;
188 struct rsxx_cardinfo *card = info->i_private;
189 char *buf;
190 int st;
191
192 buf = kzalloc(sizeof(*buf) * cnt, GFP_KERNEL);
193 if (!buf)
194 return -ENOMEM;
195
196 info->f_pos = (u32)*ppos + info->offset;
197
198 st = rsxx_creg_read(card, CREG_ADD_CRAM + info->f_pos, cnt, buf, 1);
199 if (st)
200 return st;
201
202 st = copy_to_user(ubuf, buf, cnt);
203 if (st)
204 return st;
205
206 info->offset += cnt;
207
208 kfree(buf);
209
210 return cnt;
211}
212
213static ssize_t rsxx_cram_write(struct file *fp, const char __user *ubuf,
214 size_t cnt, loff_t *ppos)
215{
216 struct rsxx_cram *info = fp->private_data;
217 struct rsxx_cardinfo *card = info->i_private;
218 char *buf;
219 int st;
220
221 buf = kzalloc(sizeof(*buf) * cnt, GFP_KERNEL);
222 if (!buf)
223 return -ENOMEM;
224
225 st = copy_from_user(buf, ubuf, cnt);
226 if (st)
227 return st;
228
229 info->f_pos = (u32)*ppos + info->offset;
230
231 st = rsxx_creg_write(card, CREG_ADD_CRAM + info->f_pos, cnt, buf, 1);
232 if (st)
233 return st;
234
235 info->offset += cnt;
236
237 kfree(buf);
238
239 return cnt;
240}
241
242static int rsxx_cram_open(struct inode *inode, struct file *file)
243{
244 struct rsxx_cram *info = kzalloc(sizeof(*info), GFP_KERNEL);
245 if (!info)
246 return -ENOMEM;
247
248 info->i_private = inode->i_private;
249 info->f_pos = file->f_pos;
250 file->private_data = info;
251
252 return 0;
253}
254
255static int rsxx_cram_release(struct inode *inode, struct file *file)
256{
257 struct rsxx_cram *info = file->private_data;
258
259 if (!info)
260 return 0;
261
262 kfree(info);
263 file->private_data = NULL;
264
265 return 0;
266}
267
268static const struct file_operations debugfs_cram_fops = {
269 .owner = THIS_MODULE,
270 .open = rsxx_cram_open,
271 .read = rsxx_cram_read,
272 .write = rsxx_cram_write,
273 .release = rsxx_cram_release,
274};
275
276static const struct file_operations debugfs_stats_fops = {
277 .owner = THIS_MODULE,
278 .open = rsxx_attr_stats_open,
279 .read = seq_read,
280 .llseek = seq_lseek,
281 .release = single_release,
282};
283
284static const struct file_operations debugfs_pci_regs_fops = {
285 .owner = THIS_MODULE,
286 .open = rsxx_attr_pci_regs_open,
287 .read = seq_read,
288 .llseek = seq_lseek,
289 .release = single_release,
290};
291
292static void rsxx_debugfs_dev_new(struct rsxx_cardinfo *card)
293{
294 struct dentry *debugfs_stats;
295 struct dentry *debugfs_pci_regs;
296 struct dentry *debugfs_cram;
297
298 card->debugfs_dir = debugfs_create_dir(card->gendisk->disk_name, NULL);
299 if (IS_ERR_OR_NULL(card->debugfs_dir))
300 goto failed_debugfs_dir;
301
302 debugfs_stats = debugfs_create_file("stats", S_IRUGO,
303 card->debugfs_dir, card,
304 &debugfs_stats_fops);
305 if (IS_ERR_OR_NULL(debugfs_stats))
306 goto failed_debugfs_stats;
307
308 debugfs_pci_regs = debugfs_create_file("pci_regs", S_IRUGO,
309 card->debugfs_dir, card,
310 &debugfs_pci_regs_fops);
311 if (IS_ERR_OR_NULL(debugfs_pci_regs))
312 goto failed_debugfs_pci_regs;
313
314 debugfs_cram = debugfs_create_file("cram", S_IRUGO | S_IWUSR,
315 card->debugfs_dir, card,
316 &debugfs_cram_fops);
317 if (IS_ERR_OR_NULL(debugfs_cram))
318 goto failed_debugfs_cram;
319
320 return;
321failed_debugfs_cram:
322 debugfs_remove(debugfs_pci_regs);
323failed_debugfs_pci_regs:
324 debugfs_remove(debugfs_stats);
325failed_debugfs_stats:
326 debugfs_remove(card->debugfs_dir);
327failed_debugfs_dir:
328 card->debugfs_dir = NULL;
329}
330
55/*----------------- Interrupt Control & Handling -------------------*/ 331/*----------------- Interrupt Control & Handling -------------------*/
56 332
57static void rsxx_mask_interrupts(struct rsxx_cardinfo *card) 333static void rsxx_mask_interrupts(struct rsxx_cardinfo *card)
@@ -163,12 +439,13 @@ static irqreturn_t rsxx_isr(int irq, void *pdata)
163 } 439 }
164 440
165 if (isr & CR_INTR_CREG) { 441 if (isr & CR_INTR_CREG) {
166 schedule_work(&card->creg_ctrl.done_work); 442 queue_work(card->creg_ctrl.creg_wq,
443 &card->creg_ctrl.done_work);
167 handled++; 444 handled++;
168 } 445 }
169 446
170 if (isr & CR_INTR_EVENT) { 447 if (isr & CR_INTR_EVENT) {
171 schedule_work(&card->event_work); 448 queue_work(card->event_wq, &card->event_work);
172 rsxx_disable_ier_and_isr(card, CR_INTR_EVENT); 449 rsxx_disable_ier_and_isr(card, CR_INTR_EVENT);
173 handled++; 450 handled++;
174 } 451 }
@@ -329,7 +606,7 @@ static int rsxx_eeh_frozen(struct pci_dev *dev)
329 int i; 606 int i;
330 int st; 607 int st;
331 608
332 dev_warn(&dev->dev, "IBM FlashSystem PCI: preparing for slot reset.\n"); 609 dev_warn(&dev->dev, "IBM Flash Adapter PCI: preparing for slot reset.\n");
333 610
334 card->eeh_state = 1; 611 card->eeh_state = 1;
335 rsxx_mask_interrupts(card); 612 rsxx_mask_interrupts(card);
@@ -367,15 +644,26 @@ static void rsxx_eeh_failure(struct pci_dev *dev)
367{ 644{
368 struct rsxx_cardinfo *card = pci_get_drvdata(dev); 645 struct rsxx_cardinfo *card = pci_get_drvdata(dev);
369 int i; 646 int i;
647 int cnt = 0;
370 648
371 dev_err(&dev->dev, "IBM FlashSystem PCI: disabling failed card.\n"); 649 dev_err(&dev->dev, "IBM Flash Adapter PCI: disabling failed card.\n");
372 650
373 card->eeh_state = 1; 651 card->eeh_state = 1;
652 card->halt = 1;
374 653
375 for (i = 0; i < card->n_targets; i++) 654 for (i = 0; i < card->n_targets; i++) {
376 del_timer_sync(&card->ctrl[i].activity_timer); 655 spin_lock_bh(&card->ctrl[i].queue_lock);
656 cnt = rsxx_cleanup_dma_queue(&card->ctrl[i],
657 &card->ctrl[i].queue);
658 spin_unlock_bh(&card->ctrl[i].queue_lock);
659
660 cnt += rsxx_dma_cancel(&card->ctrl[i]);
377 661
378 rsxx_eeh_cancel_dmas(card); 662 if (cnt)
663 dev_info(CARD_TO_DEV(card),
664 "Freed %d queued DMAs on channel %d\n",
665 cnt, card->ctrl[i].id);
666 }
379} 667}
380 668
381static int rsxx_eeh_fifo_flush_poll(struct rsxx_cardinfo *card) 669static int rsxx_eeh_fifo_flush_poll(struct rsxx_cardinfo *card)
@@ -432,7 +720,7 @@ static pci_ers_result_t rsxx_slot_reset(struct pci_dev *dev)
432 int st; 720 int st;
433 721
434 dev_warn(&dev->dev, 722 dev_warn(&dev->dev,
435 "IBM FlashSystem PCI: recovering from slot reset.\n"); 723 "IBM Flash Adapter PCI: recovering from slot reset.\n");
436 724
437 st = pci_enable_device(dev); 725 st = pci_enable_device(dev);
438 if (st) 726 if (st)
@@ -485,7 +773,7 @@ static pci_ers_result_t rsxx_slot_reset(struct pci_dev *dev)
485 &card->ctrl[i].issue_dma_work); 773 &card->ctrl[i].issue_dma_work);
486 } 774 }
487 775
488 dev_info(&dev->dev, "IBM FlashSystem PCI: recovery complete.\n"); 776 dev_info(&dev->dev, "IBM Flash Adapter PCI: recovery complete.\n");
489 777
490 return PCI_ERS_RESULT_RECOVERED; 778 return PCI_ERS_RESULT_RECOVERED;
491 779
@@ -528,6 +816,7 @@ static int rsxx_pci_probe(struct pci_dev *dev,
528{ 816{
529 struct rsxx_cardinfo *card; 817 struct rsxx_cardinfo *card;
530 int st; 818 int st;
819 unsigned int sync_timeout;
531 820
532 dev_info(&dev->dev, "PCI-Flash SSD discovered\n"); 821 dev_info(&dev->dev, "PCI-Flash SSD discovered\n");
533 822
@@ -610,7 +899,11 @@ static int rsxx_pci_probe(struct pci_dev *dev,
610 } 899 }
611 900
612 /************* Setup Processor Command Interface *************/ 901 /************* Setup Processor Command Interface *************/
613 rsxx_creg_setup(card); 902 st = rsxx_creg_setup(card);
903 if (st) {
904 dev_err(CARD_TO_DEV(card), "Failed to setup creg interface.\n");
905 goto failed_creg_setup;
906 }
614 907
615 spin_lock_irq(&card->irq_lock); 908 spin_lock_irq(&card->irq_lock);
616 rsxx_enable_ier_and_isr(card, CR_INTR_CREG); 909 rsxx_enable_ier_and_isr(card, CR_INTR_CREG);
@@ -650,6 +943,12 @@ static int rsxx_pci_probe(struct pci_dev *dev,
650 } 943 }
651 944
652 /************* Setup Card Event Handler *************/ 945 /************* Setup Card Event Handler *************/
946 card->event_wq = create_singlethread_workqueue(DRIVER_NAME"_event");
947 if (!card->event_wq) {
948 dev_err(CARD_TO_DEV(card), "Failed card event setup.\n");
949 goto failed_event_handler;
950 }
951
653 INIT_WORK(&card->event_work, card_event_handler); 952 INIT_WORK(&card->event_work, card_event_handler);
654 953
655 st = rsxx_setup_dev(card); 954 st = rsxx_setup_dev(card);
@@ -676,6 +975,33 @@ static int rsxx_pci_probe(struct pci_dev *dev,
676 if (st) 975 if (st)
677 dev_crit(CARD_TO_DEV(card), 976 dev_crit(CARD_TO_DEV(card),
678 "Failed issuing card startup\n"); 977 "Failed issuing card startup\n");
978 if (sync_start) {
979 sync_timeout = SYNC_START_TIMEOUT;
980
981 dev_info(CARD_TO_DEV(card),
982 "Waiting for card to startup\n");
983
984 do {
985 ssleep(1);
986 sync_timeout--;
987
988 rsxx_get_card_state(card, &card->state);
989 } while (sync_timeout &&
990 (card->state == CARD_STATE_STARTING));
991
992 if (card->state == CARD_STATE_STARTING) {
993 dev_warn(CARD_TO_DEV(card),
994 "Card startup timed out\n");
995 card->size8 = 0;
996 } else {
997 dev_info(CARD_TO_DEV(card),
998 "card state: %s\n",
999 rsxx_card_state_to_str(card->state));
1000 st = rsxx_get_card_size8(card, &card->size8);
1001 if (st)
1002 card->size8 = 0;
1003 }
1004 }
679 } else if (card->state == CARD_STATE_GOOD || 1005 } else if (card->state == CARD_STATE_GOOD ||
680 card->state == CARD_STATE_RD_ONLY_FAULT) { 1006 card->state == CARD_STATE_RD_ONLY_FAULT) {
681 st = rsxx_get_card_size8(card, &card->size8); 1007 st = rsxx_get_card_size8(card, &card->size8);
@@ -685,12 +1011,21 @@ static int rsxx_pci_probe(struct pci_dev *dev,
685 1011
686 rsxx_attach_dev(card); 1012 rsxx_attach_dev(card);
687 1013
1014 /************* Setup Debugfs *************/
1015 rsxx_debugfs_dev_new(card);
1016
688 return 0; 1017 return 0;
689 1018
690failed_create_dev: 1019failed_create_dev:
1020 destroy_workqueue(card->event_wq);
1021 card->event_wq = NULL;
1022failed_event_handler:
691 rsxx_dma_destroy(card); 1023 rsxx_dma_destroy(card);
692failed_dma_setup: 1024failed_dma_setup:
693failed_compatiblity_check: 1025failed_compatiblity_check:
1026 destroy_workqueue(card->creg_ctrl.creg_wq);
1027 card->creg_ctrl.creg_wq = NULL;
1028failed_creg_setup:
694 spin_lock_irq(&card->irq_lock); 1029 spin_lock_irq(&card->irq_lock);
695 rsxx_disable_ier_and_isr(card, CR_INTR_ALL); 1030 rsxx_disable_ier_and_isr(card, CR_INTR_ALL);
696 spin_unlock_irq(&card->irq_lock); 1031 spin_unlock_irq(&card->irq_lock);
@@ -756,6 +1091,8 @@ static void rsxx_pci_remove(struct pci_dev *dev)
756 /* Prevent work_structs from re-queuing themselves. */ 1091 /* Prevent work_structs from re-queuing themselves. */
757 card->halt = 1; 1092 card->halt = 1;
758 1093
1094 debugfs_remove_recursive(card->debugfs_dir);
1095
759 free_irq(dev->irq, card); 1096 free_irq(dev->irq, card);
760 1097
761 if (!force_legacy) 1098 if (!force_legacy)
diff --git a/drivers/block/rsxx/cregs.c b/drivers/block/rsxx/cregs.c
index 4b5c020a0a65..926dce9c452f 100644
--- a/drivers/block/rsxx/cregs.c
+++ b/drivers/block/rsxx/cregs.c
@@ -431,6 +431,15 @@ static int __issue_creg_rw(struct rsxx_cardinfo *card,
431 *hw_stat = completion.creg_status; 431 *hw_stat = completion.creg_status;
432 432
433 if (completion.st) { 433 if (completion.st) {
434 /*
435 * This read is needed to verify that there has not been any
436 * extreme errors that might have occurred, i.e. EEH. The
437 * function iowrite32 will not detect EEH errors, so it is
438 * necessary that we recover if such an error is the reason
439 * for the timeout. This is a dummy read.
440 */
441 ioread32(card->regmap + SCRATCH);
442
434 dev_warn(CARD_TO_DEV(card), 443 dev_warn(CARD_TO_DEV(card),
435 "creg command failed(%d x%08x)\n", 444 "creg command failed(%d x%08x)\n",
436 completion.st, addr); 445 completion.st, addr);
@@ -727,6 +736,11 @@ int rsxx_creg_setup(struct rsxx_cardinfo *card)
727{ 736{
728 card->creg_ctrl.active_cmd = NULL; 737 card->creg_ctrl.active_cmd = NULL;
729 738
739 card->creg_ctrl.creg_wq =
740 create_singlethread_workqueue(DRIVER_NAME"_creg");
741 if (!card->creg_ctrl.creg_wq)
742 return -ENOMEM;
743
730 INIT_WORK(&card->creg_ctrl.done_work, creg_cmd_done); 744 INIT_WORK(&card->creg_ctrl.done_work, creg_cmd_done);
731 mutex_init(&card->creg_ctrl.reset_lock); 745 mutex_init(&card->creg_ctrl.reset_lock);
732 INIT_LIST_HEAD(&card->creg_ctrl.queue); 746 INIT_LIST_HEAD(&card->creg_ctrl.queue);
diff --git a/drivers/block/rsxx/dev.c b/drivers/block/rsxx/dev.c
index 4346d17d2949..d7af441880be 100644
--- a/drivers/block/rsxx/dev.c
+++ b/drivers/block/rsxx/dev.c
@@ -155,7 +155,8 @@ static void bio_dma_done_cb(struct rsxx_cardinfo *card,
155 atomic_set(&meta->error, 1); 155 atomic_set(&meta->error, 1);
156 156
157 if (atomic_dec_and_test(&meta->pending_dmas)) { 157 if (atomic_dec_and_test(&meta->pending_dmas)) {
158 disk_stats_complete(card, meta->bio, meta->start_time); 158 if (!card->eeh_state && card->gendisk)
159 disk_stats_complete(card, meta->bio, meta->start_time);
159 160
160 bio_endio(meta->bio, atomic_read(&meta->error) ? -EIO : 0); 161 bio_endio(meta->bio, atomic_read(&meta->error) ? -EIO : 0);
161 kmem_cache_free(bio_meta_pool, meta); 162 kmem_cache_free(bio_meta_pool, meta);
@@ -170,6 +171,12 @@ static void rsxx_make_request(struct request_queue *q, struct bio *bio)
170 171
171 might_sleep(); 172 might_sleep();
172 173
174 if (!card)
175 goto req_err;
176
177 if (bio->bi_sector + (bio->bi_size >> 9) > get_capacity(card->gendisk))
178 goto req_err;
179
173 if (unlikely(card->halt)) { 180 if (unlikely(card->halt)) {
174 st = -EFAULT; 181 st = -EFAULT;
175 goto req_err; 182 goto req_err;
@@ -196,7 +203,8 @@ static void rsxx_make_request(struct request_queue *q, struct bio *bio)
196 atomic_set(&bio_meta->pending_dmas, 0); 203 atomic_set(&bio_meta->pending_dmas, 0);
197 bio_meta->start_time = jiffies; 204 bio_meta->start_time = jiffies;
198 205
199 disk_stats_start(card, bio); 206 if (!unlikely(card->halt))
207 disk_stats_start(card, bio);
200 208
201 dev_dbg(CARD_TO_DEV(card), "BIO[%c]: meta: %p addr8: x%llx size: %d\n", 209 dev_dbg(CARD_TO_DEV(card), "BIO[%c]: meta: %p addr8: x%llx size: %d\n",
202 bio_data_dir(bio) ? 'W' : 'R', bio_meta, 210 bio_data_dir(bio) ? 'W' : 'R', bio_meta,
@@ -225,24 +233,6 @@ static bool rsxx_discard_supported(struct rsxx_cardinfo *card)
225 return (pci_rev >= RSXX_DISCARD_SUPPORT); 233 return (pci_rev >= RSXX_DISCARD_SUPPORT);
226} 234}
227 235
228static unsigned short rsxx_get_logical_block_size(
229 struct rsxx_cardinfo *card)
230{
231 u32 capabilities = 0;
232 int st;
233
234 st = rsxx_get_card_capabilities(card, &capabilities);
235 if (st)
236 dev_warn(CARD_TO_DEV(card),
237 "Failed reading card capabilities register\n");
238
239 /* Earlier firmware did not have support for 512 byte accesses */
240 if (capabilities & CARD_CAP_SUBPAGE_WRITES)
241 return 512;
242 else
243 return RSXX_HW_BLK_SIZE;
244}
245
246int rsxx_attach_dev(struct rsxx_cardinfo *card) 236int rsxx_attach_dev(struct rsxx_cardinfo *card)
247{ 237{
248 mutex_lock(&card->dev_lock); 238 mutex_lock(&card->dev_lock);
@@ -305,7 +295,7 @@ int rsxx_setup_dev(struct rsxx_cardinfo *card)
305 return -ENOMEM; 295 return -ENOMEM;
306 } 296 }
307 297
308 blk_size = rsxx_get_logical_block_size(card); 298 blk_size = card->config.data.block_size;
309 299
310 blk_queue_make_request(card->queue, rsxx_make_request); 300 blk_queue_make_request(card->queue, rsxx_make_request);
311 blk_queue_bounce_limit(card->queue, BLK_BOUNCE_ANY); 301 blk_queue_bounce_limit(card->queue, BLK_BOUNCE_ANY);
@@ -347,6 +337,7 @@ void rsxx_destroy_dev(struct rsxx_cardinfo *card)
347 card->gendisk = NULL; 337 card->gendisk = NULL;
348 338
349 blk_cleanup_queue(card->queue); 339 blk_cleanup_queue(card->queue);
340 card->queue->queuedata = NULL;
350 unregister_blkdev(card->major, DRIVER_NAME); 341 unregister_blkdev(card->major, DRIVER_NAME);
351} 342}
352 343
diff --git a/drivers/block/rsxx/dma.c b/drivers/block/rsxx/dma.c
index 0607513cfb41..bed32f16b084 100644
--- a/drivers/block/rsxx/dma.c
+++ b/drivers/block/rsxx/dma.c
@@ -245,6 +245,22 @@ static void rsxx_complete_dma(struct rsxx_dma_ctrl *ctrl,
245 kmem_cache_free(rsxx_dma_pool, dma); 245 kmem_cache_free(rsxx_dma_pool, dma);
246} 246}
247 247
248int rsxx_cleanup_dma_queue(struct rsxx_dma_ctrl *ctrl,
249 struct list_head *q)
250{
251 struct rsxx_dma *dma;
252 struct rsxx_dma *tmp;
253 int cnt = 0;
254
255 list_for_each_entry_safe(dma, tmp, q, list) {
256 list_del(&dma->list);
257 rsxx_complete_dma(ctrl, dma, DMA_CANCELLED);
258 cnt++;
259 }
260
261 return cnt;
262}
263
248static void rsxx_requeue_dma(struct rsxx_dma_ctrl *ctrl, 264static void rsxx_requeue_dma(struct rsxx_dma_ctrl *ctrl,
249 struct rsxx_dma *dma) 265 struct rsxx_dma *dma)
250{ 266{
@@ -252,9 +268,10 @@ static void rsxx_requeue_dma(struct rsxx_dma_ctrl *ctrl,
252 * Requeued DMAs go to the front of the queue so they are issued 268 * Requeued DMAs go to the front of the queue so they are issued
253 * first. 269 * first.
254 */ 270 */
255 spin_lock(&ctrl->queue_lock); 271 spin_lock_bh(&ctrl->queue_lock);
272 ctrl->stats.sw_q_depth++;
256 list_add(&dma->list, &ctrl->queue); 273 list_add(&dma->list, &ctrl->queue);
257 spin_unlock(&ctrl->queue_lock); 274 spin_unlock_bh(&ctrl->queue_lock);
258} 275}
259 276
260static void rsxx_handle_dma_error(struct rsxx_dma_ctrl *ctrl, 277static void rsxx_handle_dma_error(struct rsxx_dma_ctrl *ctrl,
@@ -329,6 +346,7 @@ static void rsxx_handle_dma_error(struct rsxx_dma_ctrl *ctrl,
329static void dma_engine_stalled(unsigned long data) 346static void dma_engine_stalled(unsigned long data)
330{ 347{
331 struct rsxx_dma_ctrl *ctrl = (struct rsxx_dma_ctrl *)data; 348 struct rsxx_dma_ctrl *ctrl = (struct rsxx_dma_ctrl *)data;
349 int cnt;
332 350
333 if (atomic_read(&ctrl->stats.hw_q_depth) == 0 || 351 if (atomic_read(&ctrl->stats.hw_q_depth) == 0 ||
334 unlikely(ctrl->card->eeh_state)) 352 unlikely(ctrl->card->eeh_state))
@@ -349,18 +367,28 @@ static void dma_engine_stalled(unsigned long data)
349 "DMA channel %d has stalled, faulting interface.\n", 367 "DMA channel %d has stalled, faulting interface.\n",
350 ctrl->id); 368 ctrl->id);
351 ctrl->card->dma_fault = 1; 369 ctrl->card->dma_fault = 1;
370
371 /* Clean up the DMA queue */
372 spin_lock(&ctrl->queue_lock);
373 cnt = rsxx_cleanup_dma_queue(ctrl, &ctrl->queue);
374 spin_unlock(&ctrl->queue_lock);
375
376 cnt += rsxx_dma_cancel(ctrl);
377
378 if (cnt)
379 dev_info(CARD_TO_DEV(ctrl->card),
380 "Freed %d queued DMAs on channel %d\n",
381 cnt, ctrl->id);
352 } 382 }
353} 383}
354 384
355static void rsxx_issue_dmas(struct work_struct *work) 385static void rsxx_issue_dmas(struct rsxx_dma_ctrl *ctrl)
356{ 386{
357 struct rsxx_dma_ctrl *ctrl;
358 struct rsxx_dma *dma; 387 struct rsxx_dma *dma;
359 int tag; 388 int tag;
360 int cmds_pending = 0; 389 int cmds_pending = 0;
361 struct hw_cmd *hw_cmd_buf; 390 struct hw_cmd *hw_cmd_buf;
362 391
363 ctrl = container_of(work, struct rsxx_dma_ctrl, issue_dma_work);
364 hw_cmd_buf = ctrl->cmd.buf; 392 hw_cmd_buf = ctrl->cmd.buf;
365 393
366 if (unlikely(ctrl->card->halt) || 394 if (unlikely(ctrl->card->halt) ||
@@ -368,22 +396,22 @@ static void rsxx_issue_dmas(struct work_struct *work)
368 return; 396 return;
369 397
370 while (1) { 398 while (1) {
371 spin_lock(&ctrl->queue_lock); 399 spin_lock_bh(&ctrl->queue_lock);
372 if (list_empty(&ctrl->queue)) { 400 if (list_empty(&ctrl->queue)) {
373 spin_unlock(&ctrl->queue_lock); 401 spin_unlock_bh(&ctrl->queue_lock);
374 break; 402 break;
375 } 403 }
376 spin_unlock(&ctrl->queue_lock); 404 spin_unlock_bh(&ctrl->queue_lock);
377 405
378 tag = pop_tracker(ctrl->trackers); 406 tag = pop_tracker(ctrl->trackers);
379 if (tag == -1) 407 if (tag == -1)
380 break; 408 break;
381 409
382 spin_lock(&ctrl->queue_lock); 410 spin_lock_bh(&ctrl->queue_lock);
383 dma = list_entry(ctrl->queue.next, struct rsxx_dma, list); 411 dma = list_entry(ctrl->queue.next, struct rsxx_dma, list);
384 list_del(&dma->list); 412 list_del(&dma->list);
385 ctrl->stats.sw_q_depth--; 413 ctrl->stats.sw_q_depth--;
386 spin_unlock(&ctrl->queue_lock); 414 spin_unlock_bh(&ctrl->queue_lock);
387 415
388 /* 416 /*
389 * This will catch any DMAs that slipped in right before the 417 * This will catch any DMAs that slipped in right before the
@@ -440,9 +468,8 @@ static void rsxx_issue_dmas(struct work_struct *work)
440 } 468 }
441} 469}
442 470
443static void rsxx_dma_done(struct work_struct *work) 471static void rsxx_dma_done(struct rsxx_dma_ctrl *ctrl)
444{ 472{
445 struct rsxx_dma_ctrl *ctrl;
446 struct rsxx_dma *dma; 473 struct rsxx_dma *dma;
447 unsigned long flags; 474 unsigned long flags;
448 u16 count; 475 u16 count;
@@ -450,7 +477,6 @@ static void rsxx_dma_done(struct work_struct *work)
450 u8 tag; 477 u8 tag;
451 struct hw_status *hw_st_buf; 478 struct hw_status *hw_st_buf;
452 479
453 ctrl = container_of(work, struct rsxx_dma_ctrl, dma_done_work);
454 hw_st_buf = ctrl->status.buf; 480 hw_st_buf = ctrl->status.buf;
455 481
456 if (unlikely(ctrl->card->halt) || 482 if (unlikely(ctrl->card->halt) ||
@@ -520,33 +546,32 @@ static void rsxx_dma_done(struct work_struct *work)
520 rsxx_enable_ier(ctrl->card, CR_INTR_DMA(ctrl->id)); 546 rsxx_enable_ier(ctrl->card, CR_INTR_DMA(ctrl->id));
521 spin_unlock_irqrestore(&ctrl->card->irq_lock, flags); 547 spin_unlock_irqrestore(&ctrl->card->irq_lock, flags);
522 548
523 spin_lock(&ctrl->queue_lock); 549 spin_lock_bh(&ctrl->queue_lock);
524 if (ctrl->stats.sw_q_depth) 550 if (ctrl->stats.sw_q_depth)
525 queue_work(ctrl->issue_wq, &ctrl->issue_dma_work); 551 queue_work(ctrl->issue_wq, &ctrl->issue_dma_work);
526 spin_unlock(&ctrl->queue_lock); 552 spin_unlock_bh(&ctrl->queue_lock);
527} 553}
528 554
529static int rsxx_cleanup_dma_queue(struct rsxx_cardinfo *card, 555static void rsxx_schedule_issue(struct work_struct *work)
530 struct list_head *q)
531{ 556{
532 struct rsxx_dma *dma; 557 struct rsxx_dma_ctrl *ctrl;
533 struct rsxx_dma *tmp;
534 int cnt = 0;
535 558
536 list_for_each_entry_safe(dma, tmp, q, list) { 559 ctrl = container_of(work, struct rsxx_dma_ctrl, issue_dma_work);
537 list_del(&dma->list);
538 560
539 if (dma->dma_addr) 561 mutex_lock(&ctrl->work_lock);
540 pci_unmap_page(card->dev, dma->dma_addr, 562 rsxx_issue_dmas(ctrl);
541 get_dma_size(dma), 563 mutex_unlock(&ctrl->work_lock);
542 (dma->cmd == HW_CMD_BLK_WRITE) ? 564}
543 PCI_DMA_TODEVICE :
544 PCI_DMA_FROMDEVICE);
545 kmem_cache_free(rsxx_dma_pool, dma);
546 cnt++;
547 }
548 565
549 return cnt; 566static void rsxx_schedule_done(struct work_struct *work)
567{
568 struct rsxx_dma_ctrl *ctrl;
569
570 ctrl = container_of(work, struct rsxx_dma_ctrl, dma_done_work);
571
572 mutex_lock(&ctrl->work_lock);
573 rsxx_dma_done(ctrl);
574 mutex_unlock(&ctrl->work_lock);
550} 575}
551 576
552static int rsxx_queue_discard(struct rsxx_cardinfo *card, 577static int rsxx_queue_discard(struct rsxx_cardinfo *card,
@@ -698,10 +723,10 @@ int rsxx_dma_queue_bio(struct rsxx_cardinfo *card,
698 723
699 for (i = 0; i < card->n_targets; i++) { 724 for (i = 0; i < card->n_targets; i++) {
700 if (!list_empty(&dma_list[i])) { 725 if (!list_empty(&dma_list[i])) {
701 spin_lock(&card->ctrl[i].queue_lock); 726 spin_lock_bh(&card->ctrl[i].queue_lock);
702 card->ctrl[i].stats.sw_q_depth += dma_cnt[i]; 727 card->ctrl[i].stats.sw_q_depth += dma_cnt[i];
703 list_splice_tail(&dma_list[i], &card->ctrl[i].queue); 728 list_splice_tail(&dma_list[i], &card->ctrl[i].queue);
704 spin_unlock(&card->ctrl[i].queue_lock); 729 spin_unlock_bh(&card->ctrl[i].queue_lock);
705 730
706 queue_work(card->ctrl[i].issue_wq, 731 queue_work(card->ctrl[i].issue_wq,
707 &card->ctrl[i].issue_dma_work); 732 &card->ctrl[i].issue_dma_work);
@@ -711,8 +736,11 @@ int rsxx_dma_queue_bio(struct rsxx_cardinfo *card,
711 return 0; 736 return 0;
712 737
713bvec_err: 738bvec_err:
714 for (i = 0; i < card->n_targets; i++) 739 for (i = 0; i < card->n_targets; i++) {
715 rsxx_cleanup_dma_queue(card, &dma_list[i]); 740 spin_lock_bh(&card->ctrl[i].queue_lock);
741 rsxx_cleanup_dma_queue(&card->ctrl[i], &dma_list[i]);
742 spin_unlock_bh(&card->ctrl[i].queue_lock);
743 }
716 744
717 return st; 745 return st;
718} 746}
@@ -780,6 +808,7 @@ static int rsxx_dma_ctrl_init(struct pci_dev *dev,
780 spin_lock_init(&ctrl->trackers->lock); 808 spin_lock_init(&ctrl->trackers->lock);
781 809
782 spin_lock_init(&ctrl->queue_lock); 810 spin_lock_init(&ctrl->queue_lock);
811 mutex_init(&ctrl->work_lock);
783 INIT_LIST_HEAD(&ctrl->queue); 812 INIT_LIST_HEAD(&ctrl->queue);
784 813
785 setup_timer(&ctrl->activity_timer, dma_engine_stalled, 814 setup_timer(&ctrl->activity_timer, dma_engine_stalled,
@@ -793,8 +822,8 @@ static int rsxx_dma_ctrl_init(struct pci_dev *dev,
793 if (!ctrl->done_wq) 822 if (!ctrl->done_wq)
794 return -ENOMEM; 823 return -ENOMEM;
795 824
796 INIT_WORK(&ctrl->issue_dma_work, rsxx_issue_dmas); 825 INIT_WORK(&ctrl->issue_dma_work, rsxx_schedule_issue);
797 INIT_WORK(&ctrl->dma_done_work, rsxx_dma_done); 826 INIT_WORK(&ctrl->dma_done_work, rsxx_schedule_done);
798 827
799 st = rsxx_hw_buffers_init(dev, ctrl); 828 st = rsxx_hw_buffers_init(dev, ctrl);
800 if (st) 829 if (st)
@@ -918,13 +947,30 @@ failed_dma_setup:
918 return st; 947 return st;
919} 948}
920 949
950int rsxx_dma_cancel(struct rsxx_dma_ctrl *ctrl)
951{
952 struct rsxx_dma *dma;
953 int i;
954 int cnt = 0;
955
956 /* Clean up issued DMAs */
957 for (i = 0; i < RSXX_MAX_OUTSTANDING_CMDS; i++) {
958 dma = get_tracker_dma(ctrl->trackers, i);
959 if (dma) {
960 atomic_dec(&ctrl->stats.hw_q_depth);
961 rsxx_complete_dma(ctrl, dma, DMA_CANCELLED);
962 push_tracker(ctrl->trackers, i);
963 cnt++;
964 }
965 }
966
967 return cnt;
968}
921 969
922void rsxx_dma_destroy(struct rsxx_cardinfo *card) 970void rsxx_dma_destroy(struct rsxx_cardinfo *card)
923{ 971{
924 struct rsxx_dma_ctrl *ctrl; 972 struct rsxx_dma_ctrl *ctrl;
925 struct rsxx_dma *dma; 973 int i;
926 int i, j;
927 int cnt = 0;
928 974
929 for (i = 0; i < card->n_targets; i++) { 975 for (i = 0; i < card->n_targets; i++) {
930 ctrl = &card->ctrl[i]; 976 ctrl = &card->ctrl[i];
@@ -943,33 +989,11 @@ void rsxx_dma_destroy(struct rsxx_cardinfo *card)
943 del_timer_sync(&ctrl->activity_timer); 989 del_timer_sync(&ctrl->activity_timer);
944 990
945 /* Clean up the DMA queue */ 991 /* Clean up the DMA queue */
946 spin_lock(&ctrl->queue_lock); 992 spin_lock_bh(&ctrl->queue_lock);
947 cnt = rsxx_cleanup_dma_queue(card, &ctrl->queue); 993 rsxx_cleanup_dma_queue(ctrl, &ctrl->queue);
948 spin_unlock(&ctrl->queue_lock); 994 spin_unlock_bh(&ctrl->queue_lock);
949
950 if (cnt)
951 dev_info(CARD_TO_DEV(card),
952 "Freed %d queued DMAs on channel %d\n",
953 cnt, i);
954
955 /* Clean up issued DMAs */
956 for (j = 0; j < RSXX_MAX_OUTSTANDING_CMDS; j++) {
957 dma = get_tracker_dma(ctrl->trackers, j);
958 if (dma) {
959 pci_unmap_page(card->dev, dma->dma_addr,
960 get_dma_size(dma),
961 (dma->cmd == HW_CMD_BLK_WRITE) ?
962 PCI_DMA_TODEVICE :
963 PCI_DMA_FROMDEVICE);
964 kmem_cache_free(rsxx_dma_pool, dma);
965 cnt++;
966 }
967 }
968 995
969 if (cnt) 996 rsxx_dma_cancel(ctrl);
970 dev_info(CARD_TO_DEV(card),
971 "Freed %d pending DMAs on channel %d\n",
972 cnt, i);
973 997
974 vfree(ctrl->trackers); 998 vfree(ctrl->trackers);
975 999
@@ -1013,7 +1037,7 @@ int rsxx_eeh_save_issued_dmas(struct rsxx_cardinfo *card)
1013 cnt++; 1037 cnt++;
1014 } 1038 }
1015 1039
1016 spin_lock(&card->ctrl[i].queue_lock); 1040 spin_lock_bh(&card->ctrl[i].queue_lock);
1017 list_splice(&issued_dmas[i], &card->ctrl[i].queue); 1041 list_splice(&issued_dmas[i], &card->ctrl[i].queue);
1018 1042
1019 atomic_sub(cnt, &card->ctrl[i].stats.hw_q_depth); 1043 atomic_sub(cnt, &card->ctrl[i].stats.hw_q_depth);
@@ -1028,7 +1052,7 @@ int rsxx_eeh_save_issued_dmas(struct rsxx_cardinfo *card)
1028 PCI_DMA_TODEVICE : 1052 PCI_DMA_TODEVICE :
1029 PCI_DMA_FROMDEVICE); 1053 PCI_DMA_FROMDEVICE);
1030 } 1054 }
1031 spin_unlock(&card->ctrl[i].queue_lock); 1055 spin_unlock_bh(&card->ctrl[i].queue_lock);
1032 } 1056 }
1033 1057
1034 kfree(issued_dmas); 1058 kfree(issued_dmas);
@@ -1036,30 +1060,13 @@ int rsxx_eeh_save_issued_dmas(struct rsxx_cardinfo *card)
1036 return 0; 1060 return 0;
1037} 1061}
1038 1062
1039void rsxx_eeh_cancel_dmas(struct rsxx_cardinfo *card)
1040{
1041 struct rsxx_dma *dma;
1042 struct rsxx_dma *tmp;
1043 int i;
1044
1045 for (i = 0; i < card->n_targets; i++) {
1046 spin_lock(&card->ctrl[i].queue_lock);
1047 list_for_each_entry_safe(dma, tmp, &card->ctrl[i].queue, list) {
1048 list_del(&dma->list);
1049
1050 rsxx_complete_dma(&card->ctrl[i], dma, DMA_CANCELLED);
1051 }
1052 spin_unlock(&card->ctrl[i].queue_lock);
1053 }
1054}
1055
1056int rsxx_eeh_remap_dmas(struct rsxx_cardinfo *card) 1063int rsxx_eeh_remap_dmas(struct rsxx_cardinfo *card)
1057{ 1064{
1058 struct rsxx_dma *dma; 1065 struct rsxx_dma *dma;
1059 int i; 1066 int i;
1060 1067
1061 for (i = 0; i < card->n_targets; i++) { 1068 for (i = 0; i < card->n_targets; i++) {
1062 spin_lock(&card->ctrl[i].queue_lock); 1069 spin_lock_bh(&card->ctrl[i].queue_lock);
1063 list_for_each_entry(dma, &card->ctrl[i].queue, list) { 1070 list_for_each_entry(dma, &card->ctrl[i].queue, list) {
1064 dma->dma_addr = pci_map_page(card->dev, dma->page, 1071 dma->dma_addr = pci_map_page(card->dev, dma->page,
1065 dma->pg_off, get_dma_size(dma), 1072 dma->pg_off, get_dma_size(dma),
@@ -1067,12 +1074,12 @@ int rsxx_eeh_remap_dmas(struct rsxx_cardinfo *card)
1067 PCI_DMA_TODEVICE : 1074 PCI_DMA_TODEVICE :
1068 PCI_DMA_FROMDEVICE); 1075 PCI_DMA_FROMDEVICE);
1069 if (!dma->dma_addr) { 1076 if (!dma->dma_addr) {
1070 spin_unlock(&card->ctrl[i].queue_lock); 1077 spin_unlock_bh(&card->ctrl[i].queue_lock);
1071 kmem_cache_free(rsxx_dma_pool, dma); 1078 kmem_cache_free(rsxx_dma_pool, dma);
1072 return -ENOMEM; 1079 return -ENOMEM;
1073 } 1080 }
1074 } 1081 }
1075 spin_unlock(&card->ctrl[i].queue_lock); 1082 spin_unlock_bh(&card->ctrl[i].queue_lock);
1076 } 1083 }
1077 1084
1078 return 0; 1085 return 0;
diff --git a/drivers/block/rsxx/rsxx_priv.h b/drivers/block/rsxx/rsxx_priv.h
index 382e8bf5c03b..5ad5055a4104 100644
--- a/drivers/block/rsxx/rsxx_priv.h
+++ b/drivers/block/rsxx/rsxx_priv.h
@@ -39,6 +39,7 @@
39#include <linux/vmalloc.h> 39#include <linux/vmalloc.h>
40#include <linux/timer.h> 40#include <linux/timer.h>
41#include <linux/ioctl.h> 41#include <linux/ioctl.h>
42#include <linux/delay.h>
42 43
43#include "rsxx.h" 44#include "rsxx.h"
44#include "rsxx_cfg.h" 45#include "rsxx_cfg.h"
@@ -114,6 +115,7 @@ struct rsxx_dma_ctrl {
114 struct timer_list activity_timer; 115 struct timer_list activity_timer;
115 struct dma_tracker_list *trackers; 116 struct dma_tracker_list *trackers;
116 struct rsxx_dma_stats stats; 117 struct rsxx_dma_stats stats;
118 struct mutex work_lock;
117}; 119};
118 120
119struct rsxx_cardinfo { 121struct rsxx_cardinfo {
@@ -134,6 +136,7 @@ struct rsxx_cardinfo {
134 spinlock_t lock; 136 spinlock_t lock;
135 bool active; 137 bool active;
136 struct creg_cmd *active_cmd; 138 struct creg_cmd *active_cmd;
139 struct workqueue_struct *creg_wq;
137 struct work_struct done_work; 140 struct work_struct done_work;
138 struct list_head queue; 141 struct list_head queue;
139 unsigned int q_depth; 142 unsigned int q_depth;
@@ -154,6 +157,7 @@ struct rsxx_cardinfo {
154 int buf_len; 157 int buf_len;
155 } log; 158 } log;
156 159
160 struct workqueue_struct *event_wq;
157 struct work_struct event_work; 161 struct work_struct event_work;
158 unsigned int state; 162 unsigned int state;
159 u64 size8; 163 u64 size8;
@@ -181,6 +185,8 @@ struct rsxx_cardinfo {
181 185
182 int n_targets; 186 int n_targets;
183 struct rsxx_dma_ctrl *ctrl; 187 struct rsxx_dma_ctrl *ctrl;
188
189 struct dentry *debugfs_dir;
184}; 190};
185 191
186enum rsxx_pci_regmap { 192enum rsxx_pci_regmap {
@@ -283,6 +289,7 @@ enum rsxx_creg_addr {
283 CREG_ADD_CAPABILITIES = 0x80001050, 289 CREG_ADD_CAPABILITIES = 0x80001050,
284 CREG_ADD_LOG = 0x80002000, 290 CREG_ADD_LOG = 0x80002000,
285 CREG_ADD_NUM_TARGETS = 0x80003000, 291 CREG_ADD_NUM_TARGETS = 0x80003000,
292 CREG_ADD_CRAM = 0xA0000000,
286 CREG_ADD_CONFIG = 0xB0000000, 293 CREG_ADD_CONFIG = 0xB0000000,
287}; 294};
288 295
@@ -372,6 +379,8 @@ typedef void (*rsxx_dma_cb)(struct rsxx_cardinfo *card,
372int rsxx_dma_setup(struct rsxx_cardinfo *card); 379int rsxx_dma_setup(struct rsxx_cardinfo *card);
373void rsxx_dma_destroy(struct rsxx_cardinfo *card); 380void rsxx_dma_destroy(struct rsxx_cardinfo *card);
374int rsxx_dma_init(void); 381int rsxx_dma_init(void);
382int rsxx_cleanup_dma_queue(struct rsxx_dma_ctrl *ctrl, struct list_head *q);
383int rsxx_dma_cancel(struct rsxx_dma_ctrl *ctrl);
375void rsxx_dma_cleanup(void); 384void rsxx_dma_cleanup(void);
376void rsxx_dma_queue_reset(struct rsxx_cardinfo *card); 385void rsxx_dma_queue_reset(struct rsxx_cardinfo *card);
377int rsxx_dma_configure(struct rsxx_cardinfo *card); 386int rsxx_dma_configure(struct rsxx_cardinfo *card);
@@ -382,7 +391,6 @@ int rsxx_dma_queue_bio(struct rsxx_cardinfo *card,
382 void *cb_data); 391 void *cb_data);
383int rsxx_hw_buffers_init(struct pci_dev *dev, struct rsxx_dma_ctrl *ctrl); 392int rsxx_hw_buffers_init(struct pci_dev *dev, struct rsxx_dma_ctrl *ctrl);
384int rsxx_eeh_save_issued_dmas(struct rsxx_cardinfo *card); 393int rsxx_eeh_save_issued_dmas(struct rsxx_cardinfo *card);
385void rsxx_eeh_cancel_dmas(struct rsxx_cardinfo *card);
386int rsxx_eeh_remap_dmas(struct rsxx_cardinfo *card); 394int rsxx_eeh_remap_dmas(struct rsxx_cardinfo *card);
387 395
388/***** cregs.c *****/ 396/***** cregs.c *****/
diff --git a/drivers/block/xen-blkback/blkback.c b/drivers/block/xen-blkback/blkback.c
index dd5b2fed97e9..bf4b9d282c04 100644
--- a/drivers/block/xen-blkback/blkback.c
+++ b/drivers/block/xen-blkback/blkback.c
@@ -50,110 +50,118 @@
50#include "common.h" 50#include "common.h"
51 51
52/* 52/*
53 * These are rather arbitrary. They are fairly large because adjacent requests 53 * Maximum number of unused free pages to keep in the internal buffer.
54 * pulled from a communication ring are quite likely to end up being part of 54 * Setting this to a value too low will reduce memory used in each backend,
55 * the same scatter/gather request at the disc. 55 * but can have a performance penalty.
56 * 56 *
57 * ** TRY INCREASING 'xen_blkif_reqs' IF WRITE SPEEDS SEEM TOO LOW ** 57 * A sane value is xen_blkif_reqs * BLKIF_MAX_SEGMENTS_PER_REQUEST, but can
58 * 58 * be set to a lower value that might degrade performance on some intensive
59 * This will increase the chances of being able to write whole tracks. 59 * IO workloads.
60 * 64 should be enough to keep us competitive with Linux.
61 */ 60 */
62static int xen_blkif_reqs = 64;
63module_param_named(reqs, xen_blkif_reqs, int, 0);
64MODULE_PARM_DESC(reqs, "Number of blkback requests to allocate");
65 61
66/* Run-time switchable: /sys/module/blkback/parameters/ */ 62static int xen_blkif_max_buffer_pages = 1024;
67static unsigned int log_stats; 63module_param_named(max_buffer_pages, xen_blkif_max_buffer_pages, int, 0644);
68module_param(log_stats, int, 0644); 64MODULE_PARM_DESC(max_buffer_pages,
65"Maximum number of free pages to keep in each block backend buffer");
69 66
70/* 67/*
71 * Each outstanding request that we've passed to the lower device layers has a 68 * Maximum number of grants to map persistently in blkback. For maximum
72 * 'pending_req' allocated to it. Each buffer_head that completes decrements 69 * performance this should be the total numbers of grants that can be used
73 * the pendcnt towards zero. When it hits zero, the specified domain has a 70 * to fill the ring, but since this might become too high, specially with
74 * response queued for it, with the saved 'id' passed back. 71 * the use of indirect descriptors, we set it to a value that provides good
72 * performance without using too much memory.
73 *
74 * When the list of persistent grants is full we clean it up using a LRU
75 * algorithm.
75 */ 76 */
76struct pending_req {
77 struct xen_blkif *blkif;
78 u64 id;
79 int nr_pages;
80 atomic_t pendcnt;
81 unsigned short operation;
82 int status;
83 struct list_head free_list;
84 DECLARE_BITMAP(unmap_seg, BLKIF_MAX_SEGMENTS_PER_REQUEST);
85};
86 77
87#define BLKBACK_INVALID_HANDLE (~0) 78static int xen_blkif_max_pgrants = 1056;
79module_param_named(max_persistent_grants, xen_blkif_max_pgrants, int, 0644);
80MODULE_PARM_DESC(max_persistent_grants,
81 "Maximum number of grants to map persistently");
88 82
89struct xen_blkbk { 83/*
90 struct pending_req *pending_reqs; 84 * The LRU mechanism to clean the lists of persistent grants needs to
91 /* List of all 'pending_req' available */ 85 * be executed periodically. The time interval between consecutive executions
92 struct list_head pending_free; 86 * of the purge mechanism is set in ms.
93 /* And its spinlock. */ 87 */
94 spinlock_t pending_free_lock; 88#define LRU_INTERVAL 100
95 wait_queue_head_t pending_free_wq;
96 /* The list of all pages that are available. */
97 struct page **pending_pages;
98 /* And the grant handles that are available. */
99 grant_handle_t *pending_grant_handles;
100};
101
102static struct xen_blkbk *blkbk;
103 89
104/* 90/*
105 * Maximum number of grant pages that can be mapped in blkback. 91 * When the persistent grants list is full we will remove unused grants
106 * BLKIF_MAX_SEGMENTS_PER_REQUEST * RING_SIZE is the maximum number of 92 * from the list. The percent number of grants to be removed at each LRU
107 * pages that blkback will persistently map. 93 * execution.
108 * Currently, this is:
109 * RING_SIZE = 32 (for all known ring types)
110 * BLKIF_MAX_SEGMENTS_PER_REQUEST = 11
111 * sizeof(struct persistent_gnt) = 48
112 * So the maximum memory used to store the grants is:
113 * 32 * 11 * 48 = 16896 bytes
114 */ 94 */
115static inline unsigned int max_mapped_grant_pages(enum blkif_protocol protocol) 95#define LRU_PERCENT_CLEAN 5
96
97/* Run-time switchable: /sys/module/blkback/parameters/ */
98static unsigned int log_stats;
99module_param(log_stats, int, 0644);
100
101#define BLKBACK_INVALID_HANDLE (~0)
102
103/* Number of free pages to remove on each call to free_xenballooned_pages */
104#define NUM_BATCH_FREE_PAGES 10
105
106static inline int get_free_page(struct xen_blkif *blkif, struct page **page)
116{ 107{
117 switch (protocol) { 108 unsigned long flags;
118 case BLKIF_PROTOCOL_NATIVE: 109
119 return __CONST_RING_SIZE(blkif, PAGE_SIZE) * 110 spin_lock_irqsave(&blkif->free_pages_lock, flags);
120 BLKIF_MAX_SEGMENTS_PER_REQUEST; 111 if (list_empty(&blkif->free_pages)) {
121 case BLKIF_PROTOCOL_X86_32: 112 BUG_ON(blkif->free_pages_num != 0);
122 return __CONST_RING_SIZE(blkif_x86_32, PAGE_SIZE) * 113 spin_unlock_irqrestore(&blkif->free_pages_lock, flags);
123 BLKIF_MAX_SEGMENTS_PER_REQUEST; 114 return alloc_xenballooned_pages(1, page, false);
124 case BLKIF_PROTOCOL_X86_64:
125 return __CONST_RING_SIZE(blkif_x86_64, PAGE_SIZE) *
126 BLKIF_MAX_SEGMENTS_PER_REQUEST;
127 default:
128 BUG();
129 } 115 }
116 BUG_ON(blkif->free_pages_num == 0);
117 page[0] = list_first_entry(&blkif->free_pages, struct page, lru);
118 list_del(&page[0]->lru);
119 blkif->free_pages_num--;
120 spin_unlock_irqrestore(&blkif->free_pages_lock, flags);
121
130 return 0; 122 return 0;
131} 123}
132 124
133 125static inline void put_free_pages(struct xen_blkif *blkif, struct page **page,
134/* 126 int num)
135 * Little helpful macro to figure out the index and virtual address of the
136 * pending_pages[..]. For each 'pending_req' we have have up to
137 * BLKIF_MAX_SEGMENTS_PER_REQUEST (11) pages. The seg would be from 0 through
138 * 10 and would index in the pending_pages[..].
139 */
140static inline int vaddr_pagenr(struct pending_req *req, int seg)
141{ 127{
142 return (req - blkbk->pending_reqs) * 128 unsigned long flags;
143 BLKIF_MAX_SEGMENTS_PER_REQUEST + seg; 129 int i;
144}
145 130
146#define pending_page(req, seg) pending_pages[vaddr_pagenr(req, seg)] 131 spin_lock_irqsave(&blkif->free_pages_lock, flags);
132 for (i = 0; i < num; i++)
133 list_add(&page[i]->lru, &blkif->free_pages);
134 blkif->free_pages_num += num;
135 spin_unlock_irqrestore(&blkif->free_pages_lock, flags);
136}
147 137
148static inline unsigned long vaddr(struct pending_req *req, int seg) 138static inline void shrink_free_pagepool(struct xen_blkif *blkif, int num)
149{ 139{
150 unsigned long pfn = page_to_pfn(blkbk->pending_page(req, seg)); 140 /* Remove requested pages in batches of NUM_BATCH_FREE_PAGES */
151 return (unsigned long)pfn_to_kaddr(pfn); 141 struct page *page[NUM_BATCH_FREE_PAGES];
152} 142 unsigned int num_pages = 0;
143 unsigned long flags;
153 144
154#define pending_handle(_req, _seg) \ 145 spin_lock_irqsave(&blkif->free_pages_lock, flags);
155 (blkbk->pending_grant_handles[vaddr_pagenr(_req, _seg)]) 146 while (blkif->free_pages_num > num) {
147 BUG_ON(list_empty(&blkif->free_pages));
148 page[num_pages] = list_first_entry(&blkif->free_pages,
149 struct page, lru);
150 list_del(&page[num_pages]->lru);
151 blkif->free_pages_num--;
152 if (++num_pages == NUM_BATCH_FREE_PAGES) {
153 spin_unlock_irqrestore(&blkif->free_pages_lock, flags);
154 free_xenballooned_pages(num_pages, page);
155 spin_lock_irqsave(&blkif->free_pages_lock, flags);
156 num_pages = 0;
157 }
158 }
159 spin_unlock_irqrestore(&blkif->free_pages_lock, flags);
160 if (num_pages != 0)
161 free_xenballooned_pages(num_pages, page);
162}
156 163
164#define vaddr(page) ((unsigned long)pfn_to_kaddr(page_to_pfn(page)))
157 165
158static int do_block_io_op(struct xen_blkif *blkif); 166static int do_block_io_op(struct xen_blkif *blkif);
159static int dispatch_rw_block_io(struct xen_blkif *blkif, 167static int dispatch_rw_block_io(struct xen_blkif *blkif,
@@ -170,13 +178,29 @@ static void make_response(struct xen_blkif *blkif, u64 id,
170 (n) = (&(pos)->node != NULL) ? rb_next(&(pos)->node) : NULL) 178 (n) = (&(pos)->node != NULL) ? rb_next(&(pos)->node) : NULL)
171 179
172 180
173static void add_persistent_gnt(struct rb_root *root, 181/*
182 * We don't need locking around the persistent grant helpers
183 * because blkback uses a single-thread for each backed, so we
184 * can be sure that this functions will never be called recursively.
185 *
186 * The only exception to that is put_persistent_grant, that can be called
187 * from interrupt context (by xen_blkbk_unmap), so we have to use atomic
188 * bit operations to modify the flags of a persistent grant and to count
189 * the number of used grants.
190 */
191static int add_persistent_gnt(struct xen_blkif *blkif,
174 struct persistent_gnt *persistent_gnt) 192 struct persistent_gnt *persistent_gnt)
175{ 193{
176 struct rb_node **new = &(root->rb_node), *parent = NULL; 194 struct rb_node **new = NULL, *parent = NULL;
177 struct persistent_gnt *this; 195 struct persistent_gnt *this;
178 196
197 if (blkif->persistent_gnt_c >= xen_blkif_max_pgrants) {
198 if (!blkif->vbd.overflow_max_grants)
199 blkif->vbd.overflow_max_grants = 1;
200 return -EBUSY;
201 }
179 /* Figure out where to put new node */ 202 /* Figure out where to put new node */
203 new = &blkif->persistent_gnts.rb_node;
180 while (*new) { 204 while (*new) {
181 this = container_of(*new, struct persistent_gnt, node); 205 this = container_of(*new, struct persistent_gnt, node);
182 206
@@ -186,22 +210,28 @@ static void add_persistent_gnt(struct rb_root *root,
186 else if (persistent_gnt->gnt > this->gnt) 210 else if (persistent_gnt->gnt > this->gnt)
187 new = &((*new)->rb_right); 211 new = &((*new)->rb_right);
188 else { 212 else {
189 pr_alert(DRV_PFX " trying to add a gref that's already in the tree\n"); 213 pr_alert_ratelimited(DRV_PFX " trying to add a gref that's already in the tree\n");
190 BUG(); 214 return -EINVAL;
191 } 215 }
192 } 216 }
193 217
218 bitmap_zero(persistent_gnt->flags, PERSISTENT_GNT_FLAGS_SIZE);
219 set_bit(PERSISTENT_GNT_ACTIVE, persistent_gnt->flags);
194 /* Add new node and rebalance tree. */ 220 /* Add new node and rebalance tree. */
195 rb_link_node(&(persistent_gnt->node), parent, new); 221 rb_link_node(&(persistent_gnt->node), parent, new);
196 rb_insert_color(&(persistent_gnt->node), root); 222 rb_insert_color(&(persistent_gnt->node), &blkif->persistent_gnts);
223 blkif->persistent_gnt_c++;
224 atomic_inc(&blkif->persistent_gnt_in_use);
225 return 0;
197} 226}
198 227
199static struct persistent_gnt *get_persistent_gnt(struct rb_root *root, 228static struct persistent_gnt *get_persistent_gnt(struct xen_blkif *blkif,
200 grant_ref_t gref) 229 grant_ref_t gref)
201{ 230{
202 struct persistent_gnt *data; 231 struct persistent_gnt *data;
203 struct rb_node *node = root->rb_node; 232 struct rb_node *node = NULL;
204 233
234 node = blkif->persistent_gnts.rb_node;
205 while (node) { 235 while (node) {
206 data = container_of(node, struct persistent_gnt, node); 236 data = container_of(node, struct persistent_gnt, node);
207 237
@@ -209,13 +239,31 @@ static struct persistent_gnt *get_persistent_gnt(struct rb_root *root,
209 node = node->rb_left; 239 node = node->rb_left;
210 else if (gref > data->gnt) 240 else if (gref > data->gnt)
211 node = node->rb_right; 241 node = node->rb_right;
212 else 242 else {
243 if(test_bit(PERSISTENT_GNT_ACTIVE, data->flags)) {
244 pr_alert_ratelimited(DRV_PFX " requesting a grant already in use\n");
245 return NULL;
246 }
247 set_bit(PERSISTENT_GNT_ACTIVE, data->flags);
248 atomic_inc(&blkif->persistent_gnt_in_use);
213 return data; 249 return data;
250 }
214 } 251 }
215 return NULL; 252 return NULL;
216} 253}
217 254
218static void free_persistent_gnts(struct rb_root *root, unsigned int num) 255static void put_persistent_gnt(struct xen_blkif *blkif,
256 struct persistent_gnt *persistent_gnt)
257{
258 if(!test_bit(PERSISTENT_GNT_ACTIVE, persistent_gnt->flags))
259 pr_alert_ratelimited(DRV_PFX " freeing a grant already unused");
260 set_bit(PERSISTENT_GNT_WAS_ACTIVE, persistent_gnt->flags);
261 clear_bit(PERSISTENT_GNT_ACTIVE, persistent_gnt->flags);
262 atomic_dec(&blkif->persistent_gnt_in_use);
263}
264
265static void free_persistent_gnts(struct xen_blkif *blkif, struct rb_root *root,
266 unsigned int num)
219{ 267{
220 struct gnttab_unmap_grant_ref unmap[BLKIF_MAX_SEGMENTS_PER_REQUEST]; 268 struct gnttab_unmap_grant_ref unmap[BLKIF_MAX_SEGMENTS_PER_REQUEST];
221 struct page *pages[BLKIF_MAX_SEGMENTS_PER_REQUEST]; 269 struct page *pages[BLKIF_MAX_SEGMENTS_PER_REQUEST];
@@ -240,7 +288,7 @@ static void free_persistent_gnts(struct rb_root *root, unsigned int num)
240 ret = gnttab_unmap_refs(unmap, NULL, pages, 288 ret = gnttab_unmap_refs(unmap, NULL, pages,
241 segs_to_unmap); 289 segs_to_unmap);
242 BUG_ON(ret); 290 BUG_ON(ret);
243 free_xenballooned_pages(segs_to_unmap, pages); 291 put_free_pages(blkif, pages, segs_to_unmap);
244 segs_to_unmap = 0; 292 segs_to_unmap = 0;
245 } 293 }
246 294
@@ -251,21 +299,148 @@ static void free_persistent_gnts(struct rb_root *root, unsigned int num)
251 BUG_ON(num != 0); 299 BUG_ON(num != 0);
252} 300}
253 301
302static void unmap_purged_grants(struct work_struct *work)
303{
304 struct gnttab_unmap_grant_ref unmap[BLKIF_MAX_SEGMENTS_PER_REQUEST];
305 struct page *pages[BLKIF_MAX_SEGMENTS_PER_REQUEST];
306 struct persistent_gnt *persistent_gnt;
307 int ret, segs_to_unmap = 0;
308 struct xen_blkif *blkif = container_of(work, typeof(*blkif), persistent_purge_work);
309
310 while(!list_empty(&blkif->persistent_purge_list)) {
311 persistent_gnt = list_first_entry(&blkif->persistent_purge_list,
312 struct persistent_gnt,
313 remove_node);
314 list_del(&persistent_gnt->remove_node);
315
316 gnttab_set_unmap_op(&unmap[segs_to_unmap],
317 vaddr(persistent_gnt->page),
318 GNTMAP_host_map,
319 persistent_gnt->handle);
320
321 pages[segs_to_unmap] = persistent_gnt->page;
322
323 if (++segs_to_unmap == BLKIF_MAX_SEGMENTS_PER_REQUEST) {
324 ret = gnttab_unmap_refs(unmap, NULL, pages,
325 segs_to_unmap);
326 BUG_ON(ret);
327 put_free_pages(blkif, pages, segs_to_unmap);
328 segs_to_unmap = 0;
329 }
330 kfree(persistent_gnt);
331 }
332 if (segs_to_unmap > 0) {
333 ret = gnttab_unmap_refs(unmap, NULL, pages, segs_to_unmap);
334 BUG_ON(ret);
335 put_free_pages(blkif, pages, segs_to_unmap);
336 }
337}
338
339static void purge_persistent_gnt(struct xen_blkif *blkif)
340{
341 struct persistent_gnt *persistent_gnt;
342 struct rb_node *n;
343 unsigned int num_clean, total;
344 bool scan_used = false, clean_used = false;
345 struct rb_root *root;
346
347 if (blkif->persistent_gnt_c < xen_blkif_max_pgrants ||
348 (blkif->persistent_gnt_c == xen_blkif_max_pgrants &&
349 !blkif->vbd.overflow_max_grants)) {
350 return;
351 }
352
353 if (work_pending(&blkif->persistent_purge_work)) {
354 pr_alert_ratelimited(DRV_PFX "Scheduled work from previous purge is still pending, cannot purge list\n");
355 return;
356 }
357
358 num_clean = (xen_blkif_max_pgrants / 100) * LRU_PERCENT_CLEAN;
359 num_clean = blkif->persistent_gnt_c - xen_blkif_max_pgrants + num_clean;
360 num_clean = min(blkif->persistent_gnt_c, num_clean);
361 if ((num_clean == 0) ||
362 (num_clean > (blkif->persistent_gnt_c - atomic_read(&blkif->persistent_gnt_in_use))))
363 return;
364
365 /*
366 * At this point, we can assure that there will be no calls
367 * to get_persistent_grant (because we are executing this code from
368 * xen_blkif_schedule), there can only be calls to put_persistent_gnt,
369 * which means that the number of currently used grants will go down,
370 * but never up, so we will always be able to remove the requested
371 * number of grants.
372 */
373
374 total = num_clean;
375
376 pr_debug(DRV_PFX "Going to purge %u persistent grants\n", num_clean);
377
378 INIT_LIST_HEAD(&blkif->persistent_purge_list);
379 root = &blkif->persistent_gnts;
380purge_list:
381 foreach_grant_safe(persistent_gnt, n, root, node) {
382 BUG_ON(persistent_gnt->handle ==
383 BLKBACK_INVALID_HANDLE);
384
385 if (clean_used) {
386 clear_bit(PERSISTENT_GNT_WAS_ACTIVE, persistent_gnt->flags);
387 continue;
388 }
389
390 if (test_bit(PERSISTENT_GNT_ACTIVE, persistent_gnt->flags))
391 continue;
392 if (!scan_used &&
393 (test_bit(PERSISTENT_GNT_WAS_ACTIVE, persistent_gnt->flags)))
394 continue;
395
396 rb_erase(&persistent_gnt->node, root);
397 list_add(&persistent_gnt->remove_node,
398 &blkif->persistent_purge_list);
399 if (--num_clean == 0)
400 goto finished;
401 }
402 /*
403 * If we get here it means we also need to start cleaning
404 * grants that were used since last purge in order to cope
405 * with the requested num
406 */
407 if (!scan_used && !clean_used) {
408 pr_debug(DRV_PFX "Still missing %u purged frames\n", num_clean);
409 scan_used = true;
410 goto purge_list;
411 }
412finished:
413 if (!clean_used) {
414 pr_debug(DRV_PFX "Finished scanning for grants to clean, removing used flag\n");
415 clean_used = true;
416 goto purge_list;
417 }
418
419 blkif->persistent_gnt_c -= (total - num_clean);
420 blkif->vbd.overflow_max_grants = 0;
421
422 /* We can defer this work */
423 INIT_WORK(&blkif->persistent_purge_work, unmap_purged_grants);
424 schedule_work(&blkif->persistent_purge_work);
425 pr_debug(DRV_PFX "Purged %u/%u\n", (total - num_clean), total);
426 return;
427}
428
254/* 429/*
255 * Retrieve from the 'pending_reqs' a free pending_req structure to be used. 430 * Retrieve from the 'pending_reqs' a free pending_req structure to be used.
256 */ 431 */
257static struct pending_req *alloc_req(void) 432static struct pending_req *alloc_req(struct xen_blkif *blkif)
258{ 433{
259 struct pending_req *req = NULL; 434 struct pending_req *req = NULL;
260 unsigned long flags; 435 unsigned long flags;
261 436
262 spin_lock_irqsave(&blkbk->pending_free_lock, flags); 437 spin_lock_irqsave(&blkif->pending_free_lock, flags);
263 if (!list_empty(&blkbk->pending_free)) { 438 if (!list_empty(&blkif->pending_free)) {
264 req = list_entry(blkbk->pending_free.next, struct pending_req, 439 req = list_entry(blkif->pending_free.next, struct pending_req,
265 free_list); 440 free_list);
266 list_del(&req->free_list); 441 list_del(&req->free_list);
267 } 442 }
268 spin_unlock_irqrestore(&blkbk->pending_free_lock, flags); 443 spin_unlock_irqrestore(&blkif->pending_free_lock, flags);
269 return req; 444 return req;
270} 445}
271 446
@@ -273,17 +448,17 @@ static struct pending_req *alloc_req(void)
273 * Return the 'pending_req' structure back to the freepool. We also 448 * Return the 'pending_req' structure back to the freepool. We also
274 * wake up the thread if it was waiting for a free page. 449 * wake up the thread if it was waiting for a free page.
275 */ 450 */
276static void free_req(struct pending_req *req) 451static void free_req(struct xen_blkif *blkif, struct pending_req *req)
277{ 452{
278 unsigned long flags; 453 unsigned long flags;
279 int was_empty; 454 int was_empty;
280 455
281 spin_lock_irqsave(&blkbk->pending_free_lock, flags); 456 spin_lock_irqsave(&blkif->pending_free_lock, flags);
282 was_empty = list_empty(&blkbk->pending_free); 457 was_empty = list_empty(&blkif->pending_free);
283 list_add(&req->free_list, &blkbk->pending_free); 458 list_add(&req->free_list, &blkif->pending_free);
284 spin_unlock_irqrestore(&blkbk->pending_free_lock, flags); 459 spin_unlock_irqrestore(&blkif->pending_free_lock, flags);
285 if (was_empty) 460 if (was_empty)
286 wake_up(&blkbk->pending_free_wq); 461 wake_up(&blkif->pending_free_wq);
287} 462}
288 463
289/* 464/*
@@ -382,10 +557,12 @@ irqreturn_t xen_blkif_be_int(int irq, void *dev_id)
382static void print_stats(struct xen_blkif *blkif) 557static void print_stats(struct xen_blkif *blkif)
383{ 558{
384 pr_info("xen-blkback (%s): oo %3llu | rd %4llu | wr %4llu | f %4llu" 559 pr_info("xen-blkback (%s): oo %3llu | rd %4llu | wr %4llu | f %4llu"
385 " | ds %4llu\n", 560 " | ds %4llu | pg: %4u/%4d\n",
386 current->comm, blkif->st_oo_req, 561 current->comm, blkif->st_oo_req,
387 blkif->st_rd_req, blkif->st_wr_req, 562 blkif->st_rd_req, blkif->st_wr_req,
388 blkif->st_f_req, blkif->st_ds_req); 563 blkif->st_f_req, blkif->st_ds_req,
564 blkif->persistent_gnt_c,
565 xen_blkif_max_pgrants);
389 blkif->st_print = jiffies + msecs_to_jiffies(10 * 1000); 566 blkif->st_print = jiffies + msecs_to_jiffies(10 * 1000);
390 blkif->st_rd_req = 0; 567 blkif->st_rd_req = 0;
391 blkif->st_wr_req = 0; 568 blkif->st_wr_req = 0;
@@ -397,6 +574,8 @@ int xen_blkif_schedule(void *arg)
397{ 574{
398 struct xen_blkif *blkif = arg; 575 struct xen_blkif *blkif = arg;
399 struct xen_vbd *vbd = &blkif->vbd; 576 struct xen_vbd *vbd = &blkif->vbd;
577 unsigned long timeout;
578 int ret;
400 579
401 xen_blkif_get(blkif); 580 xen_blkif_get(blkif);
402 581
@@ -406,27 +585,52 @@ int xen_blkif_schedule(void *arg)
406 if (unlikely(vbd->size != vbd_sz(vbd))) 585 if (unlikely(vbd->size != vbd_sz(vbd)))
407 xen_vbd_resize(blkif); 586 xen_vbd_resize(blkif);
408 587
409 wait_event_interruptible( 588 timeout = msecs_to_jiffies(LRU_INTERVAL);
589
590 timeout = wait_event_interruptible_timeout(
410 blkif->wq, 591 blkif->wq,
411 blkif->waiting_reqs || kthread_should_stop()); 592 blkif->waiting_reqs || kthread_should_stop(),
412 wait_event_interruptible( 593 timeout);
413 blkbk->pending_free_wq, 594 if (timeout == 0)
414 !list_empty(&blkbk->pending_free) || 595 goto purge_gnt_list;
415 kthread_should_stop()); 596 timeout = wait_event_interruptible_timeout(
597 blkif->pending_free_wq,
598 !list_empty(&blkif->pending_free) ||
599 kthread_should_stop(),
600 timeout);
601 if (timeout == 0)
602 goto purge_gnt_list;
416 603
417 blkif->waiting_reqs = 0; 604 blkif->waiting_reqs = 0;
418 smp_mb(); /* clear flag *before* checking for work */ 605 smp_mb(); /* clear flag *before* checking for work */
419 606
420 if (do_block_io_op(blkif)) 607 ret = do_block_io_op(blkif);
608 if (ret > 0)
421 blkif->waiting_reqs = 1; 609 blkif->waiting_reqs = 1;
610 if (ret == -EACCES)
611 wait_event_interruptible(blkif->shutdown_wq,
612 kthread_should_stop());
613
614purge_gnt_list:
615 if (blkif->vbd.feature_gnt_persistent &&
616 time_after(jiffies, blkif->next_lru)) {
617 purge_persistent_gnt(blkif);
618 blkif->next_lru = jiffies + msecs_to_jiffies(LRU_INTERVAL);
619 }
620
621 /* Shrink if we have more than xen_blkif_max_buffer_pages */
622 shrink_free_pagepool(blkif, xen_blkif_max_buffer_pages);
422 623
423 if (log_stats && time_after(jiffies, blkif->st_print)) 624 if (log_stats && time_after(jiffies, blkif->st_print))
424 print_stats(blkif); 625 print_stats(blkif);
425 } 626 }
426 627
628 /* Since we are shutting down remove all pages from the buffer */
629 shrink_free_pagepool(blkif, 0 /* All */);
630
427 /* Free all persistent grant pages */ 631 /* Free all persistent grant pages */
428 if (!RB_EMPTY_ROOT(&blkif->persistent_gnts)) 632 if (!RB_EMPTY_ROOT(&blkif->persistent_gnts))
429 free_persistent_gnts(&blkif->persistent_gnts, 633 free_persistent_gnts(blkif, &blkif->persistent_gnts,
430 blkif->persistent_gnt_c); 634 blkif->persistent_gnt_c);
431 635
432 BUG_ON(!RB_EMPTY_ROOT(&blkif->persistent_gnts)); 636 BUG_ON(!RB_EMPTY_ROOT(&blkif->persistent_gnts));
@@ -441,148 +645,98 @@ int xen_blkif_schedule(void *arg)
441 return 0; 645 return 0;
442} 646}
443 647
444struct seg_buf {
445 unsigned int offset;
446 unsigned int nsec;
447};
448/* 648/*
449 * Unmap the grant references, and also remove the M2P over-rides 649 * Unmap the grant references, and also remove the M2P over-rides
450 * used in the 'pending_req'. 650 * used in the 'pending_req'.
451 */ 651 */
452static void xen_blkbk_unmap(struct pending_req *req) 652static void xen_blkbk_unmap(struct xen_blkif *blkif,
653 struct grant_page *pages[],
654 int num)
453{ 655{
454 struct gnttab_unmap_grant_ref unmap[BLKIF_MAX_SEGMENTS_PER_REQUEST]; 656 struct gnttab_unmap_grant_ref unmap[BLKIF_MAX_SEGMENTS_PER_REQUEST];
455 struct page *pages[BLKIF_MAX_SEGMENTS_PER_REQUEST]; 657 struct page *unmap_pages[BLKIF_MAX_SEGMENTS_PER_REQUEST];
456 unsigned int i, invcount = 0; 658 unsigned int i, invcount = 0;
457 grant_handle_t handle;
458 int ret; 659 int ret;
459 660
460 for (i = 0; i < req->nr_pages; i++) { 661 for (i = 0; i < num; i++) {
461 if (!test_bit(i, req->unmap_seg)) 662 if (pages[i]->persistent_gnt != NULL) {
663 put_persistent_gnt(blkif, pages[i]->persistent_gnt);
462 continue; 664 continue;
463 handle = pending_handle(req, i); 665 }
464 if (handle == BLKBACK_INVALID_HANDLE) 666 if (pages[i]->handle == BLKBACK_INVALID_HANDLE)
465 continue; 667 continue;
466 gnttab_set_unmap_op(&unmap[invcount], vaddr(req, i), 668 unmap_pages[invcount] = pages[i]->page;
467 GNTMAP_host_map, handle); 669 gnttab_set_unmap_op(&unmap[invcount], vaddr(pages[i]->page),
468 pending_handle(req, i) = BLKBACK_INVALID_HANDLE; 670 GNTMAP_host_map, pages[i]->handle);
469 pages[invcount] = virt_to_page(vaddr(req, i)); 671 pages[i]->handle = BLKBACK_INVALID_HANDLE;
470 invcount++; 672 if (++invcount == BLKIF_MAX_SEGMENTS_PER_REQUEST) {
673 ret = gnttab_unmap_refs(unmap, NULL, unmap_pages,
674 invcount);
675 BUG_ON(ret);
676 put_free_pages(blkif, unmap_pages, invcount);
677 invcount = 0;
678 }
679 }
680 if (invcount) {
681 ret = gnttab_unmap_refs(unmap, NULL, unmap_pages, invcount);
682 BUG_ON(ret);
683 put_free_pages(blkif, unmap_pages, invcount);
471 } 684 }
472
473 ret = gnttab_unmap_refs(unmap, NULL, pages, invcount);
474 BUG_ON(ret);
475} 685}
476 686
477static int xen_blkbk_map(struct blkif_request *req, 687static int xen_blkbk_map(struct xen_blkif *blkif,
478 struct pending_req *pending_req, 688 struct grant_page *pages[],
479 struct seg_buf seg[], 689 int num, bool ro)
480 struct page *pages[])
481{ 690{
482 struct gnttab_map_grant_ref map[BLKIF_MAX_SEGMENTS_PER_REQUEST]; 691 struct gnttab_map_grant_ref map[BLKIF_MAX_SEGMENTS_PER_REQUEST];
483 struct persistent_gnt *persistent_gnts[BLKIF_MAX_SEGMENTS_PER_REQUEST];
484 struct page *pages_to_gnt[BLKIF_MAX_SEGMENTS_PER_REQUEST]; 692 struct page *pages_to_gnt[BLKIF_MAX_SEGMENTS_PER_REQUEST];
485 struct persistent_gnt *persistent_gnt = NULL; 693 struct persistent_gnt *persistent_gnt = NULL;
486 struct xen_blkif *blkif = pending_req->blkif;
487 phys_addr_t addr = 0; 694 phys_addr_t addr = 0;
488 int i, j; 695 int i, seg_idx, new_map_idx;
489 bool new_map;
490 int nseg = req->u.rw.nr_segments;
491 int segs_to_map = 0; 696 int segs_to_map = 0;
492 int ret = 0; 697 int ret = 0;
698 int last_map = 0, map_until = 0;
493 int use_persistent_gnts; 699 int use_persistent_gnts;
494 700
495 use_persistent_gnts = (blkif->vbd.feature_gnt_persistent); 701 use_persistent_gnts = (blkif->vbd.feature_gnt_persistent);
496 702
497 BUG_ON(blkif->persistent_gnt_c >
498 max_mapped_grant_pages(pending_req->blkif->blk_protocol));
499
500 /* 703 /*
501 * Fill out preq.nr_sects with proper amount of sectors, and setup 704 * Fill out preq.nr_sects with proper amount of sectors, and setup
502 * assign map[..] with the PFN of the page in our domain with the 705 * assign map[..] with the PFN of the page in our domain with the
503 * corresponding grant reference for each page. 706 * corresponding grant reference for each page.
504 */ 707 */
505 for (i = 0; i < nseg; i++) { 708again:
709 for (i = map_until; i < num; i++) {
506 uint32_t flags; 710 uint32_t flags;
507 711
508 if (use_persistent_gnts) 712 if (use_persistent_gnts)
509 persistent_gnt = get_persistent_gnt( 713 persistent_gnt = get_persistent_gnt(
510 &blkif->persistent_gnts, 714 blkif,
511 req->u.rw.seg[i].gref); 715 pages[i]->gref);
512 716
513 if (persistent_gnt) { 717 if (persistent_gnt) {
514 /* 718 /*
515 * We are using persistent grants and 719 * We are using persistent grants and
516 * the grant is already mapped 720 * the grant is already mapped
517 */ 721 */
518 new_map = false; 722 pages[i]->page = persistent_gnt->page;
519 } else if (use_persistent_gnts && 723 pages[i]->persistent_gnt = persistent_gnt;
520 blkif->persistent_gnt_c <
521 max_mapped_grant_pages(blkif->blk_protocol)) {
522 /*
523 * We are using persistent grants, the grant is
524 * not mapped but we have room for it
525 */
526 new_map = true;
527 persistent_gnt = kmalloc(
528 sizeof(struct persistent_gnt),
529 GFP_KERNEL);
530 if (!persistent_gnt)
531 return -ENOMEM;
532 if (alloc_xenballooned_pages(1, &persistent_gnt->page,
533 false)) {
534 kfree(persistent_gnt);
535 return -ENOMEM;
536 }
537 persistent_gnt->gnt = req->u.rw.seg[i].gref;
538 persistent_gnt->handle = BLKBACK_INVALID_HANDLE;
539
540 pages_to_gnt[segs_to_map] =
541 persistent_gnt->page;
542 addr = (unsigned long) pfn_to_kaddr(
543 page_to_pfn(persistent_gnt->page));
544
545 add_persistent_gnt(&blkif->persistent_gnts,
546 persistent_gnt);
547 blkif->persistent_gnt_c++;
548 pr_debug(DRV_PFX " grant %u added to the tree of persistent grants, using %u/%u\n",
549 persistent_gnt->gnt, blkif->persistent_gnt_c,
550 max_mapped_grant_pages(blkif->blk_protocol));
551 } else { 724 } else {
552 /* 725 if (get_free_page(blkif, &pages[i]->page))
553 * We are either using persistent grants and 726 goto out_of_memory;
554 * hit the maximum limit of grants mapped, 727 addr = vaddr(pages[i]->page);
555 * or we are not using persistent grants. 728 pages_to_gnt[segs_to_map] = pages[i]->page;
556 */ 729 pages[i]->persistent_gnt = NULL;
557 if (use_persistent_gnts &&
558 !blkif->vbd.overflow_max_grants) {
559 blkif->vbd.overflow_max_grants = 1;
560 pr_alert(DRV_PFX " domain %u, device %#x is using maximum number of persistent grants\n",
561 blkif->domid, blkif->vbd.handle);
562 }
563 new_map = true;
564 pages[i] = blkbk->pending_page(pending_req, i);
565 addr = vaddr(pending_req, i);
566 pages_to_gnt[segs_to_map] =
567 blkbk->pending_page(pending_req, i);
568 }
569
570 if (persistent_gnt) {
571 pages[i] = persistent_gnt->page;
572 persistent_gnts[i] = persistent_gnt;
573 } else {
574 persistent_gnts[i] = NULL;
575 }
576
577 if (new_map) {
578 flags = GNTMAP_host_map; 730 flags = GNTMAP_host_map;
579 if (!persistent_gnt && 731 if (!use_persistent_gnts && ro)
580 (pending_req->operation != BLKIF_OP_READ))
581 flags |= GNTMAP_readonly; 732 flags |= GNTMAP_readonly;
582 gnttab_set_map_op(&map[segs_to_map++], addr, 733 gnttab_set_map_op(&map[segs_to_map++], addr,
583 flags, req->u.rw.seg[i].gref, 734 flags, pages[i]->gref,
584 blkif->domid); 735 blkif->domid);
585 } 736 }
737 map_until = i + 1;
738 if (segs_to_map == BLKIF_MAX_SEGMENTS_PER_REQUEST)
739 break;
586 } 740 }
587 741
588 if (segs_to_map) { 742 if (segs_to_map) {
@@ -595,49 +749,133 @@ static int xen_blkbk_map(struct blkif_request *req,
595 * so that when we access vaddr(pending_req,i) it has the contents of 749 * so that when we access vaddr(pending_req,i) it has the contents of
596 * the page from the other domain. 750 * the page from the other domain.
597 */ 751 */
598 bitmap_zero(pending_req->unmap_seg, BLKIF_MAX_SEGMENTS_PER_REQUEST); 752 for (seg_idx = last_map, new_map_idx = 0; seg_idx < map_until; seg_idx++) {
599 for (i = 0, j = 0; i < nseg; i++) { 753 if (!pages[seg_idx]->persistent_gnt) {
600 if (!persistent_gnts[i] ||
601 persistent_gnts[i]->handle == BLKBACK_INVALID_HANDLE) {
602 /* This is a newly mapped grant */ 754 /* This is a newly mapped grant */
603 BUG_ON(j >= segs_to_map); 755 BUG_ON(new_map_idx >= segs_to_map);
604 if (unlikely(map[j].status != 0)) { 756 if (unlikely(map[new_map_idx].status != 0)) {
605 pr_debug(DRV_PFX "invalid buffer -- could not remap it\n"); 757 pr_debug(DRV_PFX "invalid buffer -- could not remap it\n");
606 map[j].handle = BLKBACK_INVALID_HANDLE; 758 pages[seg_idx]->handle = BLKBACK_INVALID_HANDLE;
607 ret |= 1; 759 ret |= 1;
608 if (persistent_gnts[i]) { 760 goto next;
609 rb_erase(&persistent_gnts[i]->node,
610 &blkif->persistent_gnts);
611 blkif->persistent_gnt_c--;
612 kfree(persistent_gnts[i]);
613 persistent_gnts[i] = NULL;
614 }
615 } 761 }
762 pages[seg_idx]->handle = map[new_map_idx].handle;
763 } else {
764 continue;
616 } 765 }
617 if (persistent_gnts[i]) { 766 if (use_persistent_gnts &&
618 if (persistent_gnts[i]->handle == 767 blkif->persistent_gnt_c < xen_blkif_max_pgrants) {
619 BLKBACK_INVALID_HANDLE) { 768 /*
769 * We are using persistent grants, the grant is
770 * not mapped but we might have room for it.
771 */
772 persistent_gnt = kmalloc(sizeof(struct persistent_gnt),
773 GFP_KERNEL);
774 if (!persistent_gnt) {
620 /* 775 /*
621 * If this is a new persistent grant 776 * If we don't have enough memory to
622 * save the handler 777 * allocate the persistent_gnt struct
778 * map this grant non-persistenly
623 */ 779 */
624 persistent_gnts[i]->handle = map[j++].handle; 780 goto next;
625 } 781 }
626 pending_handle(pending_req, i) = 782 persistent_gnt->gnt = map[new_map_idx].ref;
627 persistent_gnts[i]->handle; 783 persistent_gnt->handle = map[new_map_idx].handle;
784 persistent_gnt->page = pages[seg_idx]->page;
785 if (add_persistent_gnt(blkif,
786 persistent_gnt)) {
787 kfree(persistent_gnt);
788 persistent_gnt = NULL;
789 goto next;
790 }
791 pages[seg_idx]->persistent_gnt = persistent_gnt;
792 pr_debug(DRV_PFX " grant %u added to the tree of persistent grants, using %u/%u\n",
793 persistent_gnt->gnt, blkif->persistent_gnt_c,
794 xen_blkif_max_pgrants);
795 goto next;
796 }
797 if (use_persistent_gnts && !blkif->vbd.overflow_max_grants) {
798 blkif->vbd.overflow_max_grants = 1;
799 pr_debug(DRV_PFX " domain %u, device %#x is using maximum number of persistent grants\n",
800 blkif->domid, blkif->vbd.handle);
801 }
802 /*
803 * We could not map this grant persistently, so use it as
804 * a non-persistent grant.
805 */
806next:
807 new_map_idx++;
808 }
809 segs_to_map = 0;
810 last_map = map_until;
811 if (map_until != num)
812 goto again;
628 813
629 if (ret) 814 return ret;
630 continue; 815
631 } else { 816out_of_memory:
632 pending_handle(pending_req, i) = map[j++].handle; 817 pr_alert(DRV_PFX "%s: out of memory\n", __func__);
633 bitmap_set(pending_req->unmap_seg, i, 1); 818 put_free_pages(blkif, pages_to_gnt, segs_to_map);
819 return -ENOMEM;
820}
821
822static int xen_blkbk_map_seg(struct pending_req *pending_req)
823{
824 int rc;
825
826 rc = xen_blkbk_map(pending_req->blkif, pending_req->segments,
827 pending_req->nr_pages,
828 (pending_req->operation != BLKIF_OP_READ));
829
830 return rc;
831}
634 832
635 if (ret) 833static int xen_blkbk_parse_indirect(struct blkif_request *req,
636 continue; 834 struct pending_req *pending_req,
835 struct seg_buf seg[],
836 struct phys_req *preq)
837{
838 struct grant_page **pages = pending_req->indirect_pages;
839 struct xen_blkif *blkif = pending_req->blkif;
840 int indirect_grefs, rc, n, nseg, i;
841 struct blkif_request_segment_aligned *segments = NULL;
842
843 nseg = pending_req->nr_pages;
844 indirect_grefs = INDIRECT_PAGES(nseg);
845 BUG_ON(indirect_grefs > BLKIF_MAX_INDIRECT_PAGES_PER_REQUEST);
846
847 for (i = 0; i < indirect_grefs; i++)
848 pages[i]->gref = req->u.indirect.indirect_grefs[i];
849
850 rc = xen_blkbk_map(blkif, pages, indirect_grefs, true);
851 if (rc)
852 goto unmap;
853
854 for (n = 0, i = 0; n < nseg; n++) {
855 if ((n % SEGS_PER_INDIRECT_FRAME) == 0) {
856 /* Map indirect segments */
857 if (segments)
858 kunmap_atomic(segments);
859 segments = kmap_atomic(pages[n/SEGS_PER_INDIRECT_FRAME]->page);
860 }
861 i = n % SEGS_PER_INDIRECT_FRAME;
862 pending_req->segments[n]->gref = segments[i].gref;
863 seg[n].nsec = segments[i].last_sect -
864 segments[i].first_sect + 1;
865 seg[n].offset = (segments[i].first_sect << 9);
866 if ((segments[i].last_sect >= (PAGE_SIZE >> 9)) ||
867 (segments[i].last_sect < segments[i].first_sect)) {
868 rc = -EINVAL;
869 goto unmap;
637 } 870 }
638 seg[i].offset = (req->u.rw.seg[i].first_sect << 9); 871 preq->nr_sects += seg[n].nsec;
639 } 872 }
640 return ret; 873
874unmap:
875 if (segments)
876 kunmap_atomic(segments);
877 xen_blkbk_unmap(blkif, pages, indirect_grefs);
878 return rc;
641} 879}
642 880
643static int dispatch_discard_io(struct xen_blkif *blkif, 881static int dispatch_discard_io(struct xen_blkif *blkif,
@@ -647,7 +885,18 @@ static int dispatch_discard_io(struct xen_blkif *blkif,
647 int status = BLKIF_RSP_OKAY; 885 int status = BLKIF_RSP_OKAY;
648 struct block_device *bdev = blkif->vbd.bdev; 886 struct block_device *bdev = blkif->vbd.bdev;
649 unsigned long secure; 887 unsigned long secure;
888 struct phys_req preq;
889
890 preq.sector_number = req->u.discard.sector_number;
891 preq.nr_sects = req->u.discard.nr_sectors;
650 892
893 err = xen_vbd_translate(&preq, blkif, WRITE);
894 if (err) {
895 pr_warn(DRV_PFX "access denied: DISCARD [%llu->%llu] on dev=%04x\n",
896 preq.sector_number,
897 preq.sector_number + preq.nr_sects, blkif->vbd.pdevice);
898 goto fail_response;
899 }
651 blkif->st_ds_req++; 900 blkif->st_ds_req++;
652 901
653 xen_blkif_get(blkif); 902 xen_blkif_get(blkif);
@@ -658,7 +907,7 @@ static int dispatch_discard_io(struct xen_blkif *blkif,
658 err = blkdev_issue_discard(bdev, req->u.discard.sector_number, 907 err = blkdev_issue_discard(bdev, req->u.discard.sector_number,
659 req->u.discard.nr_sectors, 908 req->u.discard.nr_sectors,
660 GFP_KERNEL, secure); 909 GFP_KERNEL, secure);
661 910fail_response:
662 if (err == -EOPNOTSUPP) { 911 if (err == -EOPNOTSUPP) {
663 pr_debug(DRV_PFX "discard op failed, not supported\n"); 912 pr_debug(DRV_PFX "discard op failed, not supported\n");
664 status = BLKIF_RSP_EOPNOTSUPP; 913 status = BLKIF_RSP_EOPNOTSUPP;
@@ -674,7 +923,7 @@ static int dispatch_other_io(struct xen_blkif *blkif,
674 struct blkif_request *req, 923 struct blkif_request *req,
675 struct pending_req *pending_req) 924 struct pending_req *pending_req)
676{ 925{
677 free_req(pending_req); 926 free_req(blkif, pending_req);
678 make_response(blkif, req->u.other.id, req->operation, 927 make_response(blkif, req->u.other.id, req->operation,
679 BLKIF_RSP_EOPNOTSUPP); 928 BLKIF_RSP_EOPNOTSUPP);
680 return -EIO; 929 return -EIO;
@@ -726,7 +975,9 @@ static void __end_block_io_op(struct pending_req *pending_req, int error)
726 * the proper response on the ring. 975 * the proper response on the ring.
727 */ 976 */
728 if (atomic_dec_and_test(&pending_req->pendcnt)) { 977 if (atomic_dec_and_test(&pending_req->pendcnt)) {
729 xen_blkbk_unmap(pending_req); 978 xen_blkbk_unmap(pending_req->blkif,
979 pending_req->segments,
980 pending_req->nr_pages);
730 make_response(pending_req->blkif, pending_req->id, 981 make_response(pending_req->blkif, pending_req->id,
731 pending_req->operation, pending_req->status); 982 pending_req->operation, pending_req->status);
732 xen_blkif_put(pending_req->blkif); 983 xen_blkif_put(pending_req->blkif);
@@ -734,7 +985,7 @@ static void __end_block_io_op(struct pending_req *pending_req, int error)
734 if (atomic_read(&pending_req->blkif->drain)) 985 if (atomic_read(&pending_req->blkif->drain))
735 complete(&pending_req->blkif->drain_complete); 986 complete(&pending_req->blkif->drain_complete);
736 } 987 }
737 free_req(pending_req); 988 free_req(pending_req->blkif, pending_req);
738 } 989 }
739} 990}
740 991
@@ -767,6 +1018,12 @@ __do_block_io_op(struct xen_blkif *blkif)
767 rp = blk_rings->common.sring->req_prod; 1018 rp = blk_rings->common.sring->req_prod;
768 rmb(); /* Ensure we see queued requests up to 'rp'. */ 1019 rmb(); /* Ensure we see queued requests up to 'rp'. */
769 1020
1021 if (RING_REQUEST_PROD_OVERFLOW(&blk_rings->common, rp)) {
1022 rc = blk_rings->common.rsp_prod_pvt;
1023 pr_warn(DRV_PFX "Frontend provided bogus ring requests (%d - %d = %d). Halting ring processing on dev=%04x\n",
1024 rp, rc, rp - rc, blkif->vbd.pdevice);
1025 return -EACCES;
1026 }
770 while (rc != rp) { 1027 while (rc != rp) {
771 1028
772 if (RING_REQUEST_CONS_OVERFLOW(&blk_rings->common, rc)) 1029 if (RING_REQUEST_CONS_OVERFLOW(&blk_rings->common, rc))
@@ -777,7 +1034,7 @@ __do_block_io_op(struct xen_blkif *blkif)
777 break; 1034 break;
778 } 1035 }
779 1036
780 pending_req = alloc_req(); 1037 pending_req = alloc_req(blkif);
781 if (NULL == pending_req) { 1038 if (NULL == pending_req) {
782 blkif->st_oo_req++; 1039 blkif->st_oo_req++;
783 more_to_do = 1; 1040 more_to_do = 1;
@@ -807,11 +1064,12 @@ __do_block_io_op(struct xen_blkif *blkif)
807 case BLKIF_OP_WRITE: 1064 case BLKIF_OP_WRITE:
808 case BLKIF_OP_WRITE_BARRIER: 1065 case BLKIF_OP_WRITE_BARRIER:
809 case BLKIF_OP_FLUSH_DISKCACHE: 1066 case BLKIF_OP_FLUSH_DISKCACHE:
1067 case BLKIF_OP_INDIRECT:
810 if (dispatch_rw_block_io(blkif, &req, pending_req)) 1068 if (dispatch_rw_block_io(blkif, &req, pending_req))
811 goto done; 1069 goto done;
812 break; 1070 break;
813 case BLKIF_OP_DISCARD: 1071 case BLKIF_OP_DISCARD:
814 free_req(pending_req); 1072 free_req(blkif, pending_req);
815 if (dispatch_discard_io(blkif, &req)) 1073 if (dispatch_discard_io(blkif, &req))
816 goto done; 1074 goto done;
817 break; 1075 break;
@@ -853,17 +1111,28 @@ static int dispatch_rw_block_io(struct xen_blkif *blkif,
853 struct pending_req *pending_req) 1111 struct pending_req *pending_req)
854{ 1112{
855 struct phys_req preq; 1113 struct phys_req preq;
856 struct seg_buf seg[BLKIF_MAX_SEGMENTS_PER_REQUEST]; 1114 struct seg_buf *seg = pending_req->seg;
857 unsigned int nseg; 1115 unsigned int nseg;
858 struct bio *bio = NULL; 1116 struct bio *bio = NULL;
859 struct bio *biolist[BLKIF_MAX_SEGMENTS_PER_REQUEST]; 1117 struct bio **biolist = pending_req->biolist;
860 int i, nbio = 0; 1118 int i, nbio = 0;
861 int operation; 1119 int operation;
862 struct blk_plug plug; 1120 struct blk_plug plug;
863 bool drain = false; 1121 bool drain = false;
864 struct page *pages[BLKIF_MAX_SEGMENTS_PER_REQUEST]; 1122 struct grant_page **pages = pending_req->segments;
1123 unsigned short req_operation;
1124
1125 req_operation = req->operation == BLKIF_OP_INDIRECT ?
1126 req->u.indirect.indirect_op : req->operation;
1127 if ((req->operation == BLKIF_OP_INDIRECT) &&
1128 (req_operation != BLKIF_OP_READ) &&
1129 (req_operation != BLKIF_OP_WRITE)) {
1130 pr_debug(DRV_PFX "Invalid indirect operation (%u)\n",
1131 req_operation);
1132 goto fail_response;
1133 }
865 1134
866 switch (req->operation) { 1135 switch (req_operation) {
867 case BLKIF_OP_READ: 1136 case BLKIF_OP_READ:
868 blkif->st_rd_req++; 1137 blkif->st_rd_req++;
869 operation = READ; 1138 operation = READ;
@@ -885,33 +1154,47 @@ static int dispatch_rw_block_io(struct xen_blkif *blkif,
885 } 1154 }
886 1155
887 /* Check that the number of segments is sane. */ 1156 /* Check that the number of segments is sane. */
888 nseg = req->u.rw.nr_segments; 1157 nseg = req->operation == BLKIF_OP_INDIRECT ?
1158 req->u.indirect.nr_segments : req->u.rw.nr_segments;
889 1159
890 if (unlikely(nseg == 0 && operation != WRITE_FLUSH) || 1160 if (unlikely(nseg == 0 && operation != WRITE_FLUSH) ||
891 unlikely(nseg > BLKIF_MAX_SEGMENTS_PER_REQUEST)) { 1161 unlikely((req->operation != BLKIF_OP_INDIRECT) &&
1162 (nseg > BLKIF_MAX_SEGMENTS_PER_REQUEST)) ||
1163 unlikely((req->operation == BLKIF_OP_INDIRECT) &&
1164 (nseg > MAX_INDIRECT_SEGMENTS))) {
892 pr_debug(DRV_PFX "Bad number of segments in request (%d)\n", 1165 pr_debug(DRV_PFX "Bad number of segments in request (%d)\n",
893 nseg); 1166 nseg);
894 /* Haven't submitted any bio's yet. */ 1167 /* Haven't submitted any bio's yet. */
895 goto fail_response; 1168 goto fail_response;
896 } 1169 }
897 1170
898 preq.sector_number = req->u.rw.sector_number;
899 preq.nr_sects = 0; 1171 preq.nr_sects = 0;
900 1172
901 pending_req->blkif = blkif; 1173 pending_req->blkif = blkif;
902 pending_req->id = req->u.rw.id; 1174 pending_req->id = req->u.rw.id;
903 pending_req->operation = req->operation; 1175 pending_req->operation = req_operation;
904 pending_req->status = BLKIF_RSP_OKAY; 1176 pending_req->status = BLKIF_RSP_OKAY;
905 pending_req->nr_pages = nseg; 1177 pending_req->nr_pages = nseg;
906 1178
907 for (i = 0; i < nseg; i++) { 1179 if (req->operation != BLKIF_OP_INDIRECT) {
908 seg[i].nsec = req->u.rw.seg[i].last_sect - 1180 preq.dev = req->u.rw.handle;
909 req->u.rw.seg[i].first_sect + 1; 1181 preq.sector_number = req->u.rw.sector_number;
910 if ((req->u.rw.seg[i].last_sect >= (PAGE_SIZE >> 9)) || 1182 for (i = 0; i < nseg; i++) {
911 (req->u.rw.seg[i].last_sect < req->u.rw.seg[i].first_sect)) 1183 pages[i]->gref = req->u.rw.seg[i].gref;
1184 seg[i].nsec = req->u.rw.seg[i].last_sect -
1185 req->u.rw.seg[i].first_sect + 1;
1186 seg[i].offset = (req->u.rw.seg[i].first_sect << 9);
1187 if ((req->u.rw.seg[i].last_sect >= (PAGE_SIZE >> 9)) ||
1188 (req->u.rw.seg[i].last_sect <
1189 req->u.rw.seg[i].first_sect))
1190 goto fail_response;
1191 preq.nr_sects += seg[i].nsec;
1192 }
1193 } else {
1194 preq.dev = req->u.indirect.handle;
1195 preq.sector_number = req->u.indirect.sector_number;
1196 if (xen_blkbk_parse_indirect(req, pending_req, seg, &preq))
912 goto fail_response; 1197 goto fail_response;
913 preq.nr_sects += seg[i].nsec;
914
915 } 1198 }
916 1199
917 if (xen_vbd_translate(&preq, blkif, operation) != 0) { 1200 if (xen_vbd_translate(&preq, blkif, operation) != 0) {
@@ -948,7 +1231,7 @@ static int dispatch_rw_block_io(struct xen_blkif *blkif,
948 * the hypercall to unmap the grants - that is all done in 1231 * the hypercall to unmap the grants - that is all done in
949 * xen_blkbk_unmap. 1232 * xen_blkbk_unmap.
950 */ 1233 */
951 if (xen_blkbk_map(req, pending_req, seg, pages)) 1234 if (xen_blkbk_map_seg(pending_req))
952 goto fail_flush; 1235 goto fail_flush;
953 1236
954 /* 1237 /*
@@ -960,11 +1243,12 @@ static int dispatch_rw_block_io(struct xen_blkif *blkif,
960 for (i = 0; i < nseg; i++) { 1243 for (i = 0; i < nseg; i++) {
961 while ((bio == NULL) || 1244 while ((bio == NULL) ||
962 (bio_add_page(bio, 1245 (bio_add_page(bio,
963 pages[i], 1246 pages[i]->page,
964 seg[i].nsec << 9, 1247 seg[i].nsec << 9,
965 seg[i].offset) == 0)) { 1248 seg[i].offset) == 0)) {
966 1249
967 bio = bio_alloc(GFP_KERNEL, nseg-i); 1250 int nr_iovecs = min_t(int, (nseg-i), BIO_MAX_PAGES);
1251 bio = bio_alloc(GFP_KERNEL, nr_iovecs);
968 if (unlikely(bio == NULL)) 1252 if (unlikely(bio == NULL))
969 goto fail_put_bio; 1253 goto fail_put_bio;
970 1254
@@ -1009,11 +1293,12 @@ static int dispatch_rw_block_io(struct xen_blkif *blkif,
1009 return 0; 1293 return 0;
1010 1294
1011 fail_flush: 1295 fail_flush:
1012 xen_blkbk_unmap(pending_req); 1296 xen_blkbk_unmap(blkif, pending_req->segments,
1297 pending_req->nr_pages);
1013 fail_response: 1298 fail_response:
1014 /* Haven't submitted any bio's yet. */ 1299 /* Haven't submitted any bio's yet. */
1015 make_response(blkif, req->u.rw.id, req->operation, BLKIF_RSP_ERROR); 1300 make_response(blkif, req->u.rw.id, req_operation, BLKIF_RSP_ERROR);
1016 free_req(pending_req); 1301 free_req(blkif, pending_req);
1017 msleep(1); /* back off a bit */ 1302 msleep(1); /* back off a bit */
1018 return -EIO; 1303 return -EIO;
1019 1304
@@ -1070,73 +1355,20 @@ static void make_response(struct xen_blkif *blkif, u64 id,
1070 1355
1071static int __init xen_blkif_init(void) 1356static int __init xen_blkif_init(void)
1072{ 1357{
1073 int i, mmap_pages;
1074 int rc = 0; 1358 int rc = 0;
1075 1359
1076 if (!xen_domain()) 1360 if (!xen_domain())
1077 return -ENODEV; 1361 return -ENODEV;
1078 1362
1079 blkbk = kzalloc(sizeof(struct xen_blkbk), GFP_KERNEL);
1080 if (!blkbk) {
1081 pr_alert(DRV_PFX "%s: out of memory!\n", __func__);
1082 return -ENOMEM;
1083 }
1084
1085 mmap_pages = xen_blkif_reqs * BLKIF_MAX_SEGMENTS_PER_REQUEST;
1086
1087 blkbk->pending_reqs = kzalloc(sizeof(blkbk->pending_reqs[0]) *
1088 xen_blkif_reqs, GFP_KERNEL);
1089 blkbk->pending_grant_handles = kmalloc(sizeof(blkbk->pending_grant_handles[0]) *
1090 mmap_pages, GFP_KERNEL);
1091 blkbk->pending_pages = kzalloc(sizeof(blkbk->pending_pages[0]) *
1092 mmap_pages, GFP_KERNEL);
1093
1094 if (!blkbk->pending_reqs || !blkbk->pending_grant_handles ||
1095 !blkbk->pending_pages) {
1096 rc = -ENOMEM;
1097 goto out_of_memory;
1098 }
1099
1100 for (i = 0; i < mmap_pages; i++) {
1101 blkbk->pending_grant_handles[i] = BLKBACK_INVALID_HANDLE;
1102 blkbk->pending_pages[i] = alloc_page(GFP_KERNEL);
1103 if (blkbk->pending_pages[i] == NULL) {
1104 rc = -ENOMEM;
1105 goto out_of_memory;
1106 }
1107 }
1108 rc = xen_blkif_interface_init(); 1363 rc = xen_blkif_interface_init();
1109 if (rc) 1364 if (rc)
1110 goto failed_init; 1365 goto failed_init;
1111 1366
1112 INIT_LIST_HEAD(&blkbk->pending_free);
1113 spin_lock_init(&blkbk->pending_free_lock);
1114 init_waitqueue_head(&blkbk->pending_free_wq);
1115
1116 for (i = 0; i < xen_blkif_reqs; i++)
1117 list_add_tail(&blkbk->pending_reqs[i].free_list,
1118 &blkbk->pending_free);
1119
1120 rc = xen_blkif_xenbus_init(); 1367 rc = xen_blkif_xenbus_init();
1121 if (rc) 1368 if (rc)
1122 goto failed_init; 1369 goto failed_init;
1123 1370
1124 return 0;
1125
1126 out_of_memory:
1127 pr_alert(DRV_PFX "%s: out of memory\n", __func__);
1128 failed_init: 1371 failed_init:
1129 kfree(blkbk->pending_reqs);
1130 kfree(blkbk->pending_grant_handles);
1131 if (blkbk->pending_pages) {
1132 for (i = 0; i < mmap_pages; i++) {
1133 if (blkbk->pending_pages[i])
1134 __free_page(blkbk->pending_pages[i]);
1135 }
1136 kfree(blkbk->pending_pages);
1137 }
1138 kfree(blkbk);
1139 blkbk = NULL;
1140 return rc; 1372 return rc;
1141} 1373}
1142 1374
diff --git a/drivers/block/xen-blkback/common.h b/drivers/block/xen-blkback/common.h
index 60103e2517ba..8d8807563d99 100644
--- a/drivers/block/xen-blkback/common.h
+++ b/drivers/block/xen-blkback/common.h
@@ -50,6 +50,19 @@
50 __func__, __LINE__, ##args) 50 __func__, __LINE__, ##args)
51 51
52 52
53/*
54 * This is the maximum number of segments that would be allowed in indirect
55 * requests. This value will also be passed to the frontend.
56 */
57#define MAX_INDIRECT_SEGMENTS 256
58
59#define SEGS_PER_INDIRECT_FRAME \
60 (PAGE_SIZE/sizeof(struct blkif_request_segment_aligned))
61#define MAX_INDIRECT_PAGES \
62 ((MAX_INDIRECT_SEGMENTS + SEGS_PER_INDIRECT_FRAME - 1)/SEGS_PER_INDIRECT_FRAME)
63#define INDIRECT_PAGES(_segs) \
64 ((_segs + SEGS_PER_INDIRECT_FRAME - 1)/SEGS_PER_INDIRECT_FRAME)
65
53/* Not a real protocol. Used to generate ring structs which contain 66/* Not a real protocol. Used to generate ring structs which contain
54 * the elements common to all protocols only. This way we get a 67 * the elements common to all protocols only. This way we get a
55 * compiler-checkable way to use common struct elements, so we can 68 * compiler-checkable way to use common struct elements, so we can
@@ -83,12 +96,31 @@ struct blkif_x86_32_request_other {
83 uint64_t id; /* private guest value, echoed in resp */ 96 uint64_t id; /* private guest value, echoed in resp */
84} __attribute__((__packed__)); 97} __attribute__((__packed__));
85 98
99struct blkif_x86_32_request_indirect {
100 uint8_t indirect_op;
101 uint16_t nr_segments;
102 uint64_t id;
103 blkif_sector_t sector_number;
104 blkif_vdev_t handle;
105 uint16_t _pad1;
106 grant_ref_t indirect_grefs[BLKIF_MAX_INDIRECT_PAGES_PER_REQUEST];
107 /*
108 * The maximum number of indirect segments (and pages) that will
109 * be used is determined by MAX_INDIRECT_SEGMENTS, this value
110 * is also exported to the guest (via xenstore
111 * feature-max-indirect-segments entry), so the frontend knows how
112 * many indirect segments the backend supports.
113 */
114 uint64_t _pad2; /* make it 64 byte aligned */
115} __attribute__((__packed__));
116
86struct blkif_x86_32_request { 117struct blkif_x86_32_request {
87 uint8_t operation; /* BLKIF_OP_??? */ 118 uint8_t operation; /* BLKIF_OP_??? */
88 union { 119 union {
89 struct blkif_x86_32_request_rw rw; 120 struct blkif_x86_32_request_rw rw;
90 struct blkif_x86_32_request_discard discard; 121 struct blkif_x86_32_request_discard discard;
91 struct blkif_x86_32_request_other other; 122 struct blkif_x86_32_request_other other;
123 struct blkif_x86_32_request_indirect indirect;
92 } u; 124 } u;
93} __attribute__((__packed__)); 125} __attribute__((__packed__));
94 126
@@ -127,12 +159,32 @@ struct blkif_x86_64_request_other {
127 uint64_t id; /* private guest value, echoed in resp */ 159 uint64_t id; /* private guest value, echoed in resp */
128} __attribute__((__packed__)); 160} __attribute__((__packed__));
129 161
162struct blkif_x86_64_request_indirect {
163 uint8_t indirect_op;
164 uint16_t nr_segments;
165 uint32_t _pad1; /* offsetof(blkif_..,u.indirect.id)==8 */
166 uint64_t id;
167 blkif_sector_t sector_number;
168 blkif_vdev_t handle;
169 uint16_t _pad2;
170 grant_ref_t indirect_grefs[BLKIF_MAX_INDIRECT_PAGES_PER_REQUEST];
171 /*
172 * The maximum number of indirect segments (and pages) that will
173 * be used is determined by MAX_INDIRECT_SEGMENTS, this value
174 * is also exported to the guest (via xenstore
175 * feature-max-indirect-segments entry), so the frontend knows how
176 * many indirect segments the backend supports.
177 */
178 uint32_t _pad3; /* make it 64 byte aligned */
179} __attribute__((__packed__));
180
130struct blkif_x86_64_request { 181struct blkif_x86_64_request {
131 uint8_t operation; /* BLKIF_OP_??? */ 182 uint8_t operation; /* BLKIF_OP_??? */
132 union { 183 union {
133 struct blkif_x86_64_request_rw rw; 184 struct blkif_x86_64_request_rw rw;
134 struct blkif_x86_64_request_discard discard; 185 struct blkif_x86_64_request_discard discard;
135 struct blkif_x86_64_request_other other; 186 struct blkif_x86_64_request_other other;
187 struct blkif_x86_64_request_indirect indirect;
136 } u; 188 } u;
137} __attribute__((__packed__)); 189} __attribute__((__packed__));
138 190
@@ -182,12 +234,26 @@ struct xen_vbd {
182 234
183struct backend_info; 235struct backend_info;
184 236
237/* Number of available flags */
238#define PERSISTENT_GNT_FLAGS_SIZE 2
239/* This persistent grant is currently in use */
240#define PERSISTENT_GNT_ACTIVE 0
241/*
242 * This persistent grant has been used, this flag is set when we remove the
243 * PERSISTENT_GNT_ACTIVE, to know that this grant has been used recently.
244 */
245#define PERSISTENT_GNT_WAS_ACTIVE 1
246
247/* Number of requests that we can fit in a ring */
248#define XEN_BLKIF_REQS 32
185 249
186struct persistent_gnt { 250struct persistent_gnt {
187 struct page *page; 251 struct page *page;
188 grant_ref_t gnt; 252 grant_ref_t gnt;
189 grant_handle_t handle; 253 grant_handle_t handle;
254 DECLARE_BITMAP(flags, PERSISTENT_GNT_FLAGS_SIZE);
190 struct rb_node node; 255 struct rb_node node;
256 struct list_head remove_node;
191}; 257};
192 258
193struct xen_blkif { 259struct xen_blkif {
@@ -219,6 +285,23 @@ struct xen_blkif {
219 /* tree to store persistent grants */ 285 /* tree to store persistent grants */
220 struct rb_root persistent_gnts; 286 struct rb_root persistent_gnts;
221 unsigned int persistent_gnt_c; 287 unsigned int persistent_gnt_c;
288 atomic_t persistent_gnt_in_use;
289 unsigned long next_lru;
290
291 /* used by the kworker that offload work from the persistent purge */
292 struct list_head persistent_purge_list;
293 struct work_struct persistent_purge_work;
294
295 /* buffer of free pages to map grant refs */
296 spinlock_t free_pages_lock;
297 int free_pages_num;
298 struct list_head free_pages;
299
300 /* List of all 'pending_req' available */
301 struct list_head pending_free;
302 /* And its spinlock. */
303 spinlock_t pending_free_lock;
304 wait_queue_head_t pending_free_wq;
222 305
223 /* statistics */ 306 /* statistics */
224 unsigned long st_print; 307 unsigned long st_print;
@@ -231,6 +314,41 @@ struct xen_blkif {
231 unsigned long long st_wr_sect; 314 unsigned long long st_wr_sect;
232 315
233 wait_queue_head_t waiting_to_free; 316 wait_queue_head_t waiting_to_free;
317 /* Thread shutdown wait queue. */
318 wait_queue_head_t shutdown_wq;
319};
320
321struct seg_buf {
322 unsigned long offset;
323 unsigned int nsec;
324};
325
326struct grant_page {
327 struct page *page;
328 struct persistent_gnt *persistent_gnt;
329 grant_handle_t handle;
330 grant_ref_t gref;
331};
332
333/*
334 * Each outstanding request that we've passed to the lower device layers has a
335 * 'pending_req' allocated to it. Each buffer_head that completes decrements
336 * the pendcnt towards zero. When it hits zero, the specified domain has a
337 * response queued for it, with the saved 'id' passed back.
338 */
339struct pending_req {
340 struct xen_blkif *blkif;
341 u64 id;
342 int nr_pages;
343 atomic_t pendcnt;
344 unsigned short operation;
345 int status;
346 struct list_head free_list;
347 struct grant_page *segments[MAX_INDIRECT_SEGMENTS];
348 /* Indirect descriptors */
349 struct grant_page *indirect_pages[MAX_INDIRECT_PAGES];
350 struct seg_buf seg[MAX_INDIRECT_SEGMENTS];
351 struct bio *biolist[MAX_INDIRECT_SEGMENTS];
234}; 352};
235 353
236 354
@@ -257,6 +375,7 @@ int xen_blkif_xenbus_init(void);
257 375
258irqreturn_t xen_blkif_be_int(int irq, void *dev_id); 376irqreturn_t xen_blkif_be_int(int irq, void *dev_id);
259int xen_blkif_schedule(void *arg); 377int xen_blkif_schedule(void *arg);
378int xen_blkif_purge_persistent(void *arg);
260 379
261int xen_blkbk_flush_diskcache(struct xenbus_transaction xbt, 380int xen_blkbk_flush_diskcache(struct xenbus_transaction xbt,
262 struct backend_info *be, int state); 381 struct backend_info *be, int state);
@@ -268,7 +387,7 @@ struct xenbus_device *xen_blkbk_xenbus(struct backend_info *be);
268static inline void blkif_get_x86_32_req(struct blkif_request *dst, 387static inline void blkif_get_x86_32_req(struct blkif_request *dst,
269 struct blkif_x86_32_request *src) 388 struct blkif_x86_32_request *src)
270{ 389{
271 int i, n = BLKIF_MAX_SEGMENTS_PER_REQUEST; 390 int i, n = BLKIF_MAX_SEGMENTS_PER_REQUEST, j;
272 dst->operation = src->operation; 391 dst->operation = src->operation;
273 switch (src->operation) { 392 switch (src->operation) {
274 case BLKIF_OP_READ: 393 case BLKIF_OP_READ:
@@ -291,6 +410,18 @@ static inline void blkif_get_x86_32_req(struct blkif_request *dst,
291 dst->u.discard.sector_number = src->u.discard.sector_number; 410 dst->u.discard.sector_number = src->u.discard.sector_number;
292 dst->u.discard.nr_sectors = src->u.discard.nr_sectors; 411 dst->u.discard.nr_sectors = src->u.discard.nr_sectors;
293 break; 412 break;
413 case BLKIF_OP_INDIRECT:
414 dst->u.indirect.indirect_op = src->u.indirect.indirect_op;
415 dst->u.indirect.nr_segments = src->u.indirect.nr_segments;
416 dst->u.indirect.handle = src->u.indirect.handle;
417 dst->u.indirect.id = src->u.indirect.id;
418 dst->u.indirect.sector_number = src->u.indirect.sector_number;
419 barrier();
420 j = min(MAX_INDIRECT_PAGES, INDIRECT_PAGES(dst->u.indirect.nr_segments));
421 for (i = 0; i < j; i++)
422 dst->u.indirect.indirect_grefs[i] =
423 src->u.indirect.indirect_grefs[i];
424 break;
294 default: 425 default:
295 /* 426 /*
296 * Don't know how to translate this op. Only get the 427 * Don't know how to translate this op. Only get the
@@ -304,7 +435,7 @@ static inline void blkif_get_x86_32_req(struct blkif_request *dst,
304static inline void blkif_get_x86_64_req(struct blkif_request *dst, 435static inline void blkif_get_x86_64_req(struct blkif_request *dst,
305 struct blkif_x86_64_request *src) 436 struct blkif_x86_64_request *src)
306{ 437{
307 int i, n = BLKIF_MAX_SEGMENTS_PER_REQUEST; 438 int i, n = BLKIF_MAX_SEGMENTS_PER_REQUEST, j;
308 dst->operation = src->operation; 439 dst->operation = src->operation;
309 switch (src->operation) { 440 switch (src->operation) {
310 case BLKIF_OP_READ: 441 case BLKIF_OP_READ:
@@ -327,6 +458,18 @@ static inline void blkif_get_x86_64_req(struct blkif_request *dst,
327 dst->u.discard.sector_number = src->u.discard.sector_number; 458 dst->u.discard.sector_number = src->u.discard.sector_number;
328 dst->u.discard.nr_sectors = src->u.discard.nr_sectors; 459 dst->u.discard.nr_sectors = src->u.discard.nr_sectors;
329 break; 460 break;
461 case BLKIF_OP_INDIRECT:
462 dst->u.indirect.indirect_op = src->u.indirect.indirect_op;
463 dst->u.indirect.nr_segments = src->u.indirect.nr_segments;
464 dst->u.indirect.handle = src->u.indirect.handle;
465 dst->u.indirect.id = src->u.indirect.id;
466 dst->u.indirect.sector_number = src->u.indirect.sector_number;
467 barrier();
468 j = min(MAX_INDIRECT_PAGES, INDIRECT_PAGES(dst->u.indirect.nr_segments));
469 for (i = 0; i < j; i++)
470 dst->u.indirect.indirect_grefs[i] =
471 src->u.indirect.indirect_grefs[i];
472 break;
330 default: 473 default:
331 /* 474 /*
332 * Don't know how to translate this op. Only get the 475 * Don't know how to translate this op. Only get the
diff --git a/drivers/block/xen-blkback/xenbus.c b/drivers/block/xen-blkback/xenbus.c
index 04608a6502d7..fe5c3cd10c34 100644
--- a/drivers/block/xen-blkback/xenbus.c
+++ b/drivers/block/xen-blkback/xenbus.c
@@ -98,12 +98,17 @@ static void xen_update_blkif_status(struct xen_blkif *blkif)
98 err = PTR_ERR(blkif->xenblkd); 98 err = PTR_ERR(blkif->xenblkd);
99 blkif->xenblkd = NULL; 99 blkif->xenblkd = NULL;
100 xenbus_dev_error(blkif->be->dev, err, "start xenblkd"); 100 xenbus_dev_error(blkif->be->dev, err, "start xenblkd");
101 return;
101 } 102 }
102} 103}
103 104
104static struct xen_blkif *xen_blkif_alloc(domid_t domid) 105static struct xen_blkif *xen_blkif_alloc(domid_t domid)
105{ 106{
106 struct xen_blkif *blkif; 107 struct xen_blkif *blkif;
108 struct pending_req *req, *n;
109 int i, j;
110
111 BUILD_BUG_ON(MAX_INDIRECT_PAGES > BLKIF_MAX_INDIRECT_PAGES_PER_REQUEST);
107 112
108 blkif = kmem_cache_zalloc(xen_blkif_cachep, GFP_KERNEL); 113 blkif = kmem_cache_zalloc(xen_blkif_cachep, GFP_KERNEL);
109 if (!blkif) 114 if (!blkif)
@@ -118,8 +123,57 @@ static struct xen_blkif *xen_blkif_alloc(domid_t domid)
118 blkif->st_print = jiffies; 123 blkif->st_print = jiffies;
119 init_waitqueue_head(&blkif->waiting_to_free); 124 init_waitqueue_head(&blkif->waiting_to_free);
120 blkif->persistent_gnts.rb_node = NULL; 125 blkif->persistent_gnts.rb_node = NULL;
126 spin_lock_init(&blkif->free_pages_lock);
127 INIT_LIST_HEAD(&blkif->free_pages);
128 blkif->free_pages_num = 0;
129 atomic_set(&blkif->persistent_gnt_in_use, 0);
130
131 INIT_LIST_HEAD(&blkif->pending_free);
132
133 for (i = 0; i < XEN_BLKIF_REQS; i++) {
134 req = kzalloc(sizeof(*req), GFP_KERNEL);
135 if (!req)
136 goto fail;
137 list_add_tail(&req->free_list,
138 &blkif->pending_free);
139 for (j = 0; j < MAX_INDIRECT_SEGMENTS; j++) {
140 req->segments[j] = kzalloc(sizeof(*req->segments[0]),
141 GFP_KERNEL);
142 if (!req->segments[j])
143 goto fail;
144 }
145 for (j = 0; j < MAX_INDIRECT_PAGES; j++) {
146 req->indirect_pages[j] = kzalloc(sizeof(*req->indirect_pages[0]),
147 GFP_KERNEL);
148 if (!req->indirect_pages[j])
149 goto fail;
150 }
151 }
152 spin_lock_init(&blkif->pending_free_lock);
153 init_waitqueue_head(&blkif->pending_free_wq);
154 init_waitqueue_head(&blkif->shutdown_wq);
121 155
122 return blkif; 156 return blkif;
157
158fail:
159 list_for_each_entry_safe(req, n, &blkif->pending_free, free_list) {
160 list_del(&req->free_list);
161 for (j = 0; j < MAX_INDIRECT_SEGMENTS; j++) {
162 if (!req->segments[j])
163 break;
164 kfree(req->segments[j]);
165 }
166 for (j = 0; j < MAX_INDIRECT_PAGES; j++) {
167 if (!req->indirect_pages[j])
168 break;
169 kfree(req->indirect_pages[j]);
170 }
171 kfree(req);
172 }
173
174 kmem_cache_free(xen_blkif_cachep, blkif);
175
176 return ERR_PTR(-ENOMEM);
123} 177}
124 178
125static int xen_blkif_map(struct xen_blkif *blkif, unsigned long shared_page, 179static int xen_blkif_map(struct xen_blkif *blkif, unsigned long shared_page,
@@ -178,6 +232,7 @@ static void xen_blkif_disconnect(struct xen_blkif *blkif)
178{ 232{
179 if (blkif->xenblkd) { 233 if (blkif->xenblkd) {
180 kthread_stop(blkif->xenblkd); 234 kthread_stop(blkif->xenblkd);
235 wake_up(&blkif->shutdown_wq);
181 blkif->xenblkd = NULL; 236 blkif->xenblkd = NULL;
182 } 237 }
183 238
@@ -198,8 +253,28 @@ static void xen_blkif_disconnect(struct xen_blkif *blkif)
198 253
199static void xen_blkif_free(struct xen_blkif *blkif) 254static void xen_blkif_free(struct xen_blkif *blkif)
200{ 255{
256 struct pending_req *req, *n;
257 int i = 0, j;
258
201 if (!atomic_dec_and_test(&blkif->refcnt)) 259 if (!atomic_dec_and_test(&blkif->refcnt))
202 BUG(); 260 BUG();
261
262 /* Check that there is no request in use */
263 list_for_each_entry_safe(req, n, &blkif->pending_free, free_list) {
264 list_del(&req->free_list);
265
266 for (j = 0; j < MAX_INDIRECT_SEGMENTS; j++)
267 kfree(req->segments[j]);
268
269 for (j = 0; j < MAX_INDIRECT_PAGES; j++)
270 kfree(req->indirect_pages[j]);
271
272 kfree(req);
273 i++;
274 }
275
276 WARN_ON(i != XEN_BLKIF_REQS);
277
203 kmem_cache_free(xen_blkif_cachep, blkif); 278 kmem_cache_free(xen_blkif_cachep, blkif);
204} 279}
205 280
@@ -678,6 +753,11 @@ again:
678 dev->nodename); 753 dev->nodename);
679 goto abort; 754 goto abort;
680 } 755 }
756 err = xenbus_printf(xbt, dev->nodename, "feature-max-indirect-segments", "%u",
757 MAX_INDIRECT_SEGMENTS);
758 if (err)
759 dev_warn(&dev->dev, "writing %s/feature-max-indirect-segments (%d)",
760 dev->nodename, err);
681 761
682 err = xenbus_printf(xbt, dev->nodename, "sectors", "%llu", 762 err = xenbus_printf(xbt, dev->nodename, "sectors", "%llu",
683 (unsigned long long)vbd_sz(&be->blkif->vbd)); 763 (unsigned long long)vbd_sz(&be->blkif->vbd));
@@ -704,6 +784,11 @@ again:
704 dev->nodename); 784 dev->nodename);
705 goto abort; 785 goto abort;
706 } 786 }
787 err = xenbus_printf(xbt, dev->nodename, "physical-sector-size", "%u",
788 bdev_physical_block_size(be->blkif->vbd.bdev));
789 if (err)
790 xenbus_dev_error(dev, err, "writing %s/physical-sector-size",
791 dev->nodename);
707 792
708 err = xenbus_transaction_end(xbt, 0); 793 err = xenbus_transaction_end(xbt, 0);
709 if (err == -EAGAIN) 794 if (err == -EAGAIN)
diff --git a/drivers/block/xen-blkfront.c b/drivers/block/xen-blkfront.c
index d89ef86220f4..a4660bbee8a6 100644
--- a/drivers/block/xen-blkfront.c
+++ b/drivers/block/xen-blkfront.c
@@ -74,12 +74,30 @@ struct grant {
74struct blk_shadow { 74struct blk_shadow {
75 struct blkif_request req; 75 struct blkif_request req;
76 struct request *request; 76 struct request *request;
77 struct grant *grants_used[BLKIF_MAX_SEGMENTS_PER_REQUEST]; 77 struct grant **grants_used;
78 struct grant **indirect_grants;
79 struct scatterlist *sg;
80};
81
82struct split_bio {
83 struct bio *bio;
84 atomic_t pending;
85 int err;
78}; 86};
79 87
80static DEFINE_MUTEX(blkfront_mutex); 88static DEFINE_MUTEX(blkfront_mutex);
81static const struct block_device_operations xlvbd_block_fops; 89static const struct block_device_operations xlvbd_block_fops;
82 90
91/*
92 * Maximum number of segments in indirect requests, the actual value used by
93 * the frontend driver is the minimum of this value and the value provided
94 * by the backend driver.
95 */
96
97static unsigned int xen_blkif_max_segments = 32;
98module_param_named(max, xen_blkif_max_segments, int, S_IRUGO);
99MODULE_PARM_DESC(max, "Maximum amount of segments in indirect requests (default is 32)");
100
83#define BLK_RING_SIZE __CONST_RING_SIZE(blkif, PAGE_SIZE) 101#define BLK_RING_SIZE __CONST_RING_SIZE(blkif, PAGE_SIZE)
84 102
85/* 103/*
@@ -98,7 +116,6 @@ struct blkfront_info
98 enum blkif_state connected; 116 enum blkif_state connected;
99 int ring_ref; 117 int ring_ref;
100 struct blkif_front_ring ring; 118 struct blkif_front_ring ring;
101 struct scatterlist sg[BLKIF_MAX_SEGMENTS_PER_REQUEST];
102 unsigned int evtchn, irq; 119 unsigned int evtchn, irq;
103 struct request_queue *rq; 120 struct request_queue *rq;
104 struct work_struct work; 121 struct work_struct work;
@@ -114,6 +131,7 @@ struct blkfront_info
114 unsigned int discard_granularity; 131 unsigned int discard_granularity;
115 unsigned int discard_alignment; 132 unsigned int discard_alignment;
116 unsigned int feature_persistent:1; 133 unsigned int feature_persistent:1;
134 unsigned int max_indirect_segments;
117 int is_ready; 135 int is_ready;
118}; 136};
119 137
@@ -142,6 +160,13 @@ static DEFINE_SPINLOCK(minor_lock);
142 160
143#define DEV_NAME "xvd" /* name in /dev */ 161#define DEV_NAME "xvd" /* name in /dev */
144 162
163#define SEGS_PER_INDIRECT_FRAME \
164 (PAGE_SIZE/sizeof(struct blkif_request_segment_aligned))
165#define INDIRECT_GREFS(_segs) \
166 ((_segs + SEGS_PER_INDIRECT_FRAME - 1)/SEGS_PER_INDIRECT_FRAME)
167
168static int blkfront_setup_indirect(struct blkfront_info *info);
169
145static int get_id_from_freelist(struct blkfront_info *info) 170static int get_id_from_freelist(struct blkfront_info *info)
146{ 171{
147 unsigned long free = info->shadow_free; 172 unsigned long free = info->shadow_free;
@@ -358,7 +383,8 @@ static int blkif_queue_request(struct request *req)
358 struct blkif_request *ring_req; 383 struct blkif_request *ring_req;
359 unsigned long id; 384 unsigned long id;
360 unsigned int fsect, lsect; 385 unsigned int fsect, lsect;
361 int i, ref; 386 int i, ref, n;
387 struct blkif_request_segment_aligned *segments = NULL;
362 388
363 /* 389 /*
364 * Used to store if we are able to queue the request by just using 390 * Used to store if we are able to queue the request by just using
@@ -369,21 +395,27 @@ static int blkif_queue_request(struct request *req)
369 grant_ref_t gref_head; 395 grant_ref_t gref_head;
370 struct grant *gnt_list_entry = NULL; 396 struct grant *gnt_list_entry = NULL;
371 struct scatterlist *sg; 397 struct scatterlist *sg;
398 int nseg, max_grefs;
372 399
373 if (unlikely(info->connected != BLKIF_STATE_CONNECTED)) 400 if (unlikely(info->connected != BLKIF_STATE_CONNECTED))
374 return 1; 401 return 1;
375 402
376 /* Check if we have enought grants to allocate a requests */ 403 max_grefs = info->max_indirect_segments ?
377 if (info->persistent_gnts_c < BLKIF_MAX_SEGMENTS_PER_REQUEST) { 404 info->max_indirect_segments +
405 INDIRECT_GREFS(info->max_indirect_segments) :
406 BLKIF_MAX_SEGMENTS_PER_REQUEST;
407
408 /* Check if we have enough grants to allocate a requests */
409 if (info->persistent_gnts_c < max_grefs) {
378 new_persistent_gnts = 1; 410 new_persistent_gnts = 1;
379 if (gnttab_alloc_grant_references( 411 if (gnttab_alloc_grant_references(
380 BLKIF_MAX_SEGMENTS_PER_REQUEST - info->persistent_gnts_c, 412 max_grefs - info->persistent_gnts_c,
381 &gref_head) < 0) { 413 &gref_head) < 0) {
382 gnttab_request_free_callback( 414 gnttab_request_free_callback(
383 &info->callback, 415 &info->callback,
384 blkif_restart_queue_callback, 416 blkif_restart_queue_callback,
385 info, 417 info,
386 BLKIF_MAX_SEGMENTS_PER_REQUEST); 418 max_grefs);
387 return 1; 419 return 1;
388 } 420 }
389 } else 421 } else
@@ -394,42 +426,67 @@ static int blkif_queue_request(struct request *req)
394 id = get_id_from_freelist(info); 426 id = get_id_from_freelist(info);
395 info->shadow[id].request = req; 427 info->shadow[id].request = req;
396 428
397 ring_req->u.rw.id = id;
398 ring_req->u.rw.sector_number = (blkif_sector_t)blk_rq_pos(req);
399 ring_req->u.rw.handle = info->handle;
400
401 ring_req->operation = rq_data_dir(req) ?
402 BLKIF_OP_WRITE : BLKIF_OP_READ;
403
404 if (req->cmd_flags & (REQ_FLUSH | REQ_FUA)) {
405 /*
406 * Ideally we can do an unordered flush-to-disk. In case the
407 * backend onlysupports barriers, use that. A barrier request
408 * a superset of FUA, so we can implement it the same
409 * way. (It's also a FLUSH+FUA, since it is
410 * guaranteed ordered WRT previous writes.)
411 */
412 ring_req->operation = info->flush_op;
413 }
414
415 if (unlikely(req->cmd_flags & (REQ_DISCARD | REQ_SECURE))) { 429 if (unlikely(req->cmd_flags & (REQ_DISCARD | REQ_SECURE))) {
416 /* id, sector_number and handle are set above. */
417 ring_req->operation = BLKIF_OP_DISCARD; 430 ring_req->operation = BLKIF_OP_DISCARD;
418 ring_req->u.discard.nr_sectors = blk_rq_sectors(req); 431 ring_req->u.discard.nr_sectors = blk_rq_sectors(req);
432 ring_req->u.discard.id = id;
433 ring_req->u.discard.sector_number = (blkif_sector_t)blk_rq_pos(req);
419 if ((req->cmd_flags & REQ_SECURE) && info->feature_secdiscard) 434 if ((req->cmd_flags & REQ_SECURE) && info->feature_secdiscard)
420 ring_req->u.discard.flag = BLKIF_DISCARD_SECURE; 435 ring_req->u.discard.flag = BLKIF_DISCARD_SECURE;
421 else 436 else
422 ring_req->u.discard.flag = 0; 437 ring_req->u.discard.flag = 0;
423 } else { 438 } else {
424 ring_req->u.rw.nr_segments = blk_rq_map_sg(req->q, req, 439 BUG_ON(info->max_indirect_segments == 0 &&
425 info->sg); 440 req->nr_phys_segments > BLKIF_MAX_SEGMENTS_PER_REQUEST);
426 BUG_ON(ring_req->u.rw.nr_segments > 441 BUG_ON(info->max_indirect_segments &&
427 BLKIF_MAX_SEGMENTS_PER_REQUEST); 442 req->nr_phys_segments > info->max_indirect_segments);
428 443 nseg = blk_rq_map_sg(req->q, req, info->shadow[id].sg);
429 for_each_sg(info->sg, sg, ring_req->u.rw.nr_segments, i) { 444 ring_req->u.rw.id = id;
445 if (nseg > BLKIF_MAX_SEGMENTS_PER_REQUEST) {
446 /*
447 * The indirect operation can only be a BLKIF_OP_READ or
448 * BLKIF_OP_WRITE
449 */
450 BUG_ON(req->cmd_flags & (REQ_FLUSH | REQ_FUA));
451 ring_req->operation = BLKIF_OP_INDIRECT;
452 ring_req->u.indirect.indirect_op = rq_data_dir(req) ?
453 BLKIF_OP_WRITE : BLKIF_OP_READ;
454 ring_req->u.indirect.sector_number = (blkif_sector_t)blk_rq_pos(req);
455 ring_req->u.indirect.handle = info->handle;
456 ring_req->u.indirect.nr_segments = nseg;
457 } else {
458 ring_req->u.rw.sector_number = (blkif_sector_t)blk_rq_pos(req);
459 ring_req->u.rw.handle = info->handle;
460 ring_req->operation = rq_data_dir(req) ?
461 BLKIF_OP_WRITE : BLKIF_OP_READ;
462 if (req->cmd_flags & (REQ_FLUSH | REQ_FUA)) {
463 /*
464 * Ideally we can do an unordered flush-to-disk. In case the
465 * backend onlysupports barriers, use that. A barrier request
466 * a superset of FUA, so we can implement it the same
467 * way. (It's also a FLUSH+FUA, since it is
468 * guaranteed ordered WRT previous writes.)
469 */
470 ring_req->operation = info->flush_op;
471 }
472 ring_req->u.rw.nr_segments = nseg;
473 }
474 for_each_sg(info->shadow[id].sg, sg, nseg, i) {
430 fsect = sg->offset >> 9; 475 fsect = sg->offset >> 9;
431 lsect = fsect + (sg->length >> 9) - 1; 476 lsect = fsect + (sg->length >> 9) - 1;
432 477
478 if ((ring_req->operation == BLKIF_OP_INDIRECT) &&
479 (i % SEGS_PER_INDIRECT_FRAME == 0)) {
480 if (segments)
481 kunmap_atomic(segments);
482
483 n = i / SEGS_PER_INDIRECT_FRAME;
484 gnt_list_entry = get_grant(&gref_head, info);
485 info->shadow[id].indirect_grants[n] = gnt_list_entry;
486 segments = kmap_atomic(pfn_to_page(gnt_list_entry->pfn));
487 ring_req->u.indirect.indirect_grefs[n] = gnt_list_entry->gref;
488 }
489
433 gnt_list_entry = get_grant(&gref_head, info); 490 gnt_list_entry = get_grant(&gref_head, info);
434 ref = gnt_list_entry->gref; 491 ref = gnt_list_entry->gref;
435 492
@@ -441,8 +498,7 @@ static int blkif_queue_request(struct request *req)
441 498
442 BUG_ON(sg->offset + sg->length > PAGE_SIZE); 499 BUG_ON(sg->offset + sg->length > PAGE_SIZE);
443 500
444 shared_data = kmap_atomic( 501 shared_data = kmap_atomic(pfn_to_page(gnt_list_entry->pfn));
445 pfn_to_page(gnt_list_entry->pfn));
446 bvec_data = kmap_atomic(sg_page(sg)); 502 bvec_data = kmap_atomic(sg_page(sg));
447 503
448 /* 504 /*
@@ -461,13 +517,23 @@ static int blkif_queue_request(struct request *req)
461 kunmap_atomic(bvec_data); 517 kunmap_atomic(bvec_data);
462 kunmap_atomic(shared_data); 518 kunmap_atomic(shared_data);
463 } 519 }
464 520 if (ring_req->operation != BLKIF_OP_INDIRECT) {
465 ring_req->u.rw.seg[i] = 521 ring_req->u.rw.seg[i] =
466 (struct blkif_request_segment) { 522 (struct blkif_request_segment) {
467 .gref = ref, 523 .gref = ref,
468 .first_sect = fsect, 524 .first_sect = fsect,
469 .last_sect = lsect }; 525 .last_sect = lsect };
526 } else {
527 n = i % SEGS_PER_INDIRECT_FRAME;
528 segments[n] =
529 (struct blkif_request_segment_aligned) {
530 .gref = ref,
531 .first_sect = fsect,
532 .last_sect = lsect };
533 }
470 } 534 }
535 if (segments)
536 kunmap_atomic(segments);
471 } 537 }
472 538
473 info->ring.req_prod_pvt++; 539 info->ring.req_prod_pvt++;
@@ -542,7 +608,9 @@ wait:
542 flush_requests(info); 608 flush_requests(info);
543} 609}
544 610
545static int xlvbd_init_blk_queue(struct gendisk *gd, u16 sector_size) 611static int xlvbd_init_blk_queue(struct gendisk *gd, u16 sector_size,
612 unsigned int physical_sector_size,
613 unsigned int segments)
546{ 614{
547 struct request_queue *rq; 615 struct request_queue *rq;
548 struct blkfront_info *info = gd->private_data; 616 struct blkfront_info *info = gd->private_data;
@@ -564,14 +632,15 @@ static int xlvbd_init_blk_queue(struct gendisk *gd, u16 sector_size)
564 632
565 /* Hard sector size and max sectors impersonate the equiv. hardware. */ 633 /* Hard sector size and max sectors impersonate the equiv. hardware. */
566 blk_queue_logical_block_size(rq, sector_size); 634 blk_queue_logical_block_size(rq, sector_size);
567 blk_queue_max_hw_sectors(rq, 512); 635 blk_queue_physical_block_size(rq, physical_sector_size);
636 blk_queue_max_hw_sectors(rq, (segments * PAGE_SIZE) / 512);
568 637
569 /* Each segment in a request is up to an aligned page in size. */ 638 /* Each segment in a request is up to an aligned page in size. */
570 blk_queue_segment_boundary(rq, PAGE_SIZE - 1); 639 blk_queue_segment_boundary(rq, PAGE_SIZE - 1);
571 blk_queue_max_segment_size(rq, PAGE_SIZE); 640 blk_queue_max_segment_size(rq, PAGE_SIZE);
572 641
573 /* Ensure a merged request will fit in a single I/O ring slot. */ 642 /* Ensure a merged request will fit in a single I/O ring slot. */
574 blk_queue_max_segments(rq, BLKIF_MAX_SEGMENTS_PER_REQUEST); 643 blk_queue_max_segments(rq, segments);
575 644
576 /* Make sure buffer addresses are sector-aligned. */ 645 /* Make sure buffer addresses are sector-aligned. */
577 blk_queue_dma_alignment(rq, 511); 646 blk_queue_dma_alignment(rq, 511);
@@ -588,13 +657,16 @@ static int xlvbd_init_blk_queue(struct gendisk *gd, u16 sector_size)
588static void xlvbd_flush(struct blkfront_info *info) 657static void xlvbd_flush(struct blkfront_info *info)
589{ 658{
590 blk_queue_flush(info->rq, info->feature_flush); 659 blk_queue_flush(info->rq, info->feature_flush);
591 printk(KERN_INFO "blkfront: %s: %s: %s %s\n", 660 printk(KERN_INFO "blkfront: %s: %s: %s %s %s %s %s\n",
592 info->gd->disk_name, 661 info->gd->disk_name,
593 info->flush_op == BLKIF_OP_WRITE_BARRIER ? 662 info->flush_op == BLKIF_OP_WRITE_BARRIER ?
594 "barrier" : (info->flush_op == BLKIF_OP_FLUSH_DISKCACHE ? 663 "barrier" : (info->flush_op == BLKIF_OP_FLUSH_DISKCACHE ?
595 "flush diskcache" : "barrier or flush"), 664 "flush diskcache" : "barrier or flush"),
596 info->feature_flush ? "enabled" : "disabled", 665 info->feature_flush ? "enabled;" : "disabled;",
597 info->feature_persistent ? "using persistent grants" : ""); 666 "persistent grants:",
667 info->feature_persistent ? "enabled;" : "disabled;",
668 "indirect descriptors:",
669 info->max_indirect_segments ? "enabled;" : "disabled;");
598} 670}
599 671
600static int xen_translate_vdev(int vdevice, int *minor, unsigned int *offset) 672static int xen_translate_vdev(int vdevice, int *minor, unsigned int *offset)
@@ -667,7 +739,8 @@ static char *encode_disk_name(char *ptr, unsigned int n)
667 739
668static int xlvbd_alloc_gendisk(blkif_sector_t capacity, 740static int xlvbd_alloc_gendisk(blkif_sector_t capacity,
669 struct blkfront_info *info, 741 struct blkfront_info *info,
670 u16 vdisk_info, u16 sector_size) 742 u16 vdisk_info, u16 sector_size,
743 unsigned int physical_sector_size)
671{ 744{
672 struct gendisk *gd; 745 struct gendisk *gd;
673 int nr_minors = 1; 746 int nr_minors = 1;
@@ -734,7 +807,9 @@ static int xlvbd_alloc_gendisk(blkif_sector_t capacity,
734 gd->driverfs_dev = &(info->xbdev->dev); 807 gd->driverfs_dev = &(info->xbdev->dev);
735 set_capacity(gd, capacity); 808 set_capacity(gd, capacity);
736 809
737 if (xlvbd_init_blk_queue(gd, sector_size)) { 810 if (xlvbd_init_blk_queue(gd, sector_size, physical_sector_size,
811 info->max_indirect_segments ? :
812 BLKIF_MAX_SEGMENTS_PER_REQUEST)) {
738 del_gendisk(gd); 813 del_gendisk(gd);
739 goto release; 814 goto release;
740 } 815 }
@@ -818,6 +893,7 @@ static void blkif_free(struct blkfront_info *info, int suspend)
818{ 893{
819 struct grant *persistent_gnt; 894 struct grant *persistent_gnt;
820 struct grant *n; 895 struct grant *n;
896 int i, j, segs;
821 897
822 /* Prevent new requests being issued until we fix things up. */ 898 /* Prevent new requests being issued until we fix things up. */
823 spin_lock_irq(&info->io_lock); 899 spin_lock_irq(&info->io_lock);
@@ -843,6 +919,47 @@ static void blkif_free(struct blkfront_info *info, int suspend)
843 } 919 }
844 BUG_ON(info->persistent_gnts_c != 0); 920 BUG_ON(info->persistent_gnts_c != 0);
845 921
922 for (i = 0; i < BLK_RING_SIZE; i++) {
923 /*
924 * Clear persistent grants present in requests already
925 * on the shared ring
926 */
927 if (!info->shadow[i].request)
928 goto free_shadow;
929
930 segs = info->shadow[i].req.operation == BLKIF_OP_INDIRECT ?
931 info->shadow[i].req.u.indirect.nr_segments :
932 info->shadow[i].req.u.rw.nr_segments;
933 for (j = 0; j < segs; j++) {
934 persistent_gnt = info->shadow[i].grants_used[j];
935 gnttab_end_foreign_access(persistent_gnt->gref, 0, 0UL);
936 __free_page(pfn_to_page(persistent_gnt->pfn));
937 kfree(persistent_gnt);
938 }
939
940 if (info->shadow[i].req.operation != BLKIF_OP_INDIRECT)
941 /*
942 * If this is not an indirect operation don't try to
943 * free indirect segments
944 */
945 goto free_shadow;
946
947 for (j = 0; j < INDIRECT_GREFS(segs); j++) {
948 persistent_gnt = info->shadow[i].indirect_grants[j];
949 gnttab_end_foreign_access(persistent_gnt->gref, 0, 0UL);
950 __free_page(pfn_to_page(persistent_gnt->pfn));
951 kfree(persistent_gnt);
952 }
953
954free_shadow:
955 kfree(info->shadow[i].grants_used);
956 info->shadow[i].grants_used = NULL;
957 kfree(info->shadow[i].indirect_grants);
958 info->shadow[i].indirect_grants = NULL;
959 kfree(info->shadow[i].sg);
960 info->shadow[i].sg = NULL;
961 }
962
846 /* No more gnttab callback work. */ 963 /* No more gnttab callback work. */
847 gnttab_cancel_free_callback(&info->callback); 964 gnttab_cancel_free_callback(&info->callback);
848 spin_unlock_irq(&info->io_lock); 965 spin_unlock_irq(&info->io_lock);
@@ -867,12 +984,13 @@ static void blkif_completion(struct blk_shadow *s, struct blkfront_info *info,
867 struct blkif_response *bret) 984 struct blkif_response *bret)
868{ 985{
869 int i = 0; 986 int i = 0;
870 struct bio_vec *bvec; 987 struct scatterlist *sg;
871 struct req_iterator iter;
872 unsigned long flags;
873 char *bvec_data; 988 char *bvec_data;
874 void *shared_data; 989 void *shared_data;
875 unsigned int offset = 0; 990 int nseg;
991
992 nseg = s->req.operation == BLKIF_OP_INDIRECT ?
993 s->req.u.indirect.nr_segments : s->req.u.rw.nr_segments;
876 994
877 if (bret->operation == BLKIF_OP_READ) { 995 if (bret->operation == BLKIF_OP_READ) {
878 /* 996 /*
@@ -881,26 +999,29 @@ static void blkif_completion(struct blk_shadow *s, struct blkfront_info *info,
881 * than PAGE_SIZE, we have to keep track of the current offset, 999 * than PAGE_SIZE, we have to keep track of the current offset,
882 * to be sure we are copying the data from the right shared page. 1000 * to be sure we are copying the data from the right shared page.
883 */ 1001 */
884 rq_for_each_segment(bvec, s->request, iter) { 1002 for_each_sg(s->sg, sg, nseg, i) {
885 BUG_ON((bvec->bv_offset + bvec->bv_len) > PAGE_SIZE); 1003 BUG_ON(sg->offset + sg->length > PAGE_SIZE);
886 if (bvec->bv_offset < offset)
887 i++;
888 BUG_ON(i >= s->req.u.rw.nr_segments);
889 shared_data = kmap_atomic( 1004 shared_data = kmap_atomic(
890 pfn_to_page(s->grants_used[i]->pfn)); 1005 pfn_to_page(s->grants_used[i]->pfn));
891 bvec_data = bvec_kmap_irq(bvec, &flags); 1006 bvec_data = kmap_atomic(sg_page(sg));
892 memcpy(bvec_data, shared_data + bvec->bv_offset, 1007 memcpy(bvec_data + sg->offset,
893 bvec->bv_len); 1008 shared_data + sg->offset,
894 bvec_kunmap_irq(bvec_data, &flags); 1009 sg->length);
1010 kunmap_atomic(bvec_data);
895 kunmap_atomic(shared_data); 1011 kunmap_atomic(shared_data);
896 offset = bvec->bv_offset + bvec->bv_len;
897 } 1012 }
898 } 1013 }
899 /* Add the persistent grant into the list of free grants */ 1014 /* Add the persistent grant into the list of free grants */
900 for (i = 0; i < s->req.u.rw.nr_segments; i++) { 1015 for (i = 0; i < nseg; i++) {
901 list_add(&s->grants_used[i]->node, &info->persistent_gnts); 1016 list_add(&s->grants_used[i]->node, &info->persistent_gnts);
902 info->persistent_gnts_c++; 1017 info->persistent_gnts_c++;
903 } 1018 }
1019 if (s->req.operation == BLKIF_OP_INDIRECT) {
1020 for (i = 0; i < INDIRECT_GREFS(nseg); i++) {
1021 list_add(&s->indirect_grants[i]->node, &info->persistent_gnts);
1022 info->persistent_gnts_c++;
1023 }
1024 }
904} 1025}
905 1026
906static irqreturn_t blkif_interrupt(int irq, void *dev_id) 1027static irqreturn_t blkif_interrupt(int irq, void *dev_id)
@@ -1034,14 +1155,6 @@ static int setup_blkring(struct xenbus_device *dev,
1034 SHARED_RING_INIT(sring); 1155 SHARED_RING_INIT(sring);
1035 FRONT_RING_INIT(&info->ring, sring, PAGE_SIZE); 1156 FRONT_RING_INIT(&info->ring, sring, PAGE_SIZE);
1036 1157
1037 sg_init_table(info->sg, BLKIF_MAX_SEGMENTS_PER_REQUEST);
1038
1039 /* Allocate memory for grants */
1040 err = fill_grant_buffer(info, BLK_RING_SIZE *
1041 BLKIF_MAX_SEGMENTS_PER_REQUEST);
1042 if (err)
1043 goto fail;
1044
1045 err = xenbus_grant_ring(dev, virt_to_mfn(info->ring.sring)); 1158 err = xenbus_grant_ring(dev, virt_to_mfn(info->ring.sring));
1046 if (err < 0) { 1159 if (err < 0) {
1047 free_page((unsigned long)sring); 1160 free_page((unsigned long)sring);
@@ -1223,13 +1336,84 @@ static int blkfront_probe(struct xenbus_device *dev,
1223 return 0; 1336 return 0;
1224} 1337}
1225 1338
1339/*
1340 * This is a clone of md_trim_bio, used to split a bio into smaller ones
1341 */
1342static void trim_bio(struct bio *bio, int offset, int size)
1343{
1344 /* 'bio' is a cloned bio which we need to trim to match
1345 * the given offset and size.
1346 * This requires adjusting bi_sector, bi_size, and bi_io_vec
1347 */
1348 int i;
1349 struct bio_vec *bvec;
1350 int sofar = 0;
1351
1352 size <<= 9;
1353 if (offset == 0 && size == bio->bi_size)
1354 return;
1355
1356 bio->bi_sector += offset;
1357 bio->bi_size = size;
1358 offset <<= 9;
1359 clear_bit(BIO_SEG_VALID, &bio->bi_flags);
1360
1361 while (bio->bi_idx < bio->bi_vcnt &&
1362 bio->bi_io_vec[bio->bi_idx].bv_len <= offset) {
1363 /* remove this whole bio_vec */
1364 offset -= bio->bi_io_vec[bio->bi_idx].bv_len;
1365 bio->bi_idx++;
1366 }
1367 if (bio->bi_idx < bio->bi_vcnt) {
1368 bio->bi_io_vec[bio->bi_idx].bv_offset += offset;
1369 bio->bi_io_vec[bio->bi_idx].bv_len -= offset;
1370 }
1371 /* avoid any complications with bi_idx being non-zero*/
1372 if (bio->bi_idx) {
1373 memmove(bio->bi_io_vec, bio->bi_io_vec+bio->bi_idx,
1374 (bio->bi_vcnt - bio->bi_idx) * sizeof(struct bio_vec));
1375 bio->bi_vcnt -= bio->bi_idx;
1376 bio->bi_idx = 0;
1377 }
1378 /* Make sure vcnt and last bv are not too big */
1379 bio_for_each_segment(bvec, bio, i) {
1380 if (sofar + bvec->bv_len > size)
1381 bvec->bv_len = size - sofar;
1382 if (bvec->bv_len == 0) {
1383 bio->bi_vcnt = i;
1384 break;
1385 }
1386 sofar += bvec->bv_len;
1387 }
1388}
1389
1390static void split_bio_end(struct bio *bio, int error)
1391{
1392 struct split_bio *split_bio = bio->bi_private;
1393
1394 if (error)
1395 split_bio->err = error;
1396
1397 if (atomic_dec_and_test(&split_bio->pending)) {
1398 split_bio->bio->bi_phys_segments = 0;
1399 bio_endio(split_bio->bio, split_bio->err);
1400 kfree(split_bio);
1401 }
1402 bio_put(bio);
1403}
1226 1404
1227static int blkif_recover(struct blkfront_info *info) 1405static int blkif_recover(struct blkfront_info *info)
1228{ 1406{
1229 int i; 1407 int i;
1230 struct blkif_request *req; 1408 struct request *req, *n;
1231 struct blk_shadow *copy; 1409 struct blk_shadow *copy;
1232 int j; 1410 int rc;
1411 struct bio *bio, *cloned_bio;
1412 struct bio_list bio_list, merge_bio;
1413 unsigned int segs, offset;
1414 int pending, size;
1415 struct split_bio *split_bio;
1416 struct list_head requests;
1233 1417
1234 /* Stage 1: Make a safe copy of the shadow state. */ 1418 /* Stage 1: Make a safe copy of the shadow state. */
1235 copy = kmemdup(info->shadow, sizeof(info->shadow), 1419 copy = kmemdup(info->shadow, sizeof(info->shadow),
@@ -1244,36 +1428,64 @@ static int blkif_recover(struct blkfront_info *info)
1244 info->shadow_free = info->ring.req_prod_pvt; 1428 info->shadow_free = info->ring.req_prod_pvt;
1245 info->shadow[BLK_RING_SIZE-1].req.u.rw.id = 0x0fffffff; 1429 info->shadow[BLK_RING_SIZE-1].req.u.rw.id = 0x0fffffff;
1246 1430
1247 /* Stage 3: Find pending requests and requeue them. */ 1431 rc = blkfront_setup_indirect(info);
1432 if (rc) {
1433 kfree(copy);
1434 return rc;
1435 }
1436
1437 segs = info->max_indirect_segments ? : BLKIF_MAX_SEGMENTS_PER_REQUEST;
1438 blk_queue_max_segments(info->rq, segs);
1439 bio_list_init(&bio_list);
1440 INIT_LIST_HEAD(&requests);
1248 for (i = 0; i < BLK_RING_SIZE; i++) { 1441 for (i = 0; i < BLK_RING_SIZE; i++) {
1249 /* Not in use? */ 1442 /* Not in use? */
1250 if (!copy[i].request) 1443 if (!copy[i].request)
1251 continue; 1444 continue;
1252 1445
1253 /* Grab a request slot and copy shadow state into it. */ 1446 /*
1254 req = RING_GET_REQUEST(&info->ring, info->ring.req_prod_pvt); 1447 * Get the bios in the request so we can re-queue them.
1255 *req = copy[i].req; 1448 */
1256 1449 if (copy[i].request->cmd_flags &
1257 /* We get a new request id, and must reset the shadow state. */ 1450 (REQ_FLUSH | REQ_FUA | REQ_DISCARD | REQ_SECURE)) {
1258 req->u.rw.id = get_id_from_freelist(info); 1451 /*
1259 memcpy(&info->shadow[req->u.rw.id], &copy[i], sizeof(copy[i])); 1452 * Flush operations don't contain bios, so
1260 1453 * we need to requeue the whole request
1261 if (req->operation != BLKIF_OP_DISCARD) { 1454 */
1262 /* Rewrite any grant references invalidated by susp/resume. */ 1455 list_add(&copy[i].request->queuelist, &requests);
1263 for (j = 0; j < req->u.rw.nr_segments; j++) 1456 continue;
1264 gnttab_grant_foreign_access_ref(
1265 req->u.rw.seg[j].gref,
1266 info->xbdev->otherend_id,
1267 pfn_to_mfn(copy[i].grants_used[j]->pfn),
1268 0);
1269 } 1457 }
1270 info->shadow[req->u.rw.id].req = *req; 1458 merge_bio.head = copy[i].request->bio;
1271 1459 merge_bio.tail = copy[i].request->biotail;
1272 info->ring.req_prod_pvt++; 1460 bio_list_merge(&bio_list, &merge_bio);
1461 copy[i].request->bio = NULL;
1462 blk_put_request(copy[i].request);
1273 } 1463 }
1274 1464
1275 kfree(copy); 1465 kfree(copy);
1276 1466
1467 /*
1468 * Empty the queue, this is important because we might have
1469 * requests in the queue with more segments than what we
1470 * can handle now.
1471 */
1472 spin_lock_irq(&info->io_lock);
1473 while ((req = blk_fetch_request(info->rq)) != NULL) {
1474 if (req->cmd_flags &
1475 (REQ_FLUSH | REQ_FUA | REQ_DISCARD | REQ_SECURE)) {
1476 list_add(&req->queuelist, &requests);
1477 continue;
1478 }
1479 merge_bio.head = req->bio;
1480 merge_bio.tail = req->biotail;
1481 bio_list_merge(&bio_list, &merge_bio);
1482 req->bio = NULL;
1483 if (req->cmd_flags & (REQ_FLUSH | REQ_FUA))
1484 pr_alert("diskcache flush request found!\n");
1485 __blk_put_request(info->rq, req);
1486 }
1487 spin_unlock_irq(&info->io_lock);
1488
1277 xenbus_switch_state(info->xbdev, XenbusStateConnected); 1489 xenbus_switch_state(info->xbdev, XenbusStateConnected);
1278 1490
1279 spin_lock_irq(&info->io_lock); 1491 spin_lock_irq(&info->io_lock);
@@ -1281,14 +1493,50 @@ static int blkif_recover(struct blkfront_info *info)
1281 /* Now safe for us to use the shared ring */ 1493 /* Now safe for us to use the shared ring */
1282 info->connected = BLKIF_STATE_CONNECTED; 1494 info->connected = BLKIF_STATE_CONNECTED;
1283 1495
1284 /* Send off requeued requests */
1285 flush_requests(info);
1286
1287 /* Kick any other new requests queued since we resumed */ 1496 /* Kick any other new requests queued since we resumed */
1288 kick_pending_request_queues(info); 1497 kick_pending_request_queues(info);
1289 1498
1499 list_for_each_entry_safe(req, n, &requests, queuelist) {
1500 /* Requeue pending requests (flush or discard) */
1501 list_del_init(&req->queuelist);
1502 BUG_ON(req->nr_phys_segments > segs);
1503 blk_requeue_request(info->rq, req);
1504 }
1290 spin_unlock_irq(&info->io_lock); 1505 spin_unlock_irq(&info->io_lock);
1291 1506
1507 while ((bio = bio_list_pop(&bio_list)) != NULL) {
1508 /* Traverse the list of pending bios and re-queue them */
1509 if (bio_segments(bio) > segs) {
1510 /*
1511 * This bio has more segments than what we can
1512 * handle, we have to split it.
1513 */
1514 pending = (bio_segments(bio) + segs - 1) / segs;
1515 split_bio = kzalloc(sizeof(*split_bio), GFP_NOIO);
1516 BUG_ON(split_bio == NULL);
1517 atomic_set(&split_bio->pending, pending);
1518 split_bio->bio = bio;
1519 for (i = 0; i < pending; i++) {
1520 offset = (i * segs * PAGE_SIZE) >> 9;
1521 size = min((unsigned int)(segs * PAGE_SIZE) >> 9,
1522 (unsigned int)(bio->bi_size >> 9) - offset);
1523 cloned_bio = bio_clone(bio, GFP_NOIO);
1524 BUG_ON(cloned_bio == NULL);
1525 trim_bio(cloned_bio, offset, size);
1526 cloned_bio->bi_private = split_bio;
1527 cloned_bio->bi_end_io = split_bio_end;
1528 submit_bio(cloned_bio->bi_rw, cloned_bio);
1529 }
1530 /*
1531 * Now we have to wait for all those smaller bios to
1532 * end, so we can also end the "parent" bio.
1533 */
1534 continue;
1535 }
1536 /* We don't need to split this bio */
1537 submit_bio(bio->bi_rw, bio);
1538 }
1539
1292 return 0; 1540 return 0;
1293} 1541}
1294 1542
@@ -1308,8 +1556,12 @@ static int blkfront_resume(struct xenbus_device *dev)
1308 blkif_free(info, info->connected == BLKIF_STATE_CONNECTED); 1556 blkif_free(info, info->connected == BLKIF_STATE_CONNECTED);
1309 1557
1310 err = talk_to_blkback(dev, info); 1558 err = talk_to_blkback(dev, info);
1311 if (info->connected == BLKIF_STATE_SUSPENDED && !err) 1559
1312 err = blkif_recover(info); 1560 /*
1561 * We have to wait for the backend to switch to
1562 * connected state, since we want to read which
1563 * features it supports.
1564 */
1313 1565
1314 return err; 1566 return err;
1315} 1567}
@@ -1387,6 +1639,60 @@ static void blkfront_setup_discard(struct blkfront_info *info)
1387 kfree(type); 1639 kfree(type);
1388} 1640}
1389 1641
1642static int blkfront_setup_indirect(struct blkfront_info *info)
1643{
1644 unsigned int indirect_segments, segs;
1645 int err, i;
1646
1647 err = xenbus_gather(XBT_NIL, info->xbdev->otherend,
1648 "feature-max-indirect-segments", "%u", &indirect_segments,
1649 NULL);
1650 if (err) {
1651 info->max_indirect_segments = 0;
1652 segs = BLKIF_MAX_SEGMENTS_PER_REQUEST;
1653 } else {
1654 info->max_indirect_segments = min(indirect_segments,
1655 xen_blkif_max_segments);
1656 segs = info->max_indirect_segments;
1657 }
1658
1659 err = fill_grant_buffer(info, (segs + INDIRECT_GREFS(segs)) * BLK_RING_SIZE);
1660 if (err)
1661 goto out_of_memory;
1662
1663 for (i = 0; i < BLK_RING_SIZE; i++) {
1664 info->shadow[i].grants_used = kzalloc(
1665 sizeof(info->shadow[i].grants_used[0]) * segs,
1666 GFP_NOIO);
1667 info->shadow[i].sg = kzalloc(sizeof(info->shadow[i].sg[0]) * segs, GFP_NOIO);
1668 if (info->max_indirect_segments)
1669 info->shadow[i].indirect_grants = kzalloc(
1670 sizeof(info->shadow[i].indirect_grants[0]) *
1671 INDIRECT_GREFS(segs),
1672 GFP_NOIO);
1673 if ((info->shadow[i].grants_used == NULL) ||
1674 (info->shadow[i].sg == NULL) ||
1675 (info->max_indirect_segments &&
1676 (info->shadow[i].indirect_grants == NULL)))
1677 goto out_of_memory;
1678 sg_init_table(info->shadow[i].sg, segs);
1679 }
1680
1681
1682 return 0;
1683
1684out_of_memory:
1685 for (i = 0; i < BLK_RING_SIZE; i++) {
1686 kfree(info->shadow[i].grants_used);
1687 info->shadow[i].grants_used = NULL;
1688 kfree(info->shadow[i].sg);
1689 info->shadow[i].sg = NULL;
1690 kfree(info->shadow[i].indirect_grants);
1691 info->shadow[i].indirect_grants = NULL;
1692 }
1693 return -ENOMEM;
1694}
1695
1390/* 1696/*
1391 * Invoked when the backend is finally 'ready' (and has told produced 1697 * Invoked when the backend is finally 'ready' (and has told produced
1392 * the details about the physical device - #sectors, size, etc). 1698 * the details about the physical device - #sectors, size, etc).
@@ -1395,6 +1701,7 @@ static void blkfront_connect(struct blkfront_info *info)
1395{ 1701{
1396 unsigned long long sectors; 1702 unsigned long long sectors;
1397 unsigned long sector_size; 1703 unsigned long sector_size;
1704 unsigned int physical_sector_size;
1398 unsigned int binfo; 1705 unsigned int binfo;
1399 int err; 1706 int err;
1400 int barrier, flush, discard, persistent; 1707 int barrier, flush, discard, persistent;
@@ -1414,8 +1721,15 @@ static void blkfront_connect(struct blkfront_info *info)
1414 set_capacity(info->gd, sectors); 1721 set_capacity(info->gd, sectors);
1415 revalidate_disk(info->gd); 1722 revalidate_disk(info->gd);
1416 1723
1417 /* fall through */ 1724 return;
1418 case BLKIF_STATE_SUSPENDED: 1725 case BLKIF_STATE_SUSPENDED:
1726 /*
1727 * If we are recovering from suspension, we need to wait
1728 * for the backend to announce it's features before
1729 * reconnecting, at least we need to know if the backend
1730 * supports indirect descriptors, and how many.
1731 */
1732 blkif_recover(info);
1419 return; 1733 return;
1420 1734
1421 default: 1735 default:
@@ -1437,6 +1751,16 @@ static void blkfront_connect(struct blkfront_info *info)
1437 return; 1751 return;
1438 } 1752 }
1439 1753
1754 /*
1755 * physcial-sector-size is a newer field, so old backends may not
1756 * provide this. Assume physical sector size to be the same as
1757 * sector_size in that case.
1758 */
1759 err = xenbus_scanf(XBT_NIL, info->xbdev->otherend,
1760 "physical-sector-size", "%u", &physical_sector_size);
1761 if (err != 1)
1762 physical_sector_size = sector_size;
1763
1440 info->feature_flush = 0; 1764 info->feature_flush = 0;
1441 info->flush_op = 0; 1765 info->flush_op = 0;
1442 1766
@@ -1483,7 +1807,15 @@ static void blkfront_connect(struct blkfront_info *info)
1483 else 1807 else
1484 info->feature_persistent = persistent; 1808 info->feature_persistent = persistent;
1485 1809
1486 err = xlvbd_alloc_gendisk(sectors, info, binfo, sector_size); 1810 err = blkfront_setup_indirect(info);
1811 if (err) {
1812 xenbus_dev_fatal(info->xbdev, err, "setup_indirect at %s",
1813 info->xbdev->otherend);
1814 return;
1815 }
1816
1817 err = xlvbd_alloc_gendisk(sectors, info, binfo, sector_size,
1818 physical_sector_size);
1487 if (err) { 1819 if (err) {
1488 xenbus_dev_fatal(info->xbdev, err, "xlvbd_add at %s", 1820 xenbus_dev_fatal(info->xbdev, err, "xlvbd_add at %s",
1489 info->xbdev->otherend); 1821 info->xbdev->otherend);
diff --git a/drivers/bluetooth/ath3k.c b/drivers/bluetooth/ath3k.c
index 11f467c00d0a..a12b923bbaca 100644
--- a/drivers/bluetooth/ath3k.c
+++ b/drivers/bluetooth/ath3k.c
@@ -91,6 +91,10 @@ static struct usb_device_id ath3k_table[] = {
91 { USB_DEVICE(0x0489, 0xe04e) }, 91 { USB_DEVICE(0x0489, 0xe04e) },
92 { USB_DEVICE(0x0489, 0xe056) }, 92 { USB_DEVICE(0x0489, 0xe056) },
93 { USB_DEVICE(0x0489, 0xe04d) }, 93 { USB_DEVICE(0x0489, 0xe04d) },
94 { USB_DEVICE(0x04c5, 0x1330) },
95 { USB_DEVICE(0x13d3, 0x3402) },
96 { USB_DEVICE(0x0cf3, 0x3121) },
97 { USB_DEVICE(0x0cf3, 0xe003) },
94 98
95 /* Atheros AR5BBU12 with sflash firmware */ 99 /* Atheros AR5BBU12 with sflash firmware */
96 { USB_DEVICE(0x0489, 0xE02C) }, 100 { USB_DEVICE(0x0489, 0xE02C) },
@@ -128,6 +132,10 @@ static struct usb_device_id ath3k_blist_tbl[] = {
128 { USB_DEVICE(0x0489, 0xe04e), .driver_info = BTUSB_ATH3012 }, 132 { USB_DEVICE(0x0489, 0xe04e), .driver_info = BTUSB_ATH3012 },
129 { USB_DEVICE(0x0489, 0xe056), .driver_info = BTUSB_ATH3012 }, 133 { USB_DEVICE(0x0489, 0xe056), .driver_info = BTUSB_ATH3012 },
130 { USB_DEVICE(0x0489, 0xe04d), .driver_info = BTUSB_ATH3012 }, 134 { USB_DEVICE(0x0489, 0xe04d), .driver_info = BTUSB_ATH3012 },
135 { USB_DEVICE(0x04c5, 0x1330), .driver_info = BTUSB_ATH3012 },
136 { USB_DEVICE(0x13d3, 0x3402), .driver_info = BTUSB_ATH3012 },
137 { USB_DEVICE(0x0cf3, 0x3121), .driver_info = BTUSB_ATH3012 },
138 { USB_DEVICE(0x0cf3, 0xe003), .driver_info = BTUSB_ATH3012 },
131 139
132 /* Atheros AR5BBU22 with sflash firmware */ 140 /* Atheros AR5BBU22 with sflash firmware */
133 { USB_DEVICE(0x0489, 0xE03C), .driver_info = BTUSB_ATH3012 }, 141 { USB_DEVICE(0x0489, 0xE03C), .driver_info = BTUSB_ATH3012 },
@@ -193,24 +201,44 @@ error:
193 201
194static int ath3k_get_state(struct usb_device *udev, unsigned char *state) 202static int ath3k_get_state(struct usb_device *udev, unsigned char *state)
195{ 203{
196 int pipe = 0; 204 int ret, pipe = 0;
205 char *buf;
206
207 buf = kmalloc(sizeof(*buf), GFP_KERNEL);
208 if (!buf)
209 return -ENOMEM;
197 210
198 pipe = usb_rcvctrlpipe(udev, 0); 211 pipe = usb_rcvctrlpipe(udev, 0);
199 return usb_control_msg(udev, pipe, ATH3K_GETSTATE, 212 ret = usb_control_msg(udev, pipe, ATH3K_GETSTATE,
200 USB_TYPE_VENDOR | USB_DIR_IN, 0, 0, 213 USB_TYPE_VENDOR | USB_DIR_IN, 0, 0,
201 state, 0x01, USB_CTRL_SET_TIMEOUT); 214 buf, sizeof(*buf), USB_CTRL_SET_TIMEOUT);
215
216 *state = *buf;
217 kfree(buf);
218
219 return ret;
202} 220}
203 221
204static int ath3k_get_version(struct usb_device *udev, 222static int ath3k_get_version(struct usb_device *udev,
205 struct ath3k_version *version) 223 struct ath3k_version *version)
206{ 224{
207 int pipe = 0; 225 int ret, pipe = 0;
226 struct ath3k_version *buf;
227 const int size = sizeof(*buf);
228
229 buf = kmalloc(size, GFP_KERNEL);
230 if (!buf)
231 return -ENOMEM;
208 232
209 pipe = usb_rcvctrlpipe(udev, 0); 233 pipe = usb_rcvctrlpipe(udev, 0);
210 return usb_control_msg(udev, pipe, ATH3K_GETVERSION, 234 ret = usb_control_msg(udev, pipe, ATH3K_GETVERSION,
211 USB_TYPE_VENDOR | USB_DIR_IN, 0, 0, version, 235 USB_TYPE_VENDOR | USB_DIR_IN, 0, 0,
212 sizeof(struct ath3k_version), 236 buf, size, USB_CTRL_SET_TIMEOUT);
213 USB_CTRL_SET_TIMEOUT); 237
238 memcpy(version, buf, size);
239 kfree(buf);
240
241 return ret;
214} 242}
215 243
216static int ath3k_load_fwfile(struct usb_device *udev, 244static int ath3k_load_fwfile(struct usb_device *udev,
diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c
index de4cf4daa2f4..8e16f0af6358 100644
--- a/drivers/bluetooth/btusb.c
+++ b/drivers/bluetooth/btusb.c
@@ -154,6 +154,10 @@ static struct usb_device_id blacklist_table[] = {
154 { USB_DEVICE(0x0489, 0xe04e), .driver_info = BTUSB_ATH3012 }, 154 { USB_DEVICE(0x0489, 0xe04e), .driver_info = BTUSB_ATH3012 },
155 { USB_DEVICE(0x0489, 0xe056), .driver_info = BTUSB_ATH3012 }, 155 { USB_DEVICE(0x0489, 0xe056), .driver_info = BTUSB_ATH3012 },
156 { USB_DEVICE(0x0489, 0xe04d), .driver_info = BTUSB_ATH3012 }, 156 { USB_DEVICE(0x0489, 0xe04d), .driver_info = BTUSB_ATH3012 },
157 { USB_DEVICE(0x04c5, 0x1330), .driver_info = BTUSB_ATH3012 },
158 { USB_DEVICE(0x13d3, 0x3402), .driver_info = BTUSB_ATH3012 },
159 { USB_DEVICE(0x0cf3, 0x3121), .driver_info = BTUSB_ATH3012 },
160 { USB_DEVICE(0x0cf3, 0xe003), .driver_info = BTUSB_ATH3012 },
157 161
158 /* Atheros AR5BBU12 with sflash firmware */ 162 /* Atheros AR5BBU12 with sflash firmware */
159 { USB_DEVICE(0x0489, 0xe02c), .driver_info = BTUSB_IGNORE }, 163 { USB_DEVICE(0x0489, 0xe02c), .driver_info = BTUSB_IGNORE },
@@ -1095,7 +1099,7 @@ static int btusb_setup_intel_patching(struct hci_dev *hdev,
1095 if (IS_ERR(skb)) { 1099 if (IS_ERR(skb)) {
1096 BT_ERR("%s sending Intel patch command (0x%4.4x) failed (%ld)", 1100 BT_ERR("%s sending Intel patch command (0x%4.4x) failed (%ld)",
1097 hdev->name, cmd->opcode, PTR_ERR(skb)); 1101 hdev->name, cmd->opcode, PTR_ERR(skb));
1098 return -PTR_ERR(skb); 1102 return PTR_ERR(skb);
1099 } 1103 }
1100 1104
1101 /* It ensures that the returned event matches the event data read from 1105 /* It ensures that the returned event matches the event data read from
@@ -1147,7 +1151,7 @@ static int btusb_setup_intel(struct hci_dev *hdev)
1147 if (IS_ERR(skb)) { 1151 if (IS_ERR(skb)) {
1148 BT_ERR("%s sending initial HCI reset command failed (%ld)", 1152 BT_ERR("%s sending initial HCI reset command failed (%ld)",
1149 hdev->name, PTR_ERR(skb)); 1153 hdev->name, PTR_ERR(skb));
1150 return -PTR_ERR(skb); 1154 return PTR_ERR(skb);
1151 } 1155 }
1152 kfree_skb(skb); 1156 kfree_skb(skb);
1153 1157
@@ -1161,7 +1165,7 @@ static int btusb_setup_intel(struct hci_dev *hdev)
1161 if (IS_ERR(skb)) { 1165 if (IS_ERR(skb)) {
1162 BT_ERR("%s reading Intel fw version command failed (%ld)", 1166 BT_ERR("%s reading Intel fw version command failed (%ld)",
1163 hdev->name, PTR_ERR(skb)); 1167 hdev->name, PTR_ERR(skb));
1164 return -PTR_ERR(skb); 1168 return PTR_ERR(skb);
1165 } 1169 }
1166 1170
1167 if (skb->len != sizeof(*ver)) { 1171 if (skb->len != sizeof(*ver)) {
@@ -1219,7 +1223,7 @@ static int btusb_setup_intel(struct hci_dev *hdev)
1219 BT_ERR("%s entering Intel manufacturer mode failed (%ld)", 1223 BT_ERR("%s entering Intel manufacturer mode failed (%ld)",
1220 hdev->name, PTR_ERR(skb)); 1224 hdev->name, PTR_ERR(skb));
1221 release_firmware(fw); 1225 release_firmware(fw);
1222 return -PTR_ERR(skb); 1226 return PTR_ERR(skb);
1223 } 1227 }
1224 1228
1225 if (skb->data[0]) { 1229 if (skb->data[0]) {
@@ -1276,7 +1280,7 @@ static int btusb_setup_intel(struct hci_dev *hdev)
1276 if (IS_ERR(skb)) { 1280 if (IS_ERR(skb)) {
1277 BT_ERR("%s exiting Intel manufacturer mode failed (%ld)", 1281 BT_ERR("%s exiting Intel manufacturer mode failed (%ld)",
1278 hdev->name, PTR_ERR(skb)); 1282 hdev->name, PTR_ERR(skb));
1279 return -PTR_ERR(skb); 1283 return PTR_ERR(skb);
1280 } 1284 }
1281 kfree_skb(skb); 1285 kfree_skb(skb);
1282 1286
@@ -1292,7 +1296,7 @@ exit_mfg_disable:
1292 if (IS_ERR(skb)) { 1296 if (IS_ERR(skb)) {
1293 BT_ERR("%s exiting Intel manufacturer mode failed (%ld)", 1297 BT_ERR("%s exiting Intel manufacturer mode failed (%ld)",
1294 hdev->name, PTR_ERR(skb)); 1298 hdev->name, PTR_ERR(skb));
1295 return -PTR_ERR(skb); 1299 return PTR_ERR(skb);
1296 } 1300 }
1297 kfree_skb(skb); 1301 kfree_skb(skb);
1298 1302
@@ -1310,7 +1314,7 @@ exit_mfg_deactivate:
1310 if (IS_ERR(skb)) { 1314 if (IS_ERR(skb)) {
1311 BT_ERR("%s exiting Intel manufacturer mode failed (%ld)", 1315 BT_ERR("%s exiting Intel manufacturer mode failed (%ld)",
1312 hdev->name, PTR_ERR(skb)); 1316 hdev->name, PTR_ERR(skb));
1313 return -PTR_ERR(skb); 1317 return PTR_ERR(skb);
1314 } 1318 }
1315 kfree_skb(skb); 1319 kfree_skb(skb);
1316 1320
diff --git a/drivers/char/agp/parisc-agp.c b/drivers/char/agp/parisc-agp.c
index bf5d2477cb77..15f2e7025b78 100644
--- a/drivers/char/agp/parisc-agp.c
+++ b/drivers/char/agp/parisc-agp.c
@@ -129,7 +129,8 @@ parisc_agp_insert_memory(struct agp_memory *mem, off_t pg_start, int type)
129 off_t j, io_pg_start; 129 off_t j, io_pg_start;
130 int io_pg_count; 130 int io_pg_count;
131 131
132 if (type != 0 || mem->type != 0) { 132 if (type != mem->type ||
133 agp_bridge->driver->agp_type_to_mask_type(agp_bridge, type)) {
133 return -EINVAL; 134 return -EINVAL;
134 } 135 }
135 136
@@ -175,7 +176,8 @@ parisc_agp_remove_memory(struct agp_memory *mem, off_t pg_start, int type)
175 struct _parisc_agp_info *info = &parisc_agp_info; 176 struct _parisc_agp_info *info = &parisc_agp_info;
176 int i, io_pg_start, io_pg_count; 177 int i, io_pg_start, io_pg_count;
177 178
178 if (type != 0 || mem->type != 0) { 179 if (type != mem->type ||
180 agp_bridge->driver->agp_type_to_mask_type(agp_bridge, type)) {
179 return -EINVAL; 181 return -EINVAL;
180 } 182 }
181 183
diff --git a/drivers/char/virtio_console.c b/drivers/char/virtio_console.c
index 1b456fe9b87a..fc45567ad3ac 100644
--- a/drivers/char/virtio_console.c
+++ b/drivers/char/virtio_console.c
@@ -272,9 +272,12 @@ static struct port *find_port_by_devt_in_portdev(struct ports_device *portdev,
272 unsigned long flags; 272 unsigned long flags;
273 273
274 spin_lock_irqsave(&portdev->ports_lock, flags); 274 spin_lock_irqsave(&portdev->ports_lock, flags);
275 list_for_each_entry(port, &portdev->ports, list) 275 list_for_each_entry(port, &portdev->ports, list) {
276 if (port->cdev->dev == dev) 276 if (port->cdev->dev == dev) {
277 kref_get(&port->kref);
277 goto out; 278 goto out;
279 }
280 }
278 port = NULL; 281 port = NULL;
279out: 282out:
280 spin_unlock_irqrestore(&portdev->ports_lock, flags); 283 spin_unlock_irqrestore(&portdev->ports_lock, flags);
@@ -746,6 +749,10 @@ static ssize_t port_fops_read(struct file *filp, char __user *ubuf,
746 749
747 port = filp->private_data; 750 port = filp->private_data;
748 751
752 /* Port is hot-unplugged. */
753 if (!port->guest_connected)
754 return -ENODEV;
755
749 if (!port_has_data(port)) { 756 if (!port_has_data(port)) {
750 /* 757 /*
751 * If nothing's connected on the host just return 0 in 758 * If nothing's connected on the host just return 0 in
@@ -762,7 +769,7 @@ static ssize_t port_fops_read(struct file *filp, char __user *ubuf,
762 if (ret < 0) 769 if (ret < 0)
763 return ret; 770 return ret;
764 } 771 }
765 /* Port got hot-unplugged. */ 772 /* Port got hot-unplugged while we were waiting above. */
766 if (!port->guest_connected) 773 if (!port->guest_connected)
767 return -ENODEV; 774 return -ENODEV;
768 /* 775 /*
@@ -932,13 +939,25 @@ static ssize_t port_fops_splice_write(struct pipe_inode_info *pipe,
932 if (is_rproc_serial(port->out_vq->vdev)) 939 if (is_rproc_serial(port->out_vq->vdev))
933 return -EINVAL; 940 return -EINVAL;
934 941
942 /*
943 * pipe->nrbufs == 0 means there are no data to transfer,
944 * so this returns just 0 for no data.
945 */
946 pipe_lock(pipe);
947 if (!pipe->nrbufs) {
948 ret = 0;
949 goto error_out;
950 }
951
935 ret = wait_port_writable(port, filp->f_flags & O_NONBLOCK); 952 ret = wait_port_writable(port, filp->f_flags & O_NONBLOCK);
936 if (ret < 0) 953 if (ret < 0)
937 return ret; 954 goto error_out;
938 955
939 buf = alloc_buf(port->out_vq, 0, pipe->nrbufs); 956 buf = alloc_buf(port->out_vq, 0, pipe->nrbufs);
940 if (!buf) 957 if (!buf) {
941 return -ENOMEM; 958 ret = -ENOMEM;
959 goto error_out;
960 }
942 961
943 sgl.n = 0; 962 sgl.n = 0;
944 sgl.len = 0; 963 sgl.len = 0;
@@ -946,12 +965,17 @@ static ssize_t port_fops_splice_write(struct pipe_inode_info *pipe,
946 sgl.sg = buf->sg; 965 sgl.sg = buf->sg;
947 sg_init_table(sgl.sg, sgl.size); 966 sg_init_table(sgl.sg, sgl.size);
948 ret = __splice_from_pipe(pipe, &sd, pipe_to_sg); 967 ret = __splice_from_pipe(pipe, &sd, pipe_to_sg);
968 pipe_unlock(pipe);
949 if (likely(ret > 0)) 969 if (likely(ret > 0))
950 ret = __send_to_port(port, buf->sg, sgl.n, sgl.len, buf, true); 970 ret = __send_to_port(port, buf->sg, sgl.n, sgl.len, buf, true);
951 971
952 if (unlikely(ret <= 0)) 972 if (unlikely(ret <= 0))
953 free_buf(buf, true); 973 free_buf(buf, true);
954 return ret; 974 return ret;
975
976error_out:
977 pipe_unlock(pipe);
978 return ret;
955} 979}
956 980
957static unsigned int port_fops_poll(struct file *filp, poll_table *wait) 981static unsigned int port_fops_poll(struct file *filp, poll_table *wait)
@@ -1019,14 +1043,14 @@ static int port_fops_open(struct inode *inode, struct file *filp)
1019 struct port *port; 1043 struct port *port;
1020 int ret; 1044 int ret;
1021 1045
1046 /* We get the port with a kref here */
1022 port = find_port_by_devt(cdev->dev); 1047 port = find_port_by_devt(cdev->dev);
1048 if (!port) {
1049 /* Port was unplugged before we could proceed */
1050 return -ENXIO;
1051 }
1023 filp->private_data = port; 1052 filp->private_data = port;
1024 1053
1025 /* Prevent against a port getting hot-unplugged at the same time */
1026 spin_lock_irq(&port->portdev->ports_lock);
1027 kref_get(&port->kref);
1028 spin_unlock_irq(&port->portdev->ports_lock);
1029
1030 /* 1054 /*
1031 * Don't allow opening of console port devices -- that's done 1055 * Don't allow opening of console port devices -- that's done
1032 * via /dev/hvc 1056 * via /dev/hvc
@@ -1498,14 +1522,6 @@ static void remove_port(struct kref *kref)
1498 1522
1499 port = container_of(kref, struct port, kref); 1523 port = container_of(kref, struct port, kref);
1500 1524
1501 sysfs_remove_group(&port->dev->kobj, &port_attribute_group);
1502 device_destroy(pdrvdata.class, port->dev->devt);
1503 cdev_del(port->cdev);
1504
1505 kfree(port->name);
1506
1507 debugfs_remove(port->debugfs_file);
1508
1509 kfree(port); 1525 kfree(port);
1510} 1526}
1511 1527
@@ -1539,12 +1555,14 @@ static void unplug_port(struct port *port)
1539 spin_unlock_irq(&port->portdev->ports_lock); 1555 spin_unlock_irq(&port->portdev->ports_lock);
1540 1556
1541 if (port->guest_connected) { 1557 if (port->guest_connected) {
1558 /* Let the app know the port is going down. */
1559 send_sigio_to_port(port);
1560
1561 /* Do this after sigio is actually sent */
1542 port->guest_connected = false; 1562 port->guest_connected = false;
1543 port->host_connected = false; 1563 port->host_connected = false;
1544 wake_up_interruptible(&port->waitqueue);
1545 1564
1546 /* Let the app know the port is going down. */ 1565 wake_up_interruptible(&port->waitqueue);
1547 send_sigio_to_port(port);
1548 } 1566 }
1549 1567
1550 if (is_console_port(port)) { 1568 if (is_console_port(port)) {
@@ -1563,6 +1581,14 @@ static void unplug_port(struct port *port)
1563 */ 1581 */
1564 port->portdev = NULL; 1582 port->portdev = NULL;
1565 1583
1584 sysfs_remove_group(&port->dev->kobj, &port_attribute_group);
1585 device_destroy(pdrvdata.class, port->dev->devt);
1586 cdev_del(port->cdev);
1587
1588 kfree(port->name);
1589
1590 debugfs_remove(port->debugfs_file);
1591
1566 /* 1592 /*
1567 * Locks around here are not necessary - a port can't be 1593 * Locks around here are not necessary - a port can't be
1568 * opened after we removed the port struct from ports_list 1594 * opened after we removed the port struct from ports_list
diff --git a/drivers/clocksource/arm_arch_timer.c b/drivers/clocksource/arm_arch_timer.c
index 053d846ab5b1..ffadd836e0b5 100644
--- a/drivers/clocksource/arm_arch_timer.c
+++ b/drivers/clocksource/arm_arch_timer.c
@@ -123,7 +123,7 @@ static int arch_timer_set_next_event_phys(unsigned long evt,
123 return 0; 123 return 0;
124} 124}
125 125
126static int __cpuinit arch_timer_setup(struct clock_event_device *clk) 126static int arch_timer_setup(struct clock_event_device *clk)
127{ 127{
128 clk->features = CLOCK_EVT_FEAT_ONESHOT | CLOCK_EVT_FEAT_C3STOP; 128 clk->features = CLOCK_EVT_FEAT_ONESHOT | CLOCK_EVT_FEAT_C3STOP;
129 clk->name = "arch_sys_timer"; 129 clk->name = "arch_sys_timer";
@@ -221,7 +221,7 @@ struct timecounter *arch_timer_get_timecounter(void)
221 return &timecounter; 221 return &timecounter;
222} 222}
223 223
224static void __cpuinit arch_timer_stop(struct clock_event_device *clk) 224static void arch_timer_stop(struct clock_event_device *clk)
225{ 225{
226 pr_debug("arch_timer_teardown disable IRQ%d cpu #%d\n", 226 pr_debug("arch_timer_teardown disable IRQ%d cpu #%d\n",
227 clk->irq, smp_processor_id()); 227 clk->irq, smp_processor_id());
@@ -237,7 +237,7 @@ static void __cpuinit arch_timer_stop(struct clock_event_device *clk)
237 clk->set_mode(CLOCK_EVT_MODE_UNUSED, clk); 237 clk->set_mode(CLOCK_EVT_MODE_UNUSED, clk);
238} 238}
239 239
240static int __cpuinit arch_timer_cpu_notify(struct notifier_block *self, 240static int arch_timer_cpu_notify(struct notifier_block *self,
241 unsigned long action, void *hcpu) 241 unsigned long action, void *hcpu)
242{ 242{
243 /* 243 /*
@@ -256,7 +256,7 @@ static int __cpuinit arch_timer_cpu_notify(struct notifier_block *self,
256 return NOTIFY_OK; 256 return NOTIFY_OK;
257} 257}
258 258
259static struct notifier_block arch_timer_cpu_nb __cpuinitdata = { 259static struct notifier_block arch_timer_cpu_nb = {
260 .notifier_call = arch_timer_cpu_notify, 260 .notifier_call = arch_timer_cpu_notify,
261}; 261};
262 262
diff --git a/drivers/clocksource/arm_global_timer.c b/drivers/clocksource/arm_global_timer.c
index db8afc7427a6..b66c1f36066c 100644
--- a/drivers/clocksource/arm_global_timer.c
+++ b/drivers/clocksource/arm_global_timer.c
@@ -164,7 +164,7 @@ static irqreturn_t gt_clockevent_interrupt(int irq, void *dev_id)
164 return IRQ_HANDLED; 164 return IRQ_HANDLED;
165} 165}
166 166
167static int __cpuinit gt_clockevents_init(struct clock_event_device *clk) 167static int gt_clockevents_init(struct clock_event_device *clk)
168{ 168{
169 int cpu = smp_processor_id(); 169 int cpu = smp_processor_id();
170 170
@@ -221,8 +221,8 @@ static void __init gt_clocksource_init(void)
221 clocksource_register_hz(&gt_clocksource, gt_clk_rate); 221 clocksource_register_hz(&gt_clocksource, gt_clk_rate);
222} 222}
223 223
224static int __cpuinit gt_cpu_notify(struct notifier_block *self, 224static int gt_cpu_notify(struct notifier_block *self, unsigned long action,
225 unsigned long action, void *hcpu) 225 void *hcpu)
226{ 226{
227 switch (action & ~CPU_TASKS_FROZEN) { 227 switch (action & ~CPU_TASKS_FROZEN) {
228 case CPU_STARTING: 228 case CPU_STARTING:
@@ -235,7 +235,7 @@ static int __cpuinit gt_cpu_notify(struct notifier_block *self,
235 235
236 return NOTIFY_OK; 236 return NOTIFY_OK;
237} 237}
238static struct notifier_block gt_cpu_nb __cpuinitdata = { 238static struct notifier_block gt_cpu_nb = {
239 .notifier_call = gt_cpu_notify, 239 .notifier_call = gt_cpu_notify,
240}; 240};
241 241
diff --git a/drivers/clocksource/dummy_timer.c b/drivers/clocksource/dummy_timer.c
index 1f55f9620338..b3eb582d6a6f 100644
--- a/drivers/clocksource/dummy_timer.c
+++ b/drivers/clocksource/dummy_timer.c
@@ -25,7 +25,7 @@ static void dummy_timer_set_mode(enum clock_event_mode mode,
25 */ 25 */
26} 26}
27 27
28static void __cpuinit dummy_timer_setup(void) 28static void dummy_timer_setup(void)
29{ 29{
30 int cpu = smp_processor_id(); 30 int cpu = smp_processor_id();
31 struct clock_event_device *evt = __this_cpu_ptr(&dummy_timer_evt); 31 struct clock_event_device *evt = __this_cpu_ptr(&dummy_timer_evt);
@@ -41,7 +41,7 @@ static void __cpuinit dummy_timer_setup(void)
41 clockevents_register_device(evt); 41 clockevents_register_device(evt);
42} 42}
43 43
44static int __cpuinit dummy_timer_cpu_notify(struct notifier_block *self, 44static int dummy_timer_cpu_notify(struct notifier_block *self,
45 unsigned long action, void *hcpu) 45 unsigned long action, void *hcpu)
46{ 46{
47 if ((action & ~CPU_TASKS_FROZEN) == CPU_STARTING) 47 if ((action & ~CPU_TASKS_FROZEN) == CPU_STARTING)
@@ -50,7 +50,7 @@ static int __cpuinit dummy_timer_cpu_notify(struct notifier_block *self,
50 return NOTIFY_OK; 50 return NOTIFY_OK;
51} 51}
52 52
53static struct notifier_block dummy_timer_cpu_nb __cpuinitdata = { 53static struct notifier_block dummy_timer_cpu_nb = {
54 .notifier_call = dummy_timer_cpu_notify, 54 .notifier_call = dummy_timer_cpu_notify,
55}; 55};
56 56
diff --git a/drivers/clocksource/exynos_mct.c b/drivers/clocksource/exynos_mct.c
index a70480409ea5..b2bbc415f120 100644
--- a/drivers/clocksource/exynos_mct.c
+++ b/drivers/clocksource/exynos_mct.c
@@ -400,7 +400,7 @@ static irqreturn_t exynos4_mct_tick_isr(int irq, void *dev_id)
400 return IRQ_HANDLED; 400 return IRQ_HANDLED;
401} 401}
402 402
403static int __cpuinit exynos4_local_timer_setup(struct clock_event_device *evt) 403static int exynos4_local_timer_setup(struct clock_event_device *evt)
404{ 404{
405 struct mct_clock_event_device *mevt; 405 struct mct_clock_event_device *mevt;
406 unsigned int cpu = smp_processor_id(); 406 unsigned int cpu = smp_processor_id();
@@ -448,7 +448,7 @@ static void exynos4_local_timer_stop(struct clock_event_device *evt)
448 disable_percpu_irq(mct_irqs[MCT_L0_IRQ]); 448 disable_percpu_irq(mct_irqs[MCT_L0_IRQ]);
449} 449}
450 450
451static struct local_timer_ops exynos4_mct_tick_ops __cpuinitdata = { 451static struct local_timer_ops exynos4_mct_tick_ops = {
452 .setup = exynos4_local_timer_setup, 452 .setup = exynos4_local_timer_setup,
453 .stop = exynos4_local_timer_stop, 453 .stop = exynos4_local_timer_stop,
454}; 454};
diff --git a/drivers/clocksource/metag_generic.c b/drivers/clocksource/metag_generic.c
index 6722f0e2fe40..9e4db41abe3c 100644
--- a/drivers/clocksource/metag_generic.c
+++ b/drivers/clocksource/metag_generic.c
@@ -109,7 +109,7 @@ unsigned long long sched_clock(void)
109 return ticks << HARDWARE_TO_NS_SHIFT; 109 return ticks << HARDWARE_TO_NS_SHIFT;
110} 110}
111 111
112static void __cpuinit arch_timer_setup(unsigned int cpu) 112static void arch_timer_setup(unsigned int cpu)
113{ 113{
114 unsigned int txdivtime; 114 unsigned int txdivtime;
115 struct clock_event_device *clk = &per_cpu(local_clockevent, cpu); 115 struct clock_event_device *clk = &per_cpu(local_clockevent, cpu);
@@ -154,7 +154,7 @@ static void __cpuinit arch_timer_setup(unsigned int cpu)
154 } 154 }
155} 155}
156 156
157static int __cpuinit arch_timer_cpu_notify(struct notifier_block *self, 157static int arch_timer_cpu_notify(struct notifier_block *self,
158 unsigned long action, void *hcpu) 158 unsigned long action, void *hcpu)
159{ 159{
160 int cpu = (long)hcpu; 160 int cpu = (long)hcpu;
@@ -169,7 +169,7 @@ static int __cpuinit arch_timer_cpu_notify(struct notifier_block *self,
169 return NOTIFY_OK; 169 return NOTIFY_OK;
170} 170}
171 171
172static struct notifier_block __cpuinitdata arch_timer_cpu_nb = { 172static struct notifier_block arch_timer_cpu_nb = {
173 .notifier_call = arch_timer_cpu_notify, 173 .notifier_call = arch_timer_cpu_notify,
174}; 174};
175 175
diff --git a/drivers/clocksource/time-armada-370-xp.c b/drivers/clocksource/time-armada-370-xp.c
index efdca3263afe..1b04b7e1d39b 100644
--- a/drivers/clocksource/time-armada-370-xp.c
+++ b/drivers/clocksource/time-armada-370-xp.c
@@ -167,7 +167,7 @@ static irqreturn_t armada_370_xp_timer_interrupt(int irq, void *dev_id)
167/* 167/*
168 * Setup the local clock events for a CPU. 168 * Setup the local clock events for a CPU.
169 */ 169 */
170static int __cpuinit armada_370_xp_timer_setup(struct clock_event_device *evt) 170static int armada_370_xp_timer_setup(struct clock_event_device *evt)
171{ 171{
172 u32 u; 172 u32 u;
173 int cpu = smp_processor_id(); 173 int cpu = smp_processor_id();
@@ -205,7 +205,7 @@ static void armada_370_xp_timer_stop(struct clock_event_device *evt)
205 disable_percpu_irq(evt->irq); 205 disable_percpu_irq(evt->irq);
206} 206}
207 207
208static struct local_timer_ops armada_370_xp_local_timer_ops __cpuinitdata = { 208static struct local_timer_ops armada_370_xp_local_timer_ops = {
209 .setup = armada_370_xp_timer_setup, 209 .setup = armada_370_xp_timer_setup,
210 .stop = armada_370_xp_timer_stop, 210 .stop = armada_370_xp_timer_stop,
211}; 211};
diff --git a/drivers/clocksource/timer-marco.c b/drivers/clocksource/timer-marco.c
index e5dc9129ca26..62876baa3ab9 100644
--- a/drivers/clocksource/timer-marco.c
+++ b/drivers/clocksource/timer-marco.c
@@ -184,7 +184,7 @@ static struct irqaction sirfsoc_timer1_irq = {
184 .handler = sirfsoc_timer_interrupt, 184 .handler = sirfsoc_timer_interrupt,
185}; 185};
186 186
187static int __cpuinit sirfsoc_local_timer_setup(struct clock_event_device *ce) 187static int sirfsoc_local_timer_setup(struct clock_event_device *ce)
188{ 188{
189 /* Use existing clock_event for cpu 0 */ 189 /* Use existing clock_event for cpu 0 */
190 if (!smp_processor_id()) 190 if (!smp_processor_id())
@@ -216,7 +216,7 @@ static void sirfsoc_local_timer_stop(struct clock_event_device *ce)
216 remove_irq(sirfsoc_timer1_irq.irq, &sirfsoc_timer1_irq); 216 remove_irq(sirfsoc_timer1_irq.irq, &sirfsoc_timer1_irq);
217} 217}
218 218
219static struct local_timer_ops sirfsoc_local_timer_ops __cpuinitdata = { 219static struct local_timer_ops sirfsoc_local_timer_ops = {
220 .setup = sirfsoc_local_timer_setup, 220 .setup = sirfsoc_local_timer_setup,
221 .stop = sirfsoc_local_timer_stop, 221 .stop = sirfsoc_local_timer_stop,
222}; 222};
diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
index 0937b8d6c2a4..f0a5e2b0eb8a 100644
--- a/drivers/cpufreq/cpufreq.c
+++ b/drivers/cpufreq/cpufreq.c
@@ -1177,14 +1177,11 @@ static int __cpufreq_remove_dev(struct device *dev,
1177 __func__, cpu_dev->id, cpu); 1177 __func__, cpu_dev->id, cpu);
1178 } 1178 }
1179 1179
1180 if ((cpus == 1) && (cpufreq_driver->target))
1181 __cpufreq_governor(data, CPUFREQ_GOV_POLICY_EXIT);
1182
1183 pr_debug("%s: removing link, cpu: %d\n", __func__, cpu);
1184 cpufreq_cpu_put(data);
1185
1186 /* If cpu is last user of policy, free policy */ 1180 /* If cpu is last user of policy, free policy */
1187 if (cpus == 1) { 1181 if (cpus == 1) {
1182 if (cpufreq_driver->target)
1183 __cpufreq_governor(data, CPUFREQ_GOV_POLICY_EXIT);
1184
1188 lock_policy_rwsem_read(cpu); 1185 lock_policy_rwsem_read(cpu);
1189 kobj = &data->kobj; 1186 kobj = &data->kobj;
1190 cmp = &data->kobj_unregister; 1187 cmp = &data->kobj_unregister;
@@ -1205,9 +1202,13 @@ static int __cpufreq_remove_dev(struct device *dev,
1205 free_cpumask_var(data->related_cpus); 1202 free_cpumask_var(data->related_cpus);
1206 free_cpumask_var(data->cpus); 1203 free_cpumask_var(data->cpus);
1207 kfree(data); 1204 kfree(data);
1208 } else if (cpufreq_driver->target) { 1205 } else {
1209 __cpufreq_governor(data, CPUFREQ_GOV_START); 1206 pr_debug("%s: removing link, cpu: %d\n", __func__, cpu);
1210 __cpufreq_governor(data, CPUFREQ_GOV_LIMITS); 1207 cpufreq_cpu_put(data);
1208 if (cpufreq_driver->target) {
1209 __cpufreq_governor(data, CPUFREQ_GOV_START);
1210 __cpufreq_governor(data, CPUFREQ_GOV_LIMITS);
1211 }
1211 } 1212 }
1212 1213
1213 per_cpu(cpufreq_policy_cpu, cpu) = -1; 1214 per_cpu(cpufreq_policy_cpu, cpu) = -1;
@@ -1932,7 +1933,7 @@ no_policy:
1932} 1933}
1933EXPORT_SYMBOL(cpufreq_update_policy); 1934EXPORT_SYMBOL(cpufreq_update_policy);
1934 1935
1935static int __cpuinit cpufreq_cpu_callback(struct notifier_block *nfb, 1936static int cpufreq_cpu_callback(struct notifier_block *nfb,
1936 unsigned long action, void *hcpu) 1937 unsigned long action, void *hcpu)
1937{ 1938{
1938 unsigned int cpu = (unsigned long)hcpu; 1939 unsigned int cpu = (unsigned long)hcpu;
@@ -1942,13 +1943,15 @@ static int __cpuinit cpufreq_cpu_callback(struct notifier_block *nfb,
1942 if (dev) { 1943 if (dev) {
1943 switch (action) { 1944 switch (action) {
1944 case CPU_ONLINE: 1945 case CPU_ONLINE:
1946 case CPU_ONLINE_FROZEN:
1945 cpufreq_add_dev(dev, NULL); 1947 cpufreq_add_dev(dev, NULL);
1946 break; 1948 break;
1947 case CPU_DOWN_PREPARE: 1949 case CPU_DOWN_PREPARE:
1948 case CPU_UP_CANCELED_FROZEN: 1950 case CPU_DOWN_PREPARE_FROZEN:
1949 __cpufreq_remove_dev(dev, NULL); 1951 __cpufreq_remove_dev(dev, NULL);
1950 break; 1952 break;
1951 case CPU_DOWN_FAILED: 1953 case CPU_DOWN_FAILED:
1954 case CPU_DOWN_FAILED_FROZEN:
1952 cpufreq_add_dev(dev, NULL); 1955 cpufreq_add_dev(dev, NULL);
1953 break; 1956 break;
1954 } 1957 }
diff --git a/drivers/cpufreq/cpufreq_conservative.c b/drivers/cpufreq/cpufreq_conservative.c
index 0ceb2eff5a7e..f97cb3d8c5a2 100644
--- a/drivers/cpufreq/cpufreq_conservative.c
+++ b/drivers/cpufreq/cpufreq_conservative.c
@@ -221,8 +221,8 @@ static ssize_t store_down_threshold(struct dbs_data *dbs_data, const char *buf,
221 return count; 221 return count;
222} 222}
223 223
224static ssize_t store_ignore_nice(struct dbs_data *dbs_data, const char *buf, 224static ssize_t store_ignore_nice_load(struct dbs_data *dbs_data,
225 size_t count) 225 const char *buf, size_t count)
226{ 226{
227 struct cs_dbs_tuners *cs_tuners = dbs_data->tuners; 227 struct cs_dbs_tuners *cs_tuners = dbs_data->tuners;
228 unsigned int input, j; 228 unsigned int input, j;
@@ -235,10 +235,10 @@ static ssize_t store_ignore_nice(struct dbs_data *dbs_data, const char *buf,
235 if (input > 1) 235 if (input > 1)
236 input = 1; 236 input = 1;
237 237
238 if (input == cs_tuners->ignore_nice) /* nothing to do */ 238 if (input == cs_tuners->ignore_nice_load) /* nothing to do */
239 return count; 239 return count;
240 240
241 cs_tuners->ignore_nice = input; 241 cs_tuners->ignore_nice_load = input;
242 242
243 /* we need to re-evaluate prev_cpu_idle */ 243 /* we need to re-evaluate prev_cpu_idle */
244 for_each_online_cpu(j) { 244 for_each_online_cpu(j) {
@@ -246,7 +246,7 @@ static ssize_t store_ignore_nice(struct dbs_data *dbs_data, const char *buf,
246 dbs_info = &per_cpu(cs_cpu_dbs_info, j); 246 dbs_info = &per_cpu(cs_cpu_dbs_info, j);
247 dbs_info->cdbs.prev_cpu_idle = get_cpu_idle_time(j, 247 dbs_info->cdbs.prev_cpu_idle = get_cpu_idle_time(j,
248 &dbs_info->cdbs.prev_cpu_wall, 0); 248 &dbs_info->cdbs.prev_cpu_wall, 0);
249 if (cs_tuners->ignore_nice) 249 if (cs_tuners->ignore_nice_load)
250 dbs_info->cdbs.prev_cpu_nice = 250 dbs_info->cdbs.prev_cpu_nice =
251 kcpustat_cpu(j).cpustat[CPUTIME_NICE]; 251 kcpustat_cpu(j).cpustat[CPUTIME_NICE];
252 } 252 }
@@ -279,7 +279,7 @@ show_store_one(cs, sampling_rate);
279show_store_one(cs, sampling_down_factor); 279show_store_one(cs, sampling_down_factor);
280show_store_one(cs, up_threshold); 280show_store_one(cs, up_threshold);
281show_store_one(cs, down_threshold); 281show_store_one(cs, down_threshold);
282show_store_one(cs, ignore_nice); 282show_store_one(cs, ignore_nice_load);
283show_store_one(cs, freq_step); 283show_store_one(cs, freq_step);
284declare_show_sampling_rate_min(cs); 284declare_show_sampling_rate_min(cs);
285 285
@@ -287,7 +287,7 @@ gov_sys_pol_attr_rw(sampling_rate);
287gov_sys_pol_attr_rw(sampling_down_factor); 287gov_sys_pol_attr_rw(sampling_down_factor);
288gov_sys_pol_attr_rw(up_threshold); 288gov_sys_pol_attr_rw(up_threshold);
289gov_sys_pol_attr_rw(down_threshold); 289gov_sys_pol_attr_rw(down_threshold);
290gov_sys_pol_attr_rw(ignore_nice); 290gov_sys_pol_attr_rw(ignore_nice_load);
291gov_sys_pol_attr_rw(freq_step); 291gov_sys_pol_attr_rw(freq_step);
292gov_sys_pol_attr_ro(sampling_rate_min); 292gov_sys_pol_attr_ro(sampling_rate_min);
293 293
@@ -297,7 +297,7 @@ static struct attribute *dbs_attributes_gov_sys[] = {
297 &sampling_down_factor_gov_sys.attr, 297 &sampling_down_factor_gov_sys.attr,
298 &up_threshold_gov_sys.attr, 298 &up_threshold_gov_sys.attr,
299 &down_threshold_gov_sys.attr, 299 &down_threshold_gov_sys.attr,
300 &ignore_nice_gov_sys.attr, 300 &ignore_nice_load_gov_sys.attr,
301 &freq_step_gov_sys.attr, 301 &freq_step_gov_sys.attr,
302 NULL 302 NULL
303}; 303};
@@ -313,7 +313,7 @@ static struct attribute *dbs_attributes_gov_pol[] = {
313 &sampling_down_factor_gov_pol.attr, 313 &sampling_down_factor_gov_pol.attr,
314 &up_threshold_gov_pol.attr, 314 &up_threshold_gov_pol.attr,
315 &down_threshold_gov_pol.attr, 315 &down_threshold_gov_pol.attr,
316 &ignore_nice_gov_pol.attr, 316 &ignore_nice_load_gov_pol.attr,
317 &freq_step_gov_pol.attr, 317 &freq_step_gov_pol.attr,
318 NULL 318 NULL
319}; 319};
@@ -338,7 +338,7 @@ static int cs_init(struct dbs_data *dbs_data)
338 tuners->up_threshold = DEF_FREQUENCY_UP_THRESHOLD; 338 tuners->up_threshold = DEF_FREQUENCY_UP_THRESHOLD;
339 tuners->down_threshold = DEF_FREQUENCY_DOWN_THRESHOLD; 339 tuners->down_threshold = DEF_FREQUENCY_DOWN_THRESHOLD;
340 tuners->sampling_down_factor = DEF_SAMPLING_DOWN_FACTOR; 340 tuners->sampling_down_factor = DEF_SAMPLING_DOWN_FACTOR;
341 tuners->ignore_nice = 0; 341 tuners->ignore_nice_load = 0;
342 tuners->freq_step = DEF_FREQUENCY_STEP; 342 tuners->freq_step = DEF_FREQUENCY_STEP;
343 343
344 dbs_data->tuners = tuners; 344 dbs_data->tuners = tuners;
diff --git a/drivers/cpufreq/cpufreq_governor.c b/drivers/cpufreq/cpufreq_governor.c
index 464587697561..e59afaa9da23 100644
--- a/drivers/cpufreq/cpufreq_governor.c
+++ b/drivers/cpufreq/cpufreq_governor.c
@@ -25,7 +25,6 @@
25#include <linux/slab.h> 25#include <linux/slab.h>
26#include <linux/types.h> 26#include <linux/types.h>
27#include <linux/workqueue.h> 27#include <linux/workqueue.h>
28#include <linux/cpu.h>
29 28
30#include "cpufreq_governor.h" 29#include "cpufreq_governor.h"
31 30
@@ -48,9 +47,9 @@ void dbs_check_cpu(struct dbs_data *dbs_data, int cpu)
48 unsigned int j; 47 unsigned int j;
49 48
50 if (dbs_data->cdata->governor == GOV_ONDEMAND) 49 if (dbs_data->cdata->governor == GOV_ONDEMAND)
51 ignore_nice = od_tuners->ignore_nice; 50 ignore_nice = od_tuners->ignore_nice_load;
52 else 51 else
53 ignore_nice = cs_tuners->ignore_nice; 52 ignore_nice = cs_tuners->ignore_nice_load;
54 53
55 policy = cdbs->cur_policy; 54 policy = cdbs->cur_policy;
56 55
@@ -137,10 +136,8 @@ void gov_queue_work(struct dbs_data *dbs_data, struct cpufreq_policy *policy,
137 if (!all_cpus) { 136 if (!all_cpus) {
138 __gov_queue_work(smp_processor_id(), dbs_data, delay); 137 __gov_queue_work(smp_processor_id(), dbs_data, delay);
139 } else { 138 } else {
140 get_online_cpus();
141 for_each_cpu(i, policy->cpus) 139 for_each_cpu(i, policy->cpus)
142 __gov_queue_work(i, dbs_data, delay); 140 __gov_queue_work(i, dbs_data, delay);
143 put_online_cpus();
144 } 141 }
145} 142}
146EXPORT_SYMBOL_GPL(gov_queue_work); 143EXPORT_SYMBOL_GPL(gov_queue_work);
@@ -301,12 +298,12 @@ int cpufreq_governor_dbs(struct cpufreq_policy *policy,
301 cs_tuners = dbs_data->tuners; 298 cs_tuners = dbs_data->tuners;
302 cs_dbs_info = dbs_data->cdata->get_cpu_dbs_info_s(cpu); 299 cs_dbs_info = dbs_data->cdata->get_cpu_dbs_info_s(cpu);
303 sampling_rate = cs_tuners->sampling_rate; 300 sampling_rate = cs_tuners->sampling_rate;
304 ignore_nice = cs_tuners->ignore_nice; 301 ignore_nice = cs_tuners->ignore_nice_load;
305 } else { 302 } else {
306 od_tuners = dbs_data->tuners; 303 od_tuners = dbs_data->tuners;
307 od_dbs_info = dbs_data->cdata->get_cpu_dbs_info_s(cpu); 304 od_dbs_info = dbs_data->cdata->get_cpu_dbs_info_s(cpu);
308 sampling_rate = od_tuners->sampling_rate; 305 sampling_rate = od_tuners->sampling_rate;
309 ignore_nice = od_tuners->ignore_nice; 306 ignore_nice = od_tuners->ignore_nice_load;
310 od_ops = dbs_data->cdata->gov_ops; 307 od_ops = dbs_data->cdata->gov_ops;
311 io_busy = od_tuners->io_is_busy; 308 io_busy = od_tuners->io_is_busy;
312 } 309 }
diff --git a/drivers/cpufreq/cpufreq_governor.h b/drivers/cpufreq/cpufreq_governor.h
index 6663ec3b3056..d5f12b4b11b8 100644
--- a/drivers/cpufreq/cpufreq_governor.h
+++ b/drivers/cpufreq/cpufreq_governor.h
@@ -165,7 +165,7 @@ struct cs_cpu_dbs_info_s {
165 165
166/* Per policy Governers sysfs tunables */ 166/* Per policy Governers sysfs tunables */
167struct od_dbs_tuners { 167struct od_dbs_tuners {
168 unsigned int ignore_nice; 168 unsigned int ignore_nice_load;
169 unsigned int sampling_rate; 169 unsigned int sampling_rate;
170 unsigned int sampling_down_factor; 170 unsigned int sampling_down_factor;
171 unsigned int up_threshold; 171 unsigned int up_threshold;
@@ -175,7 +175,7 @@ struct od_dbs_tuners {
175}; 175};
176 176
177struct cs_dbs_tuners { 177struct cs_dbs_tuners {
178 unsigned int ignore_nice; 178 unsigned int ignore_nice_load;
179 unsigned int sampling_rate; 179 unsigned int sampling_rate;
180 unsigned int sampling_down_factor; 180 unsigned int sampling_down_factor;
181 unsigned int up_threshold; 181 unsigned int up_threshold;
diff --git a/drivers/cpufreq/cpufreq_ondemand.c b/drivers/cpufreq/cpufreq_ondemand.c
index 93eb5cbcc1f6..c087347d6688 100644
--- a/drivers/cpufreq/cpufreq_ondemand.c
+++ b/drivers/cpufreq/cpufreq_ondemand.c
@@ -403,8 +403,8 @@ static ssize_t store_sampling_down_factor(struct dbs_data *dbs_data,
403 return count; 403 return count;
404} 404}
405 405
406static ssize_t store_ignore_nice(struct dbs_data *dbs_data, const char *buf, 406static ssize_t store_ignore_nice_load(struct dbs_data *dbs_data,
407 size_t count) 407 const char *buf, size_t count)
408{ 408{
409 struct od_dbs_tuners *od_tuners = dbs_data->tuners; 409 struct od_dbs_tuners *od_tuners = dbs_data->tuners;
410 unsigned int input; 410 unsigned int input;
@@ -419,10 +419,10 @@ static ssize_t store_ignore_nice(struct dbs_data *dbs_data, const char *buf,
419 if (input > 1) 419 if (input > 1)
420 input = 1; 420 input = 1;
421 421
422 if (input == od_tuners->ignore_nice) { /* nothing to do */ 422 if (input == od_tuners->ignore_nice_load) { /* nothing to do */
423 return count; 423 return count;
424 } 424 }
425 od_tuners->ignore_nice = input; 425 od_tuners->ignore_nice_load = input;
426 426
427 /* we need to re-evaluate prev_cpu_idle */ 427 /* we need to re-evaluate prev_cpu_idle */
428 for_each_online_cpu(j) { 428 for_each_online_cpu(j) {
@@ -430,7 +430,7 @@ static ssize_t store_ignore_nice(struct dbs_data *dbs_data, const char *buf,
430 dbs_info = &per_cpu(od_cpu_dbs_info, j); 430 dbs_info = &per_cpu(od_cpu_dbs_info, j);
431 dbs_info->cdbs.prev_cpu_idle = get_cpu_idle_time(j, 431 dbs_info->cdbs.prev_cpu_idle = get_cpu_idle_time(j,
432 &dbs_info->cdbs.prev_cpu_wall, od_tuners->io_is_busy); 432 &dbs_info->cdbs.prev_cpu_wall, od_tuners->io_is_busy);
433 if (od_tuners->ignore_nice) 433 if (od_tuners->ignore_nice_load)
434 dbs_info->cdbs.prev_cpu_nice = 434 dbs_info->cdbs.prev_cpu_nice =
435 kcpustat_cpu(j).cpustat[CPUTIME_NICE]; 435 kcpustat_cpu(j).cpustat[CPUTIME_NICE];
436 436
@@ -461,7 +461,7 @@ show_store_one(od, sampling_rate);
461show_store_one(od, io_is_busy); 461show_store_one(od, io_is_busy);
462show_store_one(od, up_threshold); 462show_store_one(od, up_threshold);
463show_store_one(od, sampling_down_factor); 463show_store_one(od, sampling_down_factor);
464show_store_one(od, ignore_nice); 464show_store_one(od, ignore_nice_load);
465show_store_one(od, powersave_bias); 465show_store_one(od, powersave_bias);
466declare_show_sampling_rate_min(od); 466declare_show_sampling_rate_min(od);
467 467
@@ -469,7 +469,7 @@ gov_sys_pol_attr_rw(sampling_rate);
469gov_sys_pol_attr_rw(io_is_busy); 469gov_sys_pol_attr_rw(io_is_busy);
470gov_sys_pol_attr_rw(up_threshold); 470gov_sys_pol_attr_rw(up_threshold);
471gov_sys_pol_attr_rw(sampling_down_factor); 471gov_sys_pol_attr_rw(sampling_down_factor);
472gov_sys_pol_attr_rw(ignore_nice); 472gov_sys_pol_attr_rw(ignore_nice_load);
473gov_sys_pol_attr_rw(powersave_bias); 473gov_sys_pol_attr_rw(powersave_bias);
474gov_sys_pol_attr_ro(sampling_rate_min); 474gov_sys_pol_attr_ro(sampling_rate_min);
475 475
@@ -478,7 +478,7 @@ static struct attribute *dbs_attributes_gov_sys[] = {
478 &sampling_rate_gov_sys.attr, 478 &sampling_rate_gov_sys.attr,
479 &up_threshold_gov_sys.attr, 479 &up_threshold_gov_sys.attr,
480 &sampling_down_factor_gov_sys.attr, 480 &sampling_down_factor_gov_sys.attr,
481 &ignore_nice_gov_sys.attr, 481 &ignore_nice_load_gov_sys.attr,
482 &powersave_bias_gov_sys.attr, 482 &powersave_bias_gov_sys.attr,
483 &io_is_busy_gov_sys.attr, 483 &io_is_busy_gov_sys.attr,
484 NULL 484 NULL
@@ -494,7 +494,7 @@ static struct attribute *dbs_attributes_gov_pol[] = {
494 &sampling_rate_gov_pol.attr, 494 &sampling_rate_gov_pol.attr,
495 &up_threshold_gov_pol.attr, 495 &up_threshold_gov_pol.attr,
496 &sampling_down_factor_gov_pol.attr, 496 &sampling_down_factor_gov_pol.attr,
497 &ignore_nice_gov_pol.attr, 497 &ignore_nice_load_gov_pol.attr,
498 &powersave_bias_gov_pol.attr, 498 &powersave_bias_gov_pol.attr,
499 &io_is_busy_gov_pol.attr, 499 &io_is_busy_gov_pol.attr,
500 NULL 500 NULL
@@ -544,7 +544,7 @@ static int od_init(struct dbs_data *dbs_data)
544 } 544 }
545 545
546 tuners->sampling_down_factor = DEF_SAMPLING_DOWN_FACTOR; 546 tuners->sampling_down_factor = DEF_SAMPLING_DOWN_FACTOR;
547 tuners->ignore_nice = 0; 547 tuners->ignore_nice_load = 0;
548 tuners->powersave_bias = default_powersave_bias; 548 tuners->powersave_bias = default_powersave_bias;
549 tuners->io_is_busy = should_io_be_busy(); 549 tuners->io_is_busy = should_io_be_busy();
550 550
diff --git a/drivers/cpufreq/cpufreq_stats.c b/drivers/cpufreq/cpufreq_stats.c
index cd9e81713a71..d37568c5ca9c 100644
--- a/drivers/cpufreq/cpufreq_stats.c
+++ b/drivers/cpufreq/cpufreq_stats.c
@@ -341,7 +341,7 @@ static int cpufreq_stat_notifier_trans(struct notifier_block *nb,
341 return 0; 341 return 0;
342} 342}
343 343
344static int __cpuinit cpufreq_stat_cpu_callback(struct notifier_block *nfb, 344static int cpufreq_stat_cpu_callback(struct notifier_block *nfb,
345 unsigned long action, 345 unsigned long action,
346 void *hcpu) 346 void *hcpu)
347{ 347{
@@ -353,13 +353,11 @@ static int __cpuinit cpufreq_stat_cpu_callback(struct notifier_block *nfb,
353 cpufreq_update_policy(cpu); 353 cpufreq_update_policy(cpu);
354 break; 354 break;
355 case CPU_DOWN_PREPARE: 355 case CPU_DOWN_PREPARE:
356 case CPU_DOWN_PREPARE_FROZEN:
356 cpufreq_stats_free_sysfs(cpu); 357 cpufreq_stats_free_sysfs(cpu);
357 break; 358 break;
358 case CPU_DEAD: 359 case CPU_DEAD:
359 cpufreq_stats_free_table(cpu); 360 case CPU_DEAD_FROZEN:
360 break;
361 case CPU_UP_CANCELED_FROZEN:
362 cpufreq_stats_free_sysfs(cpu);
363 cpufreq_stats_free_table(cpu); 361 cpufreq_stats_free_table(cpu);
364 break; 362 break;
365 } 363 }
diff --git a/drivers/cpufreq/dbx500-cpufreq.c b/drivers/cpufreq/dbx500-cpufreq.c
index 1fdb02b9f1ec..26321cdc1946 100644
--- a/drivers/cpufreq/dbx500-cpufreq.c
+++ b/drivers/cpufreq/dbx500-cpufreq.c
@@ -82,7 +82,7 @@ static unsigned int dbx500_cpufreq_getspeed(unsigned int cpu)
82 return freq_table[i].frequency; 82 return freq_table[i].frequency;
83} 83}
84 84
85static int __cpuinit dbx500_cpufreq_init(struct cpufreq_policy *policy) 85static int dbx500_cpufreq_init(struct cpufreq_policy *policy)
86{ 86{
87 int res; 87 int res;
88 88
diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c
index 07f2840ad805..7cde885011ed 100644
--- a/drivers/cpufreq/intel_pstate.c
+++ b/drivers/cpufreq/intel_pstate.c
@@ -103,10 +103,10 @@ struct pstate_adjust_policy {
103static struct pstate_adjust_policy default_policy = { 103static struct pstate_adjust_policy default_policy = {
104 .sample_rate_ms = 10, 104 .sample_rate_ms = 10,
105 .deadband = 0, 105 .deadband = 0,
106 .setpoint = 109, 106 .setpoint = 97,
107 .p_gain_pct = 17, 107 .p_gain_pct = 20,
108 .d_gain_pct = 0, 108 .d_gain_pct = 0,
109 .i_gain_pct = 4, 109 .i_gain_pct = 0,
110}; 110};
111 111
112struct perf_limits { 112struct perf_limits {
@@ -468,12 +468,12 @@ static inline void intel_pstate_set_sample_time(struct cpudata *cpu)
468static inline int intel_pstate_get_scaled_busy(struct cpudata *cpu) 468static inline int intel_pstate_get_scaled_busy(struct cpudata *cpu)
469{ 469{
470 int32_t busy_scaled; 470 int32_t busy_scaled;
471 int32_t core_busy, turbo_pstate, current_pstate; 471 int32_t core_busy, max_pstate, current_pstate;
472 472
473 core_busy = int_tofp(cpu->samples[cpu->sample_ptr].core_pct_busy); 473 core_busy = int_tofp(cpu->samples[cpu->sample_ptr].core_pct_busy);
474 turbo_pstate = int_tofp(cpu->pstate.turbo_pstate); 474 max_pstate = int_tofp(cpu->pstate.max_pstate);
475 current_pstate = int_tofp(cpu->pstate.current_pstate); 475 current_pstate = int_tofp(cpu->pstate.current_pstate);
476 busy_scaled = mul_fp(core_busy, div_fp(turbo_pstate, current_pstate)); 476 busy_scaled = mul_fp(core_busy, div_fp(max_pstate, current_pstate));
477 477
478 return fp_toint(busy_scaled); 478 return fp_toint(busy_scaled);
479} 479}
@@ -617,7 +617,7 @@ static int intel_pstate_verify_policy(struct cpufreq_policy *policy)
617 return 0; 617 return 0;
618} 618}
619 619
620static int __cpuinit intel_pstate_cpu_exit(struct cpufreq_policy *policy) 620static int intel_pstate_cpu_exit(struct cpufreq_policy *policy)
621{ 621{
622 int cpu = policy->cpu; 622 int cpu = policy->cpu;
623 623
@@ -627,7 +627,7 @@ static int __cpuinit intel_pstate_cpu_exit(struct cpufreq_policy *policy)
627 return 0; 627 return 0;
628} 628}
629 629
630static int __cpuinit intel_pstate_cpu_init(struct cpufreq_policy *policy) 630static int intel_pstate_cpu_init(struct cpufreq_policy *policy)
631{ 631{
632 int rc, min_pstate, max_pstate; 632 int rc, min_pstate, max_pstate;
633 struct cpudata *cpu; 633 struct cpudata *cpu;
diff --git a/drivers/cpufreq/longhaul.c b/drivers/cpufreq/longhaul.c
index b6a0a7a406b0..8c49261df57d 100644
--- a/drivers/cpufreq/longhaul.c
+++ b/drivers/cpufreq/longhaul.c
@@ -422,7 +422,7 @@ static int guess_fsb(int mult)
422} 422}
423 423
424 424
425static int __cpuinit longhaul_get_ranges(void) 425static int longhaul_get_ranges(void)
426{ 426{
427 unsigned int i, j, k = 0; 427 unsigned int i, j, k = 0;
428 unsigned int ratio; 428 unsigned int ratio;
@@ -526,7 +526,7 @@ static int __cpuinit longhaul_get_ranges(void)
526} 526}
527 527
528 528
529static void __cpuinit longhaul_setup_voltagescaling(void) 529static void longhaul_setup_voltagescaling(void)
530{ 530{
531 union msr_longhaul longhaul; 531 union msr_longhaul longhaul;
532 struct mV_pos minvid, maxvid, vid; 532 struct mV_pos minvid, maxvid, vid;
@@ -780,7 +780,7 @@ static int longhaul_setup_southbridge(void)
780 return 0; 780 return 0;
781} 781}
782 782
783static int __cpuinit longhaul_cpu_init(struct cpufreq_policy *policy) 783static int longhaul_cpu_init(struct cpufreq_policy *policy)
784{ 784{
785 struct cpuinfo_x86 *c = &cpu_data(0); 785 struct cpuinfo_x86 *c = &cpu_data(0);
786 char *cpuname = NULL; 786 char *cpuname = NULL;
diff --git a/drivers/cpufreq/longhaul.h b/drivers/cpufreq/longhaul.h
index e2dc436099d1..1928b923a57b 100644
--- a/drivers/cpufreq/longhaul.h
+++ b/drivers/cpufreq/longhaul.h
@@ -56,7 +56,7 @@ union msr_longhaul {
56/* 56/*
57 * VIA C3 Samuel 1 & Samuel 2 (stepping 0) 57 * VIA C3 Samuel 1 & Samuel 2 (stepping 0)
58 */ 58 */
59static const int __cpuinitconst samuel1_mults[16] = { 59static const int samuel1_mults[16] = {
60 -1, /* 0000 -> RESERVED */ 60 -1, /* 0000 -> RESERVED */
61 30, /* 0001 -> 3.0x */ 61 30, /* 0001 -> 3.0x */
62 40, /* 0010 -> 4.0x */ 62 40, /* 0010 -> 4.0x */
@@ -75,7 +75,7 @@ static const int __cpuinitconst samuel1_mults[16] = {
75 -1, /* 1111 -> RESERVED */ 75 -1, /* 1111 -> RESERVED */
76}; 76};
77 77
78static const int __cpuinitconst samuel1_eblcr[16] = { 78static const int samuel1_eblcr[16] = {
79 50, /* 0000 -> RESERVED */ 79 50, /* 0000 -> RESERVED */
80 30, /* 0001 -> 3.0x */ 80 30, /* 0001 -> 3.0x */
81 40, /* 0010 -> 4.0x */ 81 40, /* 0010 -> 4.0x */
@@ -97,7 +97,7 @@ static const int __cpuinitconst samuel1_eblcr[16] = {
97/* 97/*
98 * VIA C3 Samuel2 Stepping 1->15 98 * VIA C3 Samuel2 Stepping 1->15
99 */ 99 */
100static const int __cpuinitconst samuel2_eblcr[16] = { 100static const int samuel2_eblcr[16] = {
101 50, /* 0000 -> 5.0x */ 101 50, /* 0000 -> 5.0x */
102 30, /* 0001 -> 3.0x */ 102 30, /* 0001 -> 3.0x */
103 40, /* 0010 -> 4.0x */ 103 40, /* 0010 -> 4.0x */
@@ -119,7 +119,7 @@ static const int __cpuinitconst samuel2_eblcr[16] = {
119/* 119/*
120 * VIA C3 Ezra 120 * VIA C3 Ezra
121 */ 121 */
122static const int __cpuinitconst ezra_mults[16] = { 122static const int ezra_mults[16] = {
123 100, /* 0000 -> 10.0x */ 123 100, /* 0000 -> 10.0x */
124 30, /* 0001 -> 3.0x */ 124 30, /* 0001 -> 3.0x */
125 40, /* 0010 -> 4.0x */ 125 40, /* 0010 -> 4.0x */
@@ -138,7 +138,7 @@ static const int __cpuinitconst ezra_mults[16] = {
138 120, /* 1111 -> 12.0x */ 138 120, /* 1111 -> 12.0x */
139}; 139};
140 140
141static const int __cpuinitconst ezra_eblcr[16] = { 141static const int ezra_eblcr[16] = {
142 50, /* 0000 -> 5.0x */ 142 50, /* 0000 -> 5.0x */
143 30, /* 0001 -> 3.0x */ 143 30, /* 0001 -> 3.0x */
144 40, /* 0010 -> 4.0x */ 144 40, /* 0010 -> 4.0x */
@@ -160,7 +160,7 @@ static const int __cpuinitconst ezra_eblcr[16] = {
160/* 160/*
161 * VIA C3 (Ezra-T) [C5M]. 161 * VIA C3 (Ezra-T) [C5M].
162 */ 162 */
163static const int __cpuinitconst ezrat_mults[32] = { 163static const int ezrat_mults[32] = {
164 100, /* 0000 -> 10.0x */ 164 100, /* 0000 -> 10.0x */
165 30, /* 0001 -> 3.0x */ 165 30, /* 0001 -> 3.0x */
166 40, /* 0010 -> 4.0x */ 166 40, /* 0010 -> 4.0x */
@@ -196,7 +196,7 @@ static const int __cpuinitconst ezrat_mults[32] = {
196 -1, /* 1111 -> RESERVED (12.0x) */ 196 -1, /* 1111 -> RESERVED (12.0x) */
197}; 197};
198 198
199static const int __cpuinitconst ezrat_eblcr[32] = { 199static const int ezrat_eblcr[32] = {
200 50, /* 0000 -> 5.0x */ 200 50, /* 0000 -> 5.0x */
201 30, /* 0001 -> 3.0x */ 201 30, /* 0001 -> 3.0x */
202 40, /* 0010 -> 4.0x */ 202 40, /* 0010 -> 4.0x */
@@ -235,7 +235,7 @@ static const int __cpuinitconst ezrat_eblcr[32] = {
235/* 235/*
236 * VIA C3 Nehemiah */ 236 * VIA C3 Nehemiah */
237 237
238static const int __cpuinitconst nehemiah_mults[32] = { 238static const int nehemiah_mults[32] = {
239 100, /* 0000 -> 10.0x */ 239 100, /* 0000 -> 10.0x */
240 -1, /* 0001 -> 16.0x */ 240 -1, /* 0001 -> 16.0x */
241 40, /* 0010 -> 4.0x */ 241 40, /* 0010 -> 4.0x */
@@ -270,7 +270,7 @@ static const int __cpuinitconst nehemiah_mults[32] = {
270 -1, /* 1111 -> 12.0x */ 270 -1, /* 1111 -> 12.0x */
271}; 271};
272 272
273static const int __cpuinitconst nehemiah_eblcr[32] = { 273static const int nehemiah_eblcr[32] = {
274 50, /* 0000 -> 5.0x */ 274 50, /* 0000 -> 5.0x */
275 160, /* 0001 -> 16.0x */ 275 160, /* 0001 -> 16.0x */
276 40, /* 0010 -> 4.0x */ 276 40, /* 0010 -> 4.0x */
@@ -315,7 +315,7 @@ struct mV_pos {
315 unsigned short pos; 315 unsigned short pos;
316}; 316};
317 317
318static const struct mV_pos __cpuinitconst vrm85_mV[32] = { 318static const struct mV_pos vrm85_mV[32] = {
319 {1250, 8}, {1200, 6}, {1150, 4}, {1100, 2}, 319 {1250, 8}, {1200, 6}, {1150, 4}, {1100, 2},
320 {1050, 0}, {1800, 30}, {1750, 28}, {1700, 26}, 320 {1050, 0}, {1800, 30}, {1750, 28}, {1700, 26},
321 {1650, 24}, {1600, 22}, {1550, 20}, {1500, 18}, 321 {1650, 24}, {1600, 22}, {1550, 20}, {1500, 18},
@@ -326,14 +326,14 @@ static const struct mV_pos __cpuinitconst vrm85_mV[32] = {
326 {1475, 17}, {1425, 15}, {1375, 13}, {1325, 11} 326 {1475, 17}, {1425, 15}, {1375, 13}, {1325, 11}
327}; 327};
328 328
329static const unsigned char __cpuinitconst mV_vrm85[32] = { 329static const unsigned char mV_vrm85[32] = {
330 0x04, 0x14, 0x03, 0x13, 0x02, 0x12, 0x01, 0x11, 330 0x04, 0x14, 0x03, 0x13, 0x02, 0x12, 0x01, 0x11,
331 0x00, 0x10, 0x0f, 0x1f, 0x0e, 0x1e, 0x0d, 0x1d, 331 0x00, 0x10, 0x0f, 0x1f, 0x0e, 0x1e, 0x0d, 0x1d,
332 0x0c, 0x1c, 0x0b, 0x1b, 0x0a, 0x1a, 0x09, 0x19, 332 0x0c, 0x1c, 0x0b, 0x1b, 0x0a, 0x1a, 0x09, 0x19,
333 0x08, 0x18, 0x07, 0x17, 0x06, 0x16, 0x05, 0x15 333 0x08, 0x18, 0x07, 0x17, 0x06, 0x16, 0x05, 0x15
334}; 334};
335 335
336static const struct mV_pos __cpuinitconst mobilevrm_mV[32] = { 336static const struct mV_pos mobilevrm_mV[32] = {
337 {1750, 31}, {1700, 30}, {1650, 29}, {1600, 28}, 337 {1750, 31}, {1700, 30}, {1650, 29}, {1600, 28},
338 {1550, 27}, {1500, 26}, {1450, 25}, {1400, 24}, 338 {1550, 27}, {1500, 26}, {1450, 25}, {1400, 24},
339 {1350, 23}, {1300, 22}, {1250, 21}, {1200, 20}, 339 {1350, 23}, {1300, 22}, {1250, 21}, {1200, 20},
@@ -344,7 +344,7 @@ static const struct mV_pos __cpuinitconst mobilevrm_mV[32] = {
344 {675, 3}, {650, 2}, {625, 1}, {600, 0} 344 {675, 3}, {650, 2}, {625, 1}, {600, 0}
345}; 345};
346 346
347static const unsigned char __cpuinitconst mV_mobilevrm[32] = { 347static const unsigned char mV_mobilevrm[32] = {
348 0x1f, 0x1e, 0x1d, 0x1c, 0x1b, 0x1a, 0x19, 0x18, 348 0x1f, 0x1e, 0x1d, 0x1c, 0x1b, 0x1a, 0x19, 0x18,
349 0x17, 0x16, 0x15, 0x14, 0x13, 0x12, 0x11, 0x10, 349 0x17, 0x16, 0x15, 0x14, 0x13, 0x12, 0x11, 0x10,
350 0x0f, 0x0e, 0x0d, 0x0c, 0x0b, 0x0a, 0x09, 0x08, 350 0x0f, 0x0e, 0x0d, 0x0c, 0x0b, 0x0a, 0x09, 0x08,
diff --git a/drivers/cpufreq/longrun.c b/drivers/cpufreq/longrun.c
index 8bc9f5fbbaeb..0fe041d1f77f 100644
--- a/drivers/cpufreq/longrun.c
+++ b/drivers/cpufreq/longrun.c
@@ -33,7 +33,7 @@ static unsigned int longrun_low_freq, longrun_high_freq;
33 * Reads the current LongRun policy by access to MSR_TMTA_LONGRUN_FLAGS 33 * Reads the current LongRun policy by access to MSR_TMTA_LONGRUN_FLAGS
34 * and MSR_TMTA_LONGRUN_CTRL 34 * and MSR_TMTA_LONGRUN_CTRL
35 */ 35 */
36static void __cpuinit longrun_get_policy(struct cpufreq_policy *policy) 36static void longrun_get_policy(struct cpufreq_policy *policy)
37{ 37{
38 u32 msr_lo, msr_hi; 38 u32 msr_lo, msr_hi;
39 39
@@ -163,7 +163,7 @@ static unsigned int longrun_get(unsigned int cpu)
163 * TMTA rules: 163 * TMTA rules:
164 * performance_pctg = (target_freq - low_freq)/(high_freq - low_freq) 164 * performance_pctg = (target_freq - low_freq)/(high_freq - low_freq)
165 */ 165 */
166static int __cpuinit longrun_determine_freqs(unsigned int *low_freq, 166static int longrun_determine_freqs(unsigned int *low_freq,
167 unsigned int *high_freq) 167 unsigned int *high_freq)
168{ 168{
169 u32 msr_lo, msr_hi; 169 u32 msr_lo, msr_hi;
@@ -256,7 +256,7 @@ static int __cpuinit longrun_determine_freqs(unsigned int *low_freq,
256} 256}
257 257
258 258
259static int __cpuinit longrun_cpu_init(struct cpufreq_policy *policy) 259static int longrun_cpu_init(struct cpufreq_policy *policy)
260{ 260{
261 int result = 0; 261 int result = 0;
262 262
diff --git a/drivers/cpufreq/loongson2_cpufreq.c b/drivers/cpufreq/loongson2_cpufreq.c
index bb838b985077..9536852c504a 100644
--- a/drivers/cpufreq/loongson2_cpufreq.c
+++ b/drivers/cpufreq/loongson2_cpufreq.c
@@ -118,11 +118,6 @@ static int loongson2_cpufreq_cpu_init(struct cpufreq_policy *policy)
118 clk_put(cpuclk); 118 clk_put(cpuclk);
119 return -EINVAL; 119 return -EINVAL;
120 } 120 }
121 ret = clk_set_rate(cpuclk, rate);
122 if (ret) {
123 clk_put(cpuclk);
124 return ret;
125 }
126 121
127 /* clock table init */ 122 /* clock table init */
128 for (i = 2; 123 for (i = 2;
@@ -130,6 +125,12 @@ static int loongson2_cpufreq_cpu_init(struct cpufreq_policy *policy)
130 i++) 125 i++)
131 loongson2_clockmod_table[i].frequency = (rate * i) / 8; 126 loongson2_clockmod_table[i].frequency = (rate * i) / 8;
132 127
128 ret = clk_set_rate(cpuclk, rate);
129 if (ret) {
130 clk_put(cpuclk);
131 return ret;
132 }
133
133 policy->cur = loongson2_cpufreq_get(policy->cpu); 134 policy->cur = loongson2_cpufreq_get(policy->cpu);
134 135
135 cpufreq_frequency_table_get_attr(&loongson2_clockmod_table[0], 136 cpufreq_frequency_table_get_attr(&loongson2_clockmod_table[0],
diff --git a/drivers/cpufreq/omap-cpufreq.c b/drivers/cpufreq/omap-cpufreq.c
index 29468a522ee9..f31fcfcad514 100644
--- a/drivers/cpufreq/omap-cpufreq.c
+++ b/drivers/cpufreq/omap-cpufreq.c
@@ -165,7 +165,7 @@ static inline void freq_table_free(void)
165 opp_free_cpufreq_table(mpu_dev, &freq_table); 165 opp_free_cpufreq_table(mpu_dev, &freq_table);
166} 166}
167 167
168static int __cpuinit omap_cpu_init(struct cpufreq_policy *policy) 168static int omap_cpu_init(struct cpufreq_policy *policy)
169{ 169{
170 int result = 0; 170 int result = 0;
171 171
diff --git a/drivers/cpufreq/powernow-k7.c b/drivers/cpufreq/powernow-k7.c
index b9f80b713fda..955870877935 100644
--- a/drivers/cpufreq/powernow-k7.c
+++ b/drivers/cpufreq/powernow-k7.c
@@ -563,7 +563,7 @@ static int powernow_verify(struct cpufreq_policy *policy)
563 * We will then get the same kind of behaviour already tested under 563 * We will then get the same kind of behaviour already tested under
564 * the "well-known" other OS. 564 * the "well-known" other OS.
565 */ 565 */
566static int __cpuinit fixup_sgtc(void) 566static int fixup_sgtc(void)
567{ 567{
568 unsigned int sgtc; 568 unsigned int sgtc;
569 unsigned int m; 569 unsigned int m;
@@ -597,7 +597,7 @@ static unsigned int powernow_get(unsigned int cpu)
597} 597}
598 598
599 599
600static int __cpuinit acer_cpufreq_pst(const struct dmi_system_id *d) 600static int acer_cpufreq_pst(const struct dmi_system_id *d)
601{ 601{
602 printk(KERN_WARNING PFX 602 printk(KERN_WARNING PFX
603 "%s laptop with broken PST tables in BIOS detected.\n", 603 "%s laptop with broken PST tables in BIOS detected.\n",
@@ -615,7 +615,7 @@ static int __cpuinit acer_cpufreq_pst(const struct dmi_system_id *d)
615 * A BIOS update is all that can save them. 615 * A BIOS update is all that can save them.
616 * Mention this, and disable cpufreq. 616 * Mention this, and disable cpufreq.
617 */ 617 */
618static struct dmi_system_id __cpuinitdata powernow_dmi_table[] = { 618static struct dmi_system_id powernow_dmi_table[] = {
619 { 619 {
620 .callback = acer_cpufreq_pst, 620 .callback = acer_cpufreq_pst,
621 .ident = "Acer Aspire", 621 .ident = "Acer Aspire",
@@ -627,7 +627,7 @@ static struct dmi_system_id __cpuinitdata powernow_dmi_table[] = {
627 { } 627 { }
628}; 628};
629 629
630static int __cpuinit powernow_cpu_init(struct cpufreq_policy *policy) 630static int powernow_cpu_init(struct cpufreq_policy *policy)
631{ 631{
632 union msr_fidvidstatus fidvidstatus; 632 union msr_fidvidstatus fidvidstatus;
633 int result; 633 int result;
diff --git a/drivers/cpufreq/powernow-k8.c b/drivers/cpufreq/powernow-k8.c
index 78f018f2a5de..c39d189217cb 100644
--- a/drivers/cpufreq/powernow-k8.c
+++ b/drivers/cpufreq/powernow-k8.c
@@ -1069,7 +1069,7 @@ struct init_on_cpu {
1069 int rc; 1069 int rc;
1070}; 1070};
1071 1071
1072static void __cpuinit powernowk8_cpu_init_on_cpu(void *_init_on_cpu) 1072static void powernowk8_cpu_init_on_cpu(void *_init_on_cpu)
1073{ 1073{
1074 struct init_on_cpu *init_on_cpu = _init_on_cpu; 1074 struct init_on_cpu *init_on_cpu = _init_on_cpu;
1075 1075
@@ -1096,7 +1096,7 @@ static const char missing_pss_msg[] =
1096 FW_BUG PFX "If that doesn't help, try upgrading your BIOS.\n"; 1096 FW_BUG PFX "If that doesn't help, try upgrading your BIOS.\n";
1097 1097
1098/* per CPU init entry point to the driver */ 1098/* per CPU init entry point to the driver */
1099static int __cpuinit powernowk8_cpu_init(struct cpufreq_policy *pol) 1099static int powernowk8_cpu_init(struct cpufreq_policy *pol)
1100{ 1100{
1101 struct powernow_k8_data *data; 1101 struct powernow_k8_data *data;
1102 struct init_on_cpu init_on_cpu; 1102 struct init_on_cpu init_on_cpu;
@@ -1263,7 +1263,7 @@ static void __request_acpi_cpufreq(void)
1263} 1263}
1264 1264
1265/* driver entry point for init */ 1265/* driver entry point for init */
1266static int __cpuinit powernowk8_init(void) 1266static int powernowk8_init(void)
1267{ 1267{
1268 unsigned int i, supported_cpus = 0; 1268 unsigned int i, supported_cpus = 0;
1269 int ret; 1269 int ret;
diff --git a/drivers/cpufreq/s3c24xx-cpufreq.c b/drivers/cpufreq/s3c24xx-cpufreq.c
index 3513e7477160..87781eb20d6d 100644
--- a/drivers/cpufreq/s3c24xx-cpufreq.c
+++ b/drivers/cpufreq/s3c24xx-cpufreq.c
@@ -49,7 +49,7 @@ static struct clk *clk_hclk;
49static struct clk *clk_pclk; 49static struct clk *clk_pclk;
50static struct clk *clk_arm; 50static struct clk *clk_arm;
51 51
52#ifdef CONFIG_CPU_FREQ_S3C24XX_DEBUGFS 52#ifdef CONFIG_ARM_S3C24XX_CPUFREQ_DEBUGFS
53struct s3c_cpufreq_config *s3c_cpufreq_getconfig(void) 53struct s3c_cpufreq_config *s3c_cpufreq_getconfig(void)
54{ 54{
55 return &cpu_cur; 55 return &cpu_cur;
@@ -59,7 +59,7 @@ struct s3c_iotimings *s3c_cpufreq_getiotimings(void)
59{ 59{
60 return &s3c24xx_iotiming; 60 return &s3c24xx_iotiming;
61} 61}
62#endif /* CONFIG_CPU_FREQ_S3C24XX_DEBUGFS */ 62#endif /* CONFIG_ARM_S3C24XX_CPUFREQ_DEBUGFS */
63 63
64static void s3c_cpufreq_getcur(struct s3c_cpufreq_config *cfg) 64static void s3c_cpufreq_getcur(struct s3c_cpufreq_config *cfg)
65{ 65{
diff --git a/drivers/cpuidle/governors/menu.c b/drivers/cpuidle/governors/menu.c
index fe343a06b7da..bc580b67a652 100644
--- a/drivers/cpuidle/governors/menu.c
+++ b/drivers/cpuidle/governors/menu.c
@@ -28,13 +28,6 @@
28#define MAX_INTERESTING 50000 28#define MAX_INTERESTING 50000
29#define STDDEV_THRESH 400 29#define STDDEV_THRESH 400
30 30
31/* 60 * 60 > STDDEV_THRESH * INTERVALS = 400 * 8 */
32#define MAX_DEVIATION 60
33
34static DEFINE_PER_CPU(struct hrtimer, menu_hrtimer);
35static DEFINE_PER_CPU(int, hrtimer_status);
36/* menu hrtimer mode */
37enum {MENU_HRTIMER_STOP, MENU_HRTIMER_REPEAT, MENU_HRTIMER_GENERAL};
38 31
39/* 32/*
40 * Concepts and ideas behind the menu governor 33 * Concepts and ideas behind the menu governor
@@ -116,13 +109,6 @@ enum {MENU_HRTIMER_STOP, MENU_HRTIMER_REPEAT, MENU_HRTIMER_GENERAL};
116 * 109 *
117 */ 110 */
118 111
119/*
120 * The C-state residency is so long that is is worthwhile to exit
121 * from the shallow C-state and re-enter into a deeper C-state.
122 */
123static unsigned int perfect_cstate_ms __read_mostly = 30;
124module_param(perfect_cstate_ms, uint, 0000);
125
126struct menu_device { 112struct menu_device {
127 int last_state_idx; 113 int last_state_idx;
128 int needs_update; 114 int needs_update;
@@ -205,52 +191,17 @@ static u64 div_round64(u64 dividend, u32 divisor)
205 return div_u64(dividend + (divisor / 2), divisor); 191 return div_u64(dividend + (divisor / 2), divisor);
206} 192}
207 193
208/* Cancel the hrtimer if it is not triggered yet */
209void menu_hrtimer_cancel(void)
210{
211 int cpu = smp_processor_id();
212 struct hrtimer *hrtmr = &per_cpu(menu_hrtimer, cpu);
213
214 /* The timer is still not time out*/
215 if (per_cpu(hrtimer_status, cpu)) {
216 hrtimer_cancel(hrtmr);
217 per_cpu(hrtimer_status, cpu) = MENU_HRTIMER_STOP;
218 }
219}
220EXPORT_SYMBOL_GPL(menu_hrtimer_cancel);
221
222/* Call back for hrtimer is triggered */
223static enum hrtimer_restart menu_hrtimer_notify(struct hrtimer *hrtimer)
224{
225 int cpu = smp_processor_id();
226 struct menu_device *data = &per_cpu(menu_devices, cpu);
227
228 /* In general case, the expected residency is much larger than
229 * deepest C-state target residency, but prediction logic still
230 * predicts a small predicted residency, so the prediction
231 * history is totally broken if the timer is triggered.
232 * So reset the correction factor.
233 */
234 if (per_cpu(hrtimer_status, cpu) == MENU_HRTIMER_GENERAL)
235 data->correction_factor[data->bucket] = RESOLUTION * DECAY;
236
237 per_cpu(hrtimer_status, cpu) = MENU_HRTIMER_STOP;
238
239 return HRTIMER_NORESTART;
240}
241
242/* 194/*
243 * Try detecting repeating patterns by keeping track of the last 8 195 * Try detecting repeating patterns by keeping track of the last 8
244 * intervals, and checking if the standard deviation of that set 196 * intervals, and checking if the standard deviation of that set
245 * of points is below a threshold. If it is... then use the 197 * of points is below a threshold. If it is... then use the
246 * average of these 8 points as the estimated value. 198 * average of these 8 points as the estimated value.
247 */ 199 */
248static u32 get_typical_interval(struct menu_device *data) 200static void get_typical_interval(struct menu_device *data)
249{ 201{
250 int i = 0, divisor = 0; 202 int i = 0, divisor = 0;
251 uint64_t max = 0, avg = 0, stddev = 0; 203 uint64_t max = 0, avg = 0, stddev = 0;
252 int64_t thresh = LLONG_MAX; /* Discard outliers above this value. */ 204 int64_t thresh = LLONG_MAX; /* Discard outliers above this value. */
253 unsigned int ret = 0;
254 205
255again: 206again:
256 207
@@ -291,16 +242,13 @@ again:
291 if (((avg > stddev * 6) && (divisor * 4 >= INTERVALS * 3)) 242 if (((avg > stddev * 6) && (divisor * 4 >= INTERVALS * 3))
292 || stddev <= 20) { 243 || stddev <= 20) {
293 data->predicted_us = avg; 244 data->predicted_us = avg;
294 ret = 1; 245 return;
295 return ret;
296 246
297 } else if ((divisor * 4) > INTERVALS * 3) { 247 } else if ((divisor * 4) > INTERVALS * 3) {
298 /* Exclude the max interval */ 248 /* Exclude the max interval */
299 thresh = max - 1; 249 thresh = max - 1;
300 goto again; 250 goto again;
301 } 251 }
302
303 return ret;
304} 252}
305 253
306/** 254/**
@@ -315,9 +263,6 @@ static int menu_select(struct cpuidle_driver *drv, struct cpuidle_device *dev)
315 int i; 263 int i;
316 int multiplier; 264 int multiplier;
317 struct timespec t; 265 struct timespec t;
318 int repeat = 0, low_predicted = 0;
319 int cpu = smp_processor_id();
320 struct hrtimer *hrtmr = &per_cpu(menu_hrtimer, cpu);
321 266
322 if (data->needs_update) { 267 if (data->needs_update) {
323 menu_update(drv, dev); 268 menu_update(drv, dev);
@@ -352,7 +297,7 @@ static int menu_select(struct cpuidle_driver *drv, struct cpuidle_device *dev)
352 data->predicted_us = div_round64(data->expected_us * data->correction_factor[data->bucket], 297 data->predicted_us = div_round64(data->expected_us * data->correction_factor[data->bucket],
353 RESOLUTION * DECAY); 298 RESOLUTION * DECAY);
354 299
355 repeat = get_typical_interval(data); 300 get_typical_interval(data);
356 301
357 /* 302 /*
358 * We want to default to C1 (hlt), not to busy polling 303 * We want to default to C1 (hlt), not to busy polling
@@ -373,10 +318,8 @@ static int menu_select(struct cpuidle_driver *drv, struct cpuidle_device *dev)
373 318
374 if (s->disabled || su->disable) 319 if (s->disabled || su->disable)
375 continue; 320 continue;
376 if (s->target_residency > data->predicted_us) { 321 if (s->target_residency > data->predicted_us)
377 low_predicted = 1;
378 continue; 322 continue;
379 }
380 if (s->exit_latency > latency_req) 323 if (s->exit_latency > latency_req)
381 continue; 324 continue;
382 if (s->exit_latency * multiplier > data->predicted_us) 325 if (s->exit_latency * multiplier > data->predicted_us)
@@ -386,44 +329,6 @@ static int menu_select(struct cpuidle_driver *drv, struct cpuidle_device *dev)
386 data->exit_us = s->exit_latency; 329 data->exit_us = s->exit_latency;
387 } 330 }
388 331
389 /* not deepest C-state chosen for low predicted residency */
390 if (low_predicted) {
391 unsigned int timer_us = 0;
392 unsigned int perfect_us = 0;
393
394 /*
395 * Set a timer to detect whether this sleep is much
396 * longer than repeat mode predicted. If the timer
397 * triggers, the code will evaluate whether to put
398 * the CPU into a deeper C-state.
399 * The timer is cancelled on CPU wakeup.
400 */
401 timer_us = 2 * (data->predicted_us + MAX_DEVIATION);
402
403 perfect_us = perfect_cstate_ms * 1000;
404
405 if (repeat && (4 * timer_us < data->expected_us)) {
406 RCU_NONIDLE(hrtimer_start(hrtmr,
407 ns_to_ktime(1000 * timer_us),
408 HRTIMER_MODE_REL_PINNED));
409 /* In repeat case, menu hrtimer is started */
410 per_cpu(hrtimer_status, cpu) = MENU_HRTIMER_REPEAT;
411 } else if (perfect_us < data->expected_us) {
412 /*
413 * The next timer is long. This could be because
414 * we did not make a useful prediction.
415 * In that case, it makes sense to re-enter
416 * into a deeper C-state after some time.
417 */
418 RCU_NONIDLE(hrtimer_start(hrtmr,
419 ns_to_ktime(1000 * timer_us),
420 HRTIMER_MODE_REL_PINNED));
421 /* In general case, menu hrtimer is started */
422 per_cpu(hrtimer_status, cpu) = MENU_HRTIMER_GENERAL;
423 }
424
425 }
426
427 return data->last_state_idx; 332 return data->last_state_idx;
428} 333}
429 334
@@ -514,9 +419,6 @@ static int menu_enable_device(struct cpuidle_driver *drv,
514 struct cpuidle_device *dev) 419 struct cpuidle_device *dev)
515{ 420{
516 struct menu_device *data = &per_cpu(menu_devices, dev->cpu); 421 struct menu_device *data = &per_cpu(menu_devices, dev->cpu);
517 struct hrtimer *t = &per_cpu(menu_hrtimer, dev->cpu);
518 hrtimer_init(t, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
519 t->function = menu_hrtimer_notify;
520 422
521 memset(data, 0, sizeof(struct menu_device)); 423 memset(data, 0, sizeof(struct menu_device));
522 424
diff --git a/drivers/crypto/caam/caamhash.c b/drivers/crypto/caam/caamhash.c
index 5996521a1caf..84573b4d6f92 100644
--- a/drivers/crypto/caam/caamhash.c
+++ b/drivers/crypto/caam/caamhash.c
@@ -429,7 +429,7 @@ static int hash_digest_key(struct caam_hash_ctx *ctx, const u8 *key_in,
429 dma_addr_t src_dma, dst_dma; 429 dma_addr_t src_dma, dst_dma;
430 int ret = 0; 430 int ret = 0;
431 431
432 desc = kmalloc(CAAM_CMD_SZ * 6 + CAAM_PTR_SZ * 2, GFP_KERNEL | GFP_DMA); 432 desc = kmalloc(CAAM_CMD_SZ * 8 + CAAM_PTR_SZ * 2, GFP_KERNEL | GFP_DMA);
433 if (!desc) { 433 if (!desc) {
434 dev_err(jrdev, "unable to allocate key input memory\n"); 434 dev_err(jrdev, "unable to allocate key input memory\n");
435 return -ENOMEM; 435 return -ENOMEM;
diff --git a/drivers/dma/pch_dma.c b/drivers/dma/pch_dma.c
index ce3dc3e9688c..0bbdea5059f3 100644
--- a/drivers/dma/pch_dma.c
+++ b/drivers/dma/pch_dma.c
@@ -867,6 +867,7 @@ static int pch_dma_probe(struct pci_dev *pdev,
867 867
868 if (!(pci_resource_flags(pdev, 1) & IORESOURCE_MEM)) { 868 if (!(pci_resource_flags(pdev, 1) & IORESOURCE_MEM)) {
869 dev_err(&pdev->dev, "Cannot find proper base address\n"); 869 dev_err(&pdev->dev, "Cannot find proper base address\n");
870 err = -ENODEV;
870 goto err_disable_pdev; 871 goto err_disable_pdev;
871 } 872 }
872 873
diff --git a/drivers/dma/pl330.c b/drivers/dma/pl330.c
index 593827b3fdd4..fa645d825009 100644
--- a/drivers/dma/pl330.c
+++ b/drivers/dma/pl330.c
@@ -2505,6 +2505,10 @@ static dma_cookie_t pl330_tx_submit(struct dma_async_tx_descriptor *tx)
2505 /* Assign cookies to all nodes */ 2505 /* Assign cookies to all nodes */
2506 while (!list_empty(&last->node)) { 2506 while (!list_empty(&last->node)) {
2507 desc = list_entry(last->node.next, struct dma_pl330_desc, node); 2507 desc = list_entry(last->node.next, struct dma_pl330_desc, node);
2508 if (pch->cyclic) {
2509 desc->txd.callback = last->txd.callback;
2510 desc->txd.callback_param = last->txd.callback_param;
2511 }
2508 2512
2509 dma_cookie_assign(&desc->txd); 2513 dma_cookie_assign(&desc->txd);
2510 2514
@@ -2688,45 +2692,82 @@ static struct dma_async_tx_descriptor *pl330_prep_dma_cyclic(
2688 size_t period_len, enum dma_transfer_direction direction, 2692 size_t period_len, enum dma_transfer_direction direction,
2689 unsigned long flags, void *context) 2693 unsigned long flags, void *context)
2690{ 2694{
2691 struct dma_pl330_desc *desc; 2695 struct dma_pl330_desc *desc = NULL, *first = NULL;
2692 struct dma_pl330_chan *pch = to_pchan(chan); 2696 struct dma_pl330_chan *pch = to_pchan(chan);
2697 struct dma_pl330_dmac *pdmac = pch->dmac;
2698 unsigned int i;
2693 dma_addr_t dst; 2699 dma_addr_t dst;
2694 dma_addr_t src; 2700 dma_addr_t src;
2695 2701
2696 desc = pl330_get_desc(pch); 2702 if (len % period_len != 0)
2697 if (!desc) {
2698 dev_err(pch->dmac->pif.dev, "%s:%d Unable to fetch desc\n",
2699 __func__, __LINE__);
2700 return NULL; 2703 return NULL;
2701 }
2702 2704
2703 switch (direction) { 2705 if (!is_slave_direction(direction)) {
2704 case DMA_MEM_TO_DEV:
2705 desc->rqcfg.src_inc = 1;
2706 desc->rqcfg.dst_inc = 0;
2707 desc->req.rqtype = MEMTODEV;
2708 src = dma_addr;
2709 dst = pch->fifo_addr;
2710 break;
2711 case DMA_DEV_TO_MEM:
2712 desc->rqcfg.src_inc = 0;
2713 desc->rqcfg.dst_inc = 1;
2714 desc->req.rqtype = DEVTOMEM;
2715 src = pch->fifo_addr;
2716 dst = dma_addr;
2717 break;
2718 default:
2719 dev_err(pch->dmac->pif.dev, "%s:%d Invalid dma direction\n", 2706 dev_err(pch->dmac->pif.dev, "%s:%d Invalid dma direction\n",
2720 __func__, __LINE__); 2707 __func__, __LINE__);
2721 return NULL; 2708 return NULL;
2722 } 2709 }
2723 2710
2724 desc->rqcfg.brst_size = pch->burst_sz; 2711 for (i = 0; i < len / period_len; i++) {
2725 desc->rqcfg.brst_len = 1; 2712 desc = pl330_get_desc(pch);
2713 if (!desc) {
2714 dev_err(pch->dmac->pif.dev, "%s:%d Unable to fetch desc\n",
2715 __func__, __LINE__);
2726 2716
2727 pch->cyclic = true; 2717 if (!first)
2718 return NULL;
2719
2720 spin_lock_irqsave(&pdmac->pool_lock, flags);
2721
2722 while (!list_empty(&first->node)) {
2723 desc = list_entry(first->node.next,
2724 struct dma_pl330_desc, node);
2725 list_move_tail(&desc->node, &pdmac->desc_pool);
2726 }
2727
2728 list_move_tail(&first->node, &pdmac->desc_pool);
2728 2729
2729 fill_px(&desc->px, dst, src, period_len); 2730 spin_unlock_irqrestore(&pdmac->pool_lock, flags);
2731
2732 return NULL;
2733 }
2734
2735 switch (direction) {
2736 case DMA_MEM_TO_DEV:
2737 desc->rqcfg.src_inc = 1;
2738 desc->rqcfg.dst_inc = 0;
2739 desc->req.rqtype = MEMTODEV;
2740 src = dma_addr;
2741 dst = pch->fifo_addr;
2742 break;
2743 case DMA_DEV_TO_MEM:
2744 desc->rqcfg.src_inc = 0;
2745 desc->rqcfg.dst_inc = 1;
2746 desc->req.rqtype = DEVTOMEM;
2747 src = pch->fifo_addr;
2748 dst = dma_addr;
2749 break;
2750 default:
2751 break;
2752 }
2753
2754 desc->rqcfg.brst_size = pch->burst_sz;
2755 desc->rqcfg.brst_len = 1;
2756 fill_px(&desc->px, dst, src, period_len);
2757
2758 if (!first)
2759 first = desc;
2760 else
2761 list_add_tail(&desc->node, &first->node);
2762
2763 dma_addr += period_len;
2764 }
2765
2766 if (!desc)
2767 return NULL;
2768
2769 pch->cyclic = true;
2770 desc->txd.flags = flags;
2730 2771
2731 return &desc->txd; 2772 return &desc->txd;
2732} 2773}
diff --git a/drivers/dma/sh/shdma.c b/drivers/dma/sh/shdma.c
index b67f45f5c271..5039fbc88254 100644
--- a/drivers/dma/sh/shdma.c
+++ b/drivers/dma/sh/shdma.c
@@ -400,8 +400,8 @@ static size_t sh_dmae_get_partial(struct shdma_chan *schan,
400 shdma_chan); 400 shdma_chan);
401 struct sh_dmae_desc *sh_desc = container_of(sdesc, 401 struct sh_dmae_desc *sh_desc = container_of(sdesc,
402 struct sh_dmae_desc, shdma_desc); 402 struct sh_dmae_desc, shdma_desc);
403 return (sh_desc->hw.tcr - sh_dmae_readl(sh_chan, TCR)) << 403 return sh_desc->hw.tcr -
404 sh_chan->xmit_shift; 404 (sh_dmae_readl(sh_chan, TCR) << sh_chan->xmit_shift);
405} 405}
406 406
407/* Called from error IRQ or NMI */ 407/* Called from error IRQ or NMI */
diff --git a/drivers/edac/edac_mc.c b/drivers/edac/edac_mc.c
index 27e86d938262..89e109022d78 100644
--- a/drivers/edac/edac_mc.c
+++ b/drivers/edac/edac_mc.c
@@ -48,6 +48,8 @@ static LIST_HEAD(mc_devices);
48 */ 48 */
49static void const *edac_mc_owner; 49static void const *edac_mc_owner;
50 50
51static struct bus_type mc_bus[EDAC_MAX_MCS];
52
51unsigned edac_dimm_info_location(struct dimm_info *dimm, char *buf, 53unsigned edac_dimm_info_location(struct dimm_info *dimm, char *buf,
52 unsigned len) 54 unsigned len)
53{ 55{
@@ -723,6 +725,11 @@ int edac_mc_add_mc(struct mem_ctl_info *mci)
723 int ret = -EINVAL; 725 int ret = -EINVAL;
724 edac_dbg(0, "\n"); 726 edac_dbg(0, "\n");
725 727
728 if (mci->mc_idx >= EDAC_MAX_MCS) {
729 pr_warn_once("Too many memory controllers: %d\n", mci->mc_idx);
730 return -ENODEV;
731 }
732
726#ifdef CONFIG_EDAC_DEBUG 733#ifdef CONFIG_EDAC_DEBUG
727 if (edac_debug_level >= 3) 734 if (edac_debug_level >= 3)
728 edac_mc_dump_mci(mci); 735 edac_mc_dump_mci(mci);
@@ -762,6 +769,8 @@ int edac_mc_add_mc(struct mem_ctl_info *mci)
762 /* set load time so that error rate can be tracked */ 769 /* set load time so that error rate can be tracked */
763 mci->start_time = jiffies; 770 mci->start_time = jiffies;
764 771
772 mci->bus = &mc_bus[mci->mc_idx];
773
765 if (edac_create_sysfs_mci_device(mci)) { 774 if (edac_create_sysfs_mci_device(mci)) {
766 edac_mc_printk(mci, KERN_WARNING, 775 edac_mc_printk(mci, KERN_WARNING,
767 "failed to create sysfs device\n"); 776 "failed to create sysfs device\n");
diff --git a/drivers/edac/edac_mc_sysfs.c b/drivers/edac/edac_mc_sysfs.c
index ef15a7e613bc..e7c32c4f7837 100644
--- a/drivers/edac/edac_mc_sysfs.c
+++ b/drivers/edac/edac_mc_sysfs.c
@@ -370,7 +370,7 @@ static int edac_create_csrow_object(struct mem_ctl_info *mci,
370 return -ENODEV; 370 return -ENODEV;
371 371
372 csrow->dev.type = &csrow_attr_type; 372 csrow->dev.type = &csrow_attr_type;
373 csrow->dev.bus = &mci->bus; 373 csrow->dev.bus = mci->bus;
374 device_initialize(&csrow->dev); 374 device_initialize(&csrow->dev);
375 csrow->dev.parent = &mci->dev; 375 csrow->dev.parent = &mci->dev;
376 csrow->mci = mci; 376 csrow->mci = mci;
@@ -605,7 +605,7 @@ static int edac_create_dimm_object(struct mem_ctl_info *mci,
605 dimm->mci = mci; 605 dimm->mci = mci;
606 606
607 dimm->dev.type = &dimm_attr_type; 607 dimm->dev.type = &dimm_attr_type;
608 dimm->dev.bus = &mci->bus; 608 dimm->dev.bus = mci->bus;
609 device_initialize(&dimm->dev); 609 device_initialize(&dimm->dev);
610 610
611 dimm->dev.parent = &mci->dev; 611 dimm->dev.parent = &mci->dev;
@@ -975,11 +975,13 @@ int edac_create_sysfs_mci_device(struct mem_ctl_info *mci)
975 * The memory controller needs its own bus, in order to avoid 975 * The memory controller needs its own bus, in order to avoid
976 * namespace conflicts at /sys/bus/edac. 976 * namespace conflicts at /sys/bus/edac.
977 */ 977 */
978 mci->bus.name = kasprintf(GFP_KERNEL, "mc%d", mci->mc_idx); 978 mci->bus->name = kasprintf(GFP_KERNEL, "mc%d", mci->mc_idx);
979 if (!mci->bus.name) 979 if (!mci->bus->name)
980 return -ENOMEM; 980 return -ENOMEM;
981 edac_dbg(0, "creating bus %s\n", mci->bus.name); 981
982 err = bus_register(&mci->bus); 982 edac_dbg(0, "creating bus %s\n", mci->bus->name);
983
984 err = bus_register(mci->bus);
983 if (err < 0) 985 if (err < 0)
984 return err; 986 return err;
985 987
@@ -988,7 +990,7 @@ int edac_create_sysfs_mci_device(struct mem_ctl_info *mci)
988 device_initialize(&mci->dev); 990 device_initialize(&mci->dev);
989 991
990 mci->dev.parent = mci_pdev; 992 mci->dev.parent = mci_pdev;
991 mci->dev.bus = &mci->bus; 993 mci->dev.bus = mci->bus;
992 dev_set_name(&mci->dev, "mc%d", mci->mc_idx); 994 dev_set_name(&mci->dev, "mc%d", mci->mc_idx);
993 dev_set_drvdata(&mci->dev, mci); 995 dev_set_drvdata(&mci->dev, mci);
994 pm_runtime_forbid(&mci->dev); 996 pm_runtime_forbid(&mci->dev);
@@ -997,8 +999,8 @@ int edac_create_sysfs_mci_device(struct mem_ctl_info *mci)
997 err = device_add(&mci->dev); 999 err = device_add(&mci->dev);
998 if (err < 0) { 1000 if (err < 0) {
999 edac_dbg(1, "failure: create device %s\n", dev_name(&mci->dev)); 1001 edac_dbg(1, "failure: create device %s\n", dev_name(&mci->dev));
1000 bus_unregister(&mci->bus); 1002 bus_unregister(mci->bus);
1001 kfree(mci->bus.name); 1003 kfree(mci->bus->name);
1002 return err; 1004 return err;
1003 } 1005 }
1004 1006
@@ -1064,8 +1066,8 @@ fail:
1064 } 1066 }
1065fail2: 1067fail2:
1066 device_unregister(&mci->dev); 1068 device_unregister(&mci->dev);
1067 bus_unregister(&mci->bus); 1069 bus_unregister(mci->bus);
1068 kfree(mci->bus.name); 1070 kfree(mci->bus->name);
1069 return err; 1071 return err;
1070} 1072}
1071 1073
@@ -1098,8 +1100,8 @@ void edac_unregister_sysfs(struct mem_ctl_info *mci)
1098{ 1100{
1099 edac_dbg(1, "Unregistering device %s\n", dev_name(&mci->dev)); 1101 edac_dbg(1, "Unregistering device %s\n", dev_name(&mci->dev));
1100 device_unregister(&mci->dev); 1102 device_unregister(&mci->dev);
1101 bus_unregister(&mci->bus); 1103 bus_unregister(mci->bus);
1102 kfree(mci->bus.name); 1104 kfree(mci->bus->name);
1103} 1105}
1104 1106
1105static void mc_attr_release(struct device *dev) 1107static void mc_attr_release(struct device *dev)
diff --git a/drivers/edac/i5100_edac.c b/drivers/edac/i5100_edac.c
index 1b635178cc44..157b934e8ce3 100644
--- a/drivers/edac/i5100_edac.c
+++ b/drivers/edac/i5100_edac.c
@@ -974,7 +974,7 @@ static int i5100_setup_debugfs(struct mem_ctl_info *mci)
974 if (!i5100_debugfs) 974 if (!i5100_debugfs)
975 return -ENODEV; 975 return -ENODEV;
976 976
977 priv->debugfs = debugfs_create_dir(mci->bus.name, i5100_debugfs); 977 priv->debugfs = debugfs_create_dir(mci->bus->name, i5100_debugfs);
978 978
979 if (!priv->debugfs) 979 if (!priv->debugfs)
980 return -ENOMEM; 980 return -ENOMEM;
diff --git a/drivers/firewire/core-cdev.c b/drivers/firewire/core-cdev.c
index 7ef316fdc4d9..ac1b43a04285 100644
--- a/drivers/firewire/core-cdev.c
+++ b/drivers/firewire/core-cdev.c
@@ -54,6 +54,7 @@
54#define FW_CDEV_KERNEL_VERSION 5 54#define FW_CDEV_KERNEL_VERSION 5
55#define FW_CDEV_VERSION_EVENT_REQUEST2 4 55#define FW_CDEV_VERSION_EVENT_REQUEST2 4
56#define FW_CDEV_VERSION_ALLOCATE_REGION_END 4 56#define FW_CDEV_VERSION_ALLOCATE_REGION_END 4
57#define FW_CDEV_VERSION_AUTO_FLUSH_ISO_OVERFLOW 5
57 58
58struct client { 59struct client {
59 u32 version; 60 u32 version;
@@ -1005,6 +1006,8 @@ static int ioctl_create_iso_context(struct client *client, union ioctl_arg *arg)
1005 a->channel, a->speed, a->header_size, cb, client); 1006 a->channel, a->speed, a->header_size, cb, client);
1006 if (IS_ERR(context)) 1007 if (IS_ERR(context))
1007 return PTR_ERR(context); 1008 return PTR_ERR(context);
1009 if (client->version < FW_CDEV_VERSION_AUTO_FLUSH_ISO_OVERFLOW)
1010 context->drop_overflow_headers = true;
1008 1011
1009 /* We only support one context at this time. */ 1012 /* We only support one context at this time. */
1010 spin_lock_irq(&client->lock); 1013 spin_lock_irq(&client->lock);
diff --git a/drivers/firewire/ohci.c b/drivers/firewire/ohci.c
index 9e1db6490b9a..afb701ec90ca 100644
--- a/drivers/firewire/ohci.c
+++ b/drivers/firewire/ohci.c
@@ -2749,8 +2749,11 @@ static void copy_iso_headers(struct iso_context *ctx, const u32 *dma_hdr)
2749{ 2749{
2750 u32 *ctx_hdr; 2750 u32 *ctx_hdr;
2751 2751
2752 if (ctx->header_length + ctx->base.header_size > PAGE_SIZE) 2752 if (ctx->header_length + ctx->base.header_size > PAGE_SIZE) {
2753 if (ctx->base.drop_overflow_headers)
2754 return;
2753 flush_iso_completions(ctx); 2755 flush_iso_completions(ctx);
2756 }
2754 2757
2755 ctx_hdr = ctx->header + ctx->header_length; 2758 ctx_hdr = ctx->header + ctx->header_length;
2756 ctx->last_timestamp = (u16)le32_to_cpu((__force __le32)dma_hdr[0]); 2759 ctx->last_timestamp = (u16)le32_to_cpu((__force __le32)dma_hdr[0]);
@@ -2910,8 +2913,11 @@ static int handle_it_packet(struct context *context,
2910 2913
2911 sync_it_packet_for_cpu(context, d); 2914 sync_it_packet_for_cpu(context, d);
2912 2915
2913 if (ctx->header_length + 4 > PAGE_SIZE) 2916 if (ctx->header_length + 4 > PAGE_SIZE) {
2917 if (ctx->base.drop_overflow_headers)
2918 return 1;
2914 flush_iso_completions(ctx); 2919 flush_iso_completions(ctx);
2920 }
2915 2921
2916 ctx_hdr = ctx->header + ctx->header_length; 2922 ctx_hdr = ctx->header + ctx->header_length;
2917 ctx->last_timestamp = le16_to_cpu(last->res_count); 2923 ctx->last_timestamp = le16_to_cpu(last->res_count);
diff --git a/drivers/firmware/dmi_scan.c b/drivers/firmware/dmi_scan.c
index eb760a218da4..232fa8fce26a 100644
--- a/drivers/firmware/dmi_scan.c
+++ b/drivers/firmware/dmi_scan.c
@@ -419,6 +419,13 @@ static void __init dmi_format_ids(char *buf, size_t len)
419 dmi_get_system_info(DMI_BIOS_DATE)); 419 dmi_get_system_info(DMI_BIOS_DATE));
420} 420}
421 421
422/*
423 * Check for DMI/SMBIOS headers in the system firmware image. Any
424 * SMBIOS header must start 16 bytes before the DMI header, so take a
425 * 32 byte buffer and check for DMI at offset 16 and SMBIOS at offset
426 * 0. If the DMI header is present, set dmi_ver accordingly (SMBIOS
427 * takes precedence) and return 0. Otherwise return 1.
428 */
422static int __init dmi_present(const u8 *buf) 429static int __init dmi_present(const u8 *buf)
423{ 430{
424 int smbios_ver; 431 int smbios_ver;
@@ -506,6 +513,13 @@ void __init dmi_scan_machine(void)
506 if (p == NULL) 513 if (p == NULL)
507 goto error; 514 goto error;
508 515
516 /*
517 * Iterate over all possible DMI header addresses q.
518 * Maintain the 32 bytes around q in buf. On the
519 * first iteration, substitute zero for the
520 * out-of-range bytes so there is no chance of falsely
521 * detecting an SMBIOS header.
522 */
509 memset(buf, 0, 16); 523 memset(buf, 0, 16);
510 for (q = p; q < p + 0x10000; q += 16) { 524 for (q = p; q < p + 0x10000; q += 16) {
511 memcpy_fromio(buf + 16, q, 16); 525 memcpy_fromio(buf + 16, q, 16);
diff --git a/drivers/firmware/efi/efivars.c b/drivers/firmware/efi/efivars.c
index 8bd1bb6dbe47..8a7432a4b413 100644
--- a/drivers/firmware/efi/efivars.c
+++ b/drivers/firmware/efi/efivars.c
@@ -583,6 +583,9 @@ int efivars_sysfs_init(void)
583 struct kobject *parent_kobj = efivars_kobject(); 583 struct kobject *parent_kobj = efivars_kobject();
584 int error = 0; 584 int error = 0;
585 585
586 if (!efi_enabled(EFI_RUNTIME_SERVICES))
587 return -ENODEV;
588
586 /* No efivars has been registered yet */ 589 /* No efivars has been registered yet */
587 if (!parent_kobj) 590 if (!parent_kobj)
588 return 0; 591 return 0;
diff --git a/drivers/gpio/gpio-msm-v1.c b/drivers/gpio/gpio-msm-v1.c
index e3ceaacde45c..73b73969d361 100644
--- a/drivers/gpio/gpio-msm-v1.c
+++ b/drivers/gpio/gpio-msm-v1.c
@@ -21,6 +21,7 @@
21#include <linux/module.h> 21#include <linux/module.h>
22#include <linux/device.h> 22#include <linux/device.h>
23#include <linux/platform_device.h> 23#include <linux/platform_device.h>
24#include <linux/err.h>
24 25
25#include <mach/msm_gpiomux.h> 26#include <mach/msm_gpiomux.h>
26 27
diff --git a/drivers/gpio/gpio-msm-v2.c b/drivers/gpio/gpio-msm-v2.c
index f4491a497cc8..c2fa77086eb5 100644
--- a/drivers/gpio/gpio-msm-v2.c
+++ b/drivers/gpio/gpio-msm-v2.c
@@ -378,7 +378,7 @@ static int msm_gpio_probe(struct platform_device *pdev)
378 int ret, ngpio; 378 int ret, ngpio;
379 struct resource *res; 379 struct resource *res;
380 380
381 if (!of_property_read_u32(pdev->dev.of_node, "ngpio", &ngpio)) { 381 if (of_property_read_u32(pdev->dev.of_node, "ngpio", &ngpio)) {
382 dev_err(&pdev->dev, "%s: ngpio property missing\n", __func__); 382 dev_err(&pdev->dev, "%s: ngpio property missing\n", __func__);
383 return -EINVAL; 383 return -EINVAL;
384 } 384 }
diff --git a/drivers/gpu/drm/ast/ast_ttm.c b/drivers/gpu/drm/ast/ast_ttm.c
index 98d670825a1a..6e8887fe6c1b 100644
--- a/drivers/gpu/drm/ast/ast_ttm.c
+++ b/drivers/gpu/drm/ast/ast_ttm.c
@@ -323,6 +323,7 @@ int ast_bo_create(struct drm_device *dev, int size, int align,
323 323
324 astbo->gem.driver_private = NULL; 324 astbo->gem.driver_private = NULL;
325 astbo->bo.bdev = &ast->ttm.bdev; 325 astbo->bo.bdev = &ast->ttm.bdev;
326 astbo->bo.bdev->dev_mapping = dev->dev_mapping;
326 327
327 ast_ttm_placement(astbo, TTM_PL_FLAG_VRAM | TTM_PL_FLAG_SYSTEM); 328 ast_ttm_placement(astbo, TTM_PL_FLAG_VRAM | TTM_PL_FLAG_SYSTEM);
328 329
diff --git a/drivers/gpu/drm/cirrus/cirrus_ttm.c b/drivers/gpu/drm/cirrus/cirrus_ttm.c
index 0047012045c2..69fd8f1ac8df 100644
--- a/drivers/gpu/drm/cirrus/cirrus_ttm.c
+++ b/drivers/gpu/drm/cirrus/cirrus_ttm.c
@@ -328,6 +328,7 @@ int cirrus_bo_create(struct drm_device *dev, int size, int align,
328 328
329 cirrusbo->gem.driver_private = NULL; 329 cirrusbo->gem.driver_private = NULL;
330 cirrusbo->bo.bdev = &cirrus->ttm.bdev; 330 cirrusbo->bo.bdev = &cirrus->ttm.bdev;
331 cirrusbo->bo.bdev->dev_mapping = dev->dev_mapping;
331 332
332 cirrus_ttm_placement(cirrusbo, TTM_PL_FLAG_VRAM | TTM_PL_FLAG_SYSTEM); 333 cirrus_ttm_placement(cirrusbo, TTM_PL_FLAG_VRAM | TTM_PL_FLAG_SYSTEM);
333 334
diff --git a/drivers/gpu/drm/drm_crtc_helper.c b/drivers/gpu/drm/drm_crtc_helper.c
index 738a4294d820..6a647493ca7f 100644
--- a/drivers/gpu/drm/drm_crtc_helper.c
+++ b/drivers/gpu/drm/drm_crtc_helper.c
@@ -677,6 +677,11 @@ int drm_crtc_helper_set_config(struct drm_mode_set *set)
677 /* don't break so fail path works correct */ 677 /* don't break so fail path works correct */
678 fail = 1; 678 fail = 1;
679 break; 679 break;
680
681 if (connector->dpms != DRM_MODE_DPMS_ON) {
682 DRM_DEBUG_KMS("connector dpms not on, full mode switch\n");
683 mode_changed = true;
684 }
680 } 685 }
681 } 686 }
682 687
@@ -754,6 +759,12 @@ int drm_crtc_helper_set_config(struct drm_mode_set *set)
754 ret = -EINVAL; 759 ret = -EINVAL;
755 goto fail; 760 goto fail;
756 } 761 }
762 DRM_DEBUG_KMS("Setting connector DPMS state to on\n");
763 for (i = 0; i < set->num_connectors; i++) {
764 DRM_DEBUG_KMS("\t[CONNECTOR:%d:%s] set DPMS on\n", set->connectors[i]->base.id,
765 drm_get_connector_name(set->connectors[i]));
766 set->connectors[i]->funcs->dpms(set->connectors[i], DRM_MODE_DPMS_ON);
767 }
757 } 768 }
758 drm_helper_disable_unused_functions(dev); 769 drm_helper_disable_unused_functions(dev);
759 } else if (fb_changed) { 770 } else if (fb_changed) {
@@ -771,22 +782,6 @@ int drm_crtc_helper_set_config(struct drm_mode_set *set)
771 } 782 }
772 } 783 }
773 784
774 /*
775 * crtc set_config helpers implicit set the crtc and all connected
776 * encoders to DPMS on for a full mode set. But for just an fb update it
777 * doesn't do that. To not confuse userspace, do an explicit DPMS_ON
778 * unconditionally. This will also ensure driver internal dpms state is
779 * consistent again.
780 */
781 if (set->crtc->enabled) {
782 DRM_DEBUG_KMS("Setting connector DPMS state to on\n");
783 for (i = 0; i < set->num_connectors; i++) {
784 DRM_DEBUG_KMS("\t[CONNECTOR:%d:%s] set DPMS on\n", set->connectors[i]->base.id,
785 drm_get_connector_name(set->connectors[i]));
786 set->connectors[i]->funcs->dpms(set->connectors[i], DRM_MODE_DPMS_ON);
787 }
788 }
789
790 kfree(save_connectors); 785 kfree(save_connectors);
791 kfree(save_encoders); 786 kfree(save_encoders);
792 kfree(save_crtcs); 787 kfree(save_crtcs);
diff --git a/drivers/gpu/drm/drm_irq.c b/drivers/gpu/drm/drm_irq.c
index 8bcce7866d36..f92da0a32f0d 100644
--- a/drivers/gpu/drm/drm_irq.c
+++ b/drivers/gpu/drm/drm_irq.c
@@ -708,7 +708,10 @@ int drm_calc_vbltimestamp_from_scanoutpos(struct drm_device *dev, int crtc,
708 /* Subtract time delta from raw timestamp to get final 708 /* Subtract time delta from raw timestamp to get final
709 * vblank_time timestamp for end of vblank. 709 * vblank_time timestamp for end of vblank.
710 */ 710 */
711 etime = ktime_sub_ns(etime, delta_ns); 711 if (delta_ns < 0)
712 etime = ktime_add_ns(etime, -delta_ns);
713 else
714 etime = ktime_sub_ns(etime, delta_ns);
712 *vblank_time = ktime_to_timeval(etime); 715 *vblank_time = ktime_to_timeval(etime);
713 716
714 DRM_DEBUG("crtc %d : v %d p(%d,%d)@ %ld.%ld -> %ld.%ld [e %d us, %d rep]\n", 717 DRM_DEBUG("crtc %d : v %d p(%d,%d)@ %ld.%ld -> %ld.%ld [e %d us, %d rep]\n",
diff --git a/drivers/gpu/drm/exynos/exynos_ddc.c b/drivers/gpu/drm/exynos/exynos_ddc.c
index 95c75edef01a..30ef41bcd7b8 100644
--- a/drivers/gpu/drm/exynos/exynos_ddc.c
+++ b/drivers/gpu/drm/exynos/exynos_ddc.c
@@ -15,7 +15,6 @@
15 15
16#include <linux/kernel.h> 16#include <linux/kernel.h>
17#include <linux/i2c.h> 17#include <linux/i2c.h>
18#include <linux/module.h>
19 18
20 19
21#include "exynos_drm_drv.h" 20#include "exynos_drm_drv.h"
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fimc.c b/drivers/gpu/drm/exynos/exynos_drm_fimc.c
index 61b094f689a7..6e047bd53e2f 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_fimc.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_fimc.c
@@ -12,7 +12,6 @@
12 * 12 *
13 */ 13 */
14#include <linux/kernel.h> 14#include <linux/kernel.h>
15#include <linux/module.h>
16#include <linux/platform_device.h> 15#include <linux/platform_device.h>
17#include <linux/mfd/syscon.h> 16#include <linux/mfd/syscon.h>
18#include <linux/regmap.h> 17#include <linux/regmap.h>
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fimd.c b/drivers/gpu/drm/exynos/exynos_drm_fimd.c
index 3e106beca5b6..1c263dac3c1c 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_fimd.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_fimd.c
@@ -14,7 +14,6 @@
14#include <drm/drmP.h> 14#include <drm/drmP.h>
15 15
16#include <linux/kernel.h> 16#include <linux/kernel.h>
17#include <linux/module.h>
18#include <linux/platform_device.h> 17#include <linux/platform_device.h>
19#include <linux/clk.h> 18#include <linux/clk.h>
20#include <linux/of_device.h> 19#include <linux/of_device.h>
@@ -130,7 +129,6 @@ static const struct of_device_id fimd_driver_dt_match[] = {
130 .data = &exynos5_fimd_driver_data }, 129 .data = &exynos5_fimd_driver_data },
131 {}, 130 {},
132}; 131};
133MODULE_DEVICE_TABLE(of, fimd_driver_dt_match);
134#endif 132#endif
135 133
136static inline struct fimd_driver_data *drm_fimd_get_driver_data( 134static inline struct fimd_driver_data *drm_fimd_get_driver_data(
@@ -1082,7 +1080,6 @@ static struct platform_device_id fimd_driver_ids[] = {
1082 }, 1080 },
1083 {}, 1081 {},
1084}; 1082};
1085MODULE_DEVICE_TABLE(platform, fimd_driver_ids);
1086 1083
1087static const struct dev_pm_ops fimd_pm_ops = { 1084static const struct dev_pm_ops fimd_pm_ops = {
1088 SET_SYSTEM_SLEEP_PM_OPS(fimd_suspend, fimd_resume) 1085 SET_SYSTEM_SLEEP_PM_OPS(fimd_suspend, fimd_resume)
diff --git a/drivers/gpu/drm/exynos/exynos_drm_g2d.c b/drivers/gpu/drm/exynos/exynos_drm_g2d.c
index 42a5a5466075..eddea4941483 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_g2d.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_g2d.c
@@ -8,7 +8,6 @@
8 */ 8 */
9 9
10#include <linux/kernel.h> 10#include <linux/kernel.h>
11#include <linux/module.h>
12#include <linux/clk.h> 11#include <linux/clk.h>
13#include <linux/err.h> 12#include <linux/err.h>
14#include <linux/interrupt.h> 13#include <linux/interrupt.h>
@@ -806,9 +805,20 @@ static void g2d_dma_start(struct g2d_data *g2d,
806 struct g2d_cmdlist_node *node = 805 struct g2d_cmdlist_node *node =
807 list_first_entry(&runqueue_node->run_cmdlist, 806 list_first_entry(&runqueue_node->run_cmdlist,
808 struct g2d_cmdlist_node, list); 807 struct g2d_cmdlist_node, list);
808 int ret;
809
810 ret = pm_runtime_get_sync(g2d->dev);
811 if (ret < 0) {
812 dev_warn(g2d->dev, "failed pm power on.\n");
813 return;
814 }
809 815
810 pm_runtime_get_sync(g2d->dev); 816 ret = clk_prepare_enable(g2d->gate_clk);
811 clk_enable(g2d->gate_clk); 817 if (ret < 0) {
818 dev_warn(g2d->dev, "failed to enable clock.\n");
819 pm_runtime_put_sync(g2d->dev);
820 return;
821 }
812 822
813 writel_relaxed(node->dma_addr, g2d->regs + G2D_DMA_SFR_BASE_ADDR); 823 writel_relaxed(node->dma_addr, g2d->regs + G2D_DMA_SFR_BASE_ADDR);
814 writel_relaxed(G2D_DMA_START, g2d->regs + G2D_DMA_COMMAND); 824 writel_relaxed(G2D_DMA_START, g2d->regs + G2D_DMA_COMMAND);
@@ -861,7 +871,7 @@ static void g2d_runqueue_worker(struct work_struct *work)
861 runqueue_work); 871 runqueue_work);
862 872
863 mutex_lock(&g2d->runqueue_mutex); 873 mutex_lock(&g2d->runqueue_mutex);
864 clk_disable(g2d->gate_clk); 874 clk_disable_unprepare(g2d->gate_clk);
865 pm_runtime_put_sync(g2d->dev); 875 pm_runtime_put_sync(g2d->dev);
866 876
867 complete(&g2d->runqueue_node->complete); 877 complete(&g2d->runqueue_node->complete);
@@ -1521,7 +1531,6 @@ static const struct of_device_id exynos_g2d_match[] = {
1521 { .compatible = "samsung,exynos5250-g2d" }, 1531 { .compatible = "samsung,exynos5250-g2d" },
1522 {}, 1532 {},
1523}; 1533};
1524MODULE_DEVICE_TABLE(of, exynos_g2d_match);
1525#endif 1534#endif
1526 1535
1527struct platform_driver g2d_driver = { 1536struct platform_driver g2d_driver = {
diff --git a/drivers/gpu/drm/exynos/exynos_drm_gsc.c b/drivers/gpu/drm/exynos/exynos_drm_gsc.c
index 472e3b25e7f2..90b8a1a5344c 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_gsc.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_gsc.c
@@ -12,7 +12,6 @@
12 * 12 *
13 */ 13 */
14#include <linux/kernel.h> 14#include <linux/kernel.h>
15#include <linux/module.h>
16#include <linux/platform_device.h> 15#include <linux/platform_device.h>
17#include <linux/clk.h> 16#include <linux/clk.h>
18#include <linux/pm_runtime.h> 17#include <linux/pm_runtime.h>
diff --git a/drivers/gpu/drm/exynos/exynos_drm_hdmi.c b/drivers/gpu/drm/exynos/exynos_drm_hdmi.c
index aaa550d622f0..8d3bc01d6834 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_hdmi.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_hdmi.c
@@ -15,7 +15,6 @@
15 15
16#include <linux/kernel.h> 16#include <linux/kernel.h>
17#include <linux/wait.h> 17#include <linux/wait.h>
18#include <linux/module.h>
19#include <linux/platform_device.h> 18#include <linux/platform_device.h>
20#include <linux/pm_runtime.h> 19#include <linux/pm_runtime.h>
21 20
diff --git a/drivers/gpu/drm/exynos/exynos_drm_ipp.c b/drivers/gpu/drm/exynos/exynos_drm_ipp.c
index b1ef8e7ff9c9..d2b6ab4def93 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_ipp.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_ipp.c
@@ -12,7 +12,6 @@
12 * 12 *
13 */ 13 */
14#include <linux/kernel.h> 14#include <linux/kernel.h>
15#include <linux/module.h>
16#include <linux/platform_device.h> 15#include <linux/platform_device.h>
17#include <linux/types.h> 16#include <linux/types.h>
18#include <linux/clk.h> 17#include <linux/clk.h>
@@ -342,10 +341,10 @@ int exynos_drm_ipp_get_property(struct drm_device *drm_dev, void *data,
342 */ 341 */
343 ippdrv = ipp_find_obj(&ctx->ipp_idr, &ctx->ipp_lock, 342 ippdrv = ipp_find_obj(&ctx->ipp_idr, &ctx->ipp_lock,
344 prop_list->ipp_id); 343 prop_list->ipp_id);
345 if (!ippdrv) { 344 if (IS_ERR(ippdrv)) {
346 DRM_ERROR("not found ipp%d driver.\n", 345 DRM_ERROR("not found ipp%d driver.\n",
347 prop_list->ipp_id); 346 prop_list->ipp_id);
348 return -EINVAL; 347 return PTR_ERR(ippdrv);
349 } 348 }
350 349
351 prop_list = ippdrv->prop_list; 350 prop_list = ippdrv->prop_list;
@@ -970,9 +969,9 @@ int exynos_drm_ipp_queue_buf(struct drm_device *drm_dev, void *data,
970 /* find command node */ 969 /* find command node */
971 c_node = ipp_find_obj(&ctx->prop_idr, &ctx->prop_lock, 970 c_node = ipp_find_obj(&ctx->prop_idr, &ctx->prop_lock,
972 qbuf->prop_id); 971 qbuf->prop_id);
973 if (!c_node) { 972 if (IS_ERR(c_node)) {
974 DRM_ERROR("failed to get command node.\n"); 973 DRM_ERROR("failed to get command node.\n");
975 return -EFAULT; 974 return PTR_ERR(c_node);
976 } 975 }
977 976
978 /* buffer control */ 977 /* buffer control */
@@ -1106,9 +1105,9 @@ int exynos_drm_ipp_cmd_ctrl(struct drm_device *drm_dev, void *data,
1106 1105
1107 c_node = ipp_find_obj(&ctx->prop_idr, &ctx->prop_lock, 1106 c_node = ipp_find_obj(&ctx->prop_idr, &ctx->prop_lock,
1108 cmd_ctrl->prop_id); 1107 cmd_ctrl->prop_id);
1109 if (!c_node) { 1108 if (IS_ERR(c_node)) {
1110 DRM_ERROR("invalid command node list.\n"); 1109 DRM_ERROR("invalid command node list.\n");
1111 return -EINVAL; 1110 return PTR_ERR(c_node);
1112 } 1111 }
1113 1112
1114 if (!exynos_drm_ipp_check_valid(ippdrv->dev, cmd_ctrl->ctrl, 1113 if (!exynos_drm_ipp_check_valid(ippdrv->dev, cmd_ctrl->ctrl,
diff --git a/drivers/gpu/drm/exynos/exynos_drm_rotator.c b/drivers/gpu/drm/exynos/exynos_drm_rotator.c
index 427640aa5148..49669aa24c45 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_rotator.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_rotator.c
@@ -10,7 +10,6 @@
10 */ 10 */
11 11
12#include <linux/kernel.h> 12#include <linux/kernel.h>
13#include <linux/module.h>
14#include <linux/err.h> 13#include <linux/err.h>
15#include <linux/interrupt.h> 14#include <linux/interrupt.h>
16#include <linux/io.h> 15#include <linux/io.h>
diff --git a/drivers/gpu/drm/exynos/exynos_drm_vidi.c b/drivers/gpu/drm/exynos/exynos_drm_vidi.c
index 41cc74d83e4e..c57c56519add 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_vidi.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_vidi.c
@@ -13,7 +13,6 @@
13#include <drm/drmP.h> 13#include <drm/drmP.h>
14 14
15#include <linux/kernel.h> 15#include <linux/kernel.h>
16#include <linux/module.h>
17#include <linux/platform_device.h> 16#include <linux/platform_device.h>
18 17
19#include <drm/exynos_drm.h> 18#include <drm/exynos_drm.h>
diff --git a/drivers/gpu/drm/exynos/exynos_hdmi.c b/drivers/gpu/drm/exynos/exynos_hdmi.c
index 62ef5971ac3c..2f5c6942c968 100644
--- a/drivers/gpu/drm/exynos/exynos_hdmi.c
+++ b/drivers/gpu/drm/exynos/exynos_hdmi.c
@@ -24,7 +24,6 @@
24#include <linux/spinlock.h> 24#include <linux/spinlock.h>
25#include <linux/wait.h> 25#include <linux/wait.h>
26#include <linux/i2c.h> 26#include <linux/i2c.h>
27#include <linux/module.h>
28#include <linux/platform_device.h> 27#include <linux/platform_device.h>
29#include <linux/interrupt.h> 28#include <linux/interrupt.h>
30#include <linux/irq.h> 29#include <linux/irq.h>
diff --git a/drivers/gpu/drm/exynos/exynos_hdmiphy.c b/drivers/gpu/drm/exynos/exynos_hdmiphy.c
index ef04255076c7..6e320ae9afed 100644
--- a/drivers/gpu/drm/exynos/exynos_hdmiphy.c
+++ b/drivers/gpu/drm/exynos/exynos_hdmiphy.c
@@ -15,7 +15,6 @@
15 15
16#include <linux/kernel.h> 16#include <linux/kernel.h>
17#include <linux/i2c.h> 17#include <linux/i2c.h>
18#include <linux/module.h>
19 18
20#include "exynos_drm_drv.h" 19#include "exynos_drm_drv.h"
21#include "exynos_hdmi.h" 20#include "exynos_hdmi.h"
diff --git a/drivers/gpu/drm/exynos/exynos_mixer.c b/drivers/gpu/drm/exynos/exynos_mixer.c
index 42ffb71c63bc..c9a137caea41 100644
--- a/drivers/gpu/drm/exynos/exynos_mixer.c
+++ b/drivers/gpu/drm/exynos/exynos_mixer.c
@@ -23,7 +23,6 @@
23#include <linux/spinlock.h> 23#include <linux/spinlock.h>
24#include <linux/wait.h> 24#include <linux/wait.h>
25#include <linux/i2c.h> 25#include <linux/i2c.h>
26#include <linux/module.h>
27#include <linux/platform_device.h> 26#include <linux/platform_device.h>
28#include <linux/interrupt.h> 27#include <linux/interrupt.h>
29#include <linux/irq.h> 28#include <linux/irq.h>
diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
index adb319b53ecd..f4669802a0fb 100644
--- a/drivers/gpu/drm/i915/i915_dma.c
+++ b/drivers/gpu/drm/i915/i915_dma.c
@@ -1495,6 +1495,15 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
1495 dev_priv->dev = dev; 1495 dev_priv->dev = dev;
1496 dev_priv->info = info; 1496 dev_priv->info = info;
1497 1497
1498 spin_lock_init(&dev_priv->irq_lock);
1499 spin_lock_init(&dev_priv->gpu_error.lock);
1500 spin_lock_init(&dev_priv->rps.lock);
1501 spin_lock_init(&dev_priv->gt_lock);
1502 spin_lock_init(&dev_priv->backlight.lock);
1503 mutex_init(&dev_priv->dpio_lock);
1504 mutex_init(&dev_priv->rps.hw_lock);
1505 mutex_init(&dev_priv->modeset_restore_lock);
1506
1498 i915_dump_device_info(dev_priv); 1507 i915_dump_device_info(dev_priv);
1499 1508
1500 if (i915_get_bridge_dev(dev)) { 1509 if (i915_get_bridge_dev(dev)) {
@@ -1585,6 +1594,8 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
1585 intel_detect_pch(dev); 1594 intel_detect_pch(dev);
1586 1595
1587 intel_irq_init(dev); 1596 intel_irq_init(dev);
1597 intel_pm_init(dev);
1598 intel_gt_sanitize(dev);
1588 intel_gt_init(dev); 1599 intel_gt_init(dev);
1589 1600
1590 /* Try to make sure MCHBAR is enabled before poking at it */ 1601 /* Try to make sure MCHBAR is enabled before poking at it */
@@ -1610,15 +1621,6 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
1610 if (!IS_I945G(dev) && !IS_I945GM(dev)) 1621 if (!IS_I945G(dev) && !IS_I945GM(dev))
1611 pci_enable_msi(dev->pdev); 1622 pci_enable_msi(dev->pdev);
1612 1623
1613 spin_lock_init(&dev_priv->irq_lock);
1614 spin_lock_init(&dev_priv->gpu_error.lock);
1615 spin_lock_init(&dev_priv->rps.lock);
1616 spin_lock_init(&dev_priv->backlight.lock);
1617 mutex_init(&dev_priv->dpio_lock);
1618
1619 mutex_init(&dev_priv->rps.hw_lock);
1620 mutex_init(&dev_priv->modeset_restore_lock);
1621
1622 dev_priv->num_plane = 1; 1624 dev_priv->num_plane = 1;
1623 if (IS_VALLEYVIEW(dev)) 1625 if (IS_VALLEYVIEW(dev))
1624 dev_priv->num_plane = 2; 1626 dev_priv->num_plane = 2;
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index 062cbda1bf4a..45b3c030f483 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -123,10 +123,10 @@ module_param_named(preliminary_hw_support, i915_preliminary_hw_support, int, 060
123MODULE_PARM_DESC(preliminary_hw_support, 123MODULE_PARM_DESC(preliminary_hw_support,
124 "Enable preliminary hardware support. (default: false)"); 124 "Enable preliminary hardware support. (default: false)");
125 125
126int i915_disable_power_well __read_mostly = 0; 126int i915_disable_power_well __read_mostly = 1;
127module_param_named(disable_power_well, i915_disable_power_well, int, 0600); 127module_param_named(disable_power_well, i915_disable_power_well, int, 0600);
128MODULE_PARM_DESC(disable_power_well, 128MODULE_PARM_DESC(disable_power_well,
129 "Disable the power well when possible (default: false)"); 129 "Disable the power well when possible (default: true)");
130 130
131int i915_enable_ips __read_mostly = 1; 131int i915_enable_ips __read_mostly = 1;
132module_param_named(enable_ips, i915_enable_ips, int, 0600); 132module_param_named(enable_ips, i915_enable_ips, int, 0600);
@@ -706,7 +706,7 @@ static int i915_drm_thaw(struct drm_device *dev)
706{ 706{
707 int error = 0; 707 int error = 0;
708 708
709 intel_gt_reset(dev); 709 intel_gt_sanitize(dev);
710 710
711 if (drm_core_check_feature(dev, DRIVER_MODESET)) { 711 if (drm_core_check_feature(dev, DRIVER_MODESET)) {
712 mutex_lock(&dev->struct_mutex); 712 mutex_lock(&dev->struct_mutex);
@@ -732,7 +732,7 @@ int i915_resume(struct drm_device *dev)
732 732
733 pci_set_master(dev->pdev); 733 pci_set_master(dev->pdev);
734 734
735 intel_gt_reset(dev); 735 intel_gt_sanitize(dev);
736 736
737 /* 737 /*
738 * Platforms with opregion should have sane BIOS, older ones (gen3 and 738 * Platforms with opregion should have sane BIOS, older ones (gen3 and
@@ -1253,21 +1253,21 @@ hsw_unclaimed_reg_check(struct drm_i915_private *dev_priv, u32 reg)
1253 1253
1254#define __i915_read(x, y) \ 1254#define __i915_read(x, y) \
1255u##x i915_read##x(struct drm_i915_private *dev_priv, u32 reg) { \ 1255u##x i915_read##x(struct drm_i915_private *dev_priv, u32 reg) { \
1256 unsigned long irqflags; \
1256 u##x val = 0; \ 1257 u##x val = 0; \
1258 spin_lock_irqsave(&dev_priv->gt_lock, irqflags); \
1257 if (IS_GEN5(dev_priv->dev)) \ 1259 if (IS_GEN5(dev_priv->dev)) \
1258 ilk_dummy_write(dev_priv); \ 1260 ilk_dummy_write(dev_priv); \
1259 if (NEEDS_FORCE_WAKE((dev_priv), (reg))) { \ 1261 if (NEEDS_FORCE_WAKE((dev_priv), (reg))) { \
1260 unsigned long irqflags; \
1261 spin_lock_irqsave(&dev_priv->gt_lock, irqflags); \
1262 if (dev_priv->forcewake_count == 0) \ 1262 if (dev_priv->forcewake_count == 0) \
1263 dev_priv->gt.force_wake_get(dev_priv); \ 1263 dev_priv->gt.force_wake_get(dev_priv); \
1264 val = read##y(dev_priv->regs + reg); \ 1264 val = read##y(dev_priv->regs + reg); \
1265 if (dev_priv->forcewake_count == 0) \ 1265 if (dev_priv->forcewake_count == 0) \
1266 dev_priv->gt.force_wake_put(dev_priv); \ 1266 dev_priv->gt.force_wake_put(dev_priv); \
1267 spin_unlock_irqrestore(&dev_priv->gt_lock, irqflags); \
1268 } else { \ 1267 } else { \
1269 val = read##y(dev_priv->regs + reg); \ 1268 val = read##y(dev_priv->regs + reg); \
1270 } \ 1269 } \
1270 spin_unlock_irqrestore(&dev_priv->gt_lock, irqflags); \
1271 trace_i915_reg_rw(false, reg, val, sizeof(val)); \ 1271 trace_i915_reg_rw(false, reg, val, sizeof(val)); \
1272 return val; \ 1272 return val; \
1273} 1273}
@@ -1280,8 +1280,10 @@ __i915_read(64, q)
1280 1280
1281#define __i915_write(x, y) \ 1281#define __i915_write(x, y) \
1282void i915_write##x(struct drm_i915_private *dev_priv, u32 reg, u##x val) { \ 1282void i915_write##x(struct drm_i915_private *dev_priv, u32 reg, u##x val) { \
1283 unsigned long irqflags; \
1283 u32 __fifo_ret = 0; \ 1284 u32 __fifo_ret = 0; \
1284 trace_i915_reg_rw(true, reg, val, sizeof(val)); \ 1285 trace_i915_reg_rw(true, reg, val, sizeof(val)); \
1286 spin_lock_irqsave(&dev_priv->gt_lock, irqflags); \
1285 if (NEEDS_FORCE_WAKE((dev_priv), (reg))) { \ 1287 if (NEEDS_FORCE_WAKE((dev_priv), (reg))) { \
1286 __fifo_ret = __gen6_gt_wait_for_fifo(dev_priv); \ 1288 __fifo_ret = __gen6_gt_wait_for_fifo(dev_priv); \
1287 } \ 1289 } \
@@ -1293,6 +1295,7 @@ void i915_write##x(struct drm_i915_private *dev_priv, u32 reg, u##x val) { \
1293 gen6_gt_check_fifodbg(dev_priv); \ 1295 gen6_gt_check_fifodbg(dev_priv); \
1294 } \ 1296 } \
1295 hsw_unclaimed_reg_check(dev_priv, reg); \ 1297 hsw_unclaimed_reg_check(dev_priv, reg); \
1298 spin_unlock_irqrestore(&dev_priv->gt_lock, irqflags); \
1296} 1299}
1297__i915_write(8, b) 1300__i915_write(8, b)
1298__i915_write(16, w) 1301__i915_write(16, w)
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index a416645bcd23..1929bffc1c77 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -555,6 +555,7 @@ enum intel_sbi_destination {
555#define QUIRK_PIPEA_FORCE (1<<0) 555#define QUIRK_PIPEA_FORCE (1<<0)
556#define QUIRK_LVDS_SSC_DISABLE (1<<1) 556#define QUIRK_LVDS_SSC_DISABLE (1<<1)
557#define QUIRK_INVERT_BRIGHTNESS (1<<2) 557#define QUIRK_INVERT_BRIGHTNESS (1<<2)
558#define QUIRK_NO_PCH_PWM_ENABLE (1<<3)
558 559
559struct intel_fbdev; 560struct intel_fbdev;
560struct intel_fbc_work; 561struct intel_fbc_work;
@@ -1581,9 +1582,10 @@ void i915_hangcheck_elapsed(unsigned long data);
1581void i915_handle_error(struct drm_device *dev, bool wedged); 1582void i915_handle_error(struct drm_device *dev, bool wedged);
1582 1583
1583extern void intel_irq_init(struct drm_device *dev); 1584extern void intel_irq_init(struct drm_device *dev);
1585extern void intel_pm_init(struct drm_device *dev);
1584extern void intel_hpd_init(struct drm_device *dev); 1586extern void intel_hpd_init(struct drm_device *dev);
1585extern void intel_gt_init(struct drm_device *dev); 1587extern void intel_gt_init(struct drm_device *dev);
1586extern void intel_gt_reset(struct drm_device *dev); 1588extern void intel_gt_sanitize(struct drm_device *dev);
1587 1589
1588void i915_error_state_free(struct kref *error_ref); 1590void i915_error_state_free(struct kref *error_ref);
1589 1591
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 4200c32407ec..d9e2208cfe98 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -1880,6 +1880,10 @@ i915_gem_object_move_to_active(struct drm_i915_gem_object *obj,
1880 u32 seqno = intel_ring_get_seqno(ring); 1880 u32 seqno = intel_ring_get_seqno(ring);
1881 1881
1882 BUG_ON(ring == NULL); 1882 BUG_ON(ring == NULL);
1883 if (obj->ring != ring && obj->last_write_seqno) {
1884 /* Keep the seqno relative to the current ring */
1885 obj->last_write_seqno = seqno;
1886 }
1883 obj->ring = ring; 1887 obj->ring = ring;
1884 1888
1885 /* Add a reference if we're newly entering the active list. */ 1889 /* Add a reference if we're newly entering the active list. */
@@ -2254,7 +2258,17 @@ void i915_gem_restore_fences(struct drm_device *dev)
2254 2258
2255 for (i = 0; i < dev_priv->num_fence_regs; i++) { 2259 for (i = 0; i < dev_priv->num_fence_regs; i++) {
2256 struct drm_i915_fence_reg *reg = &dev_priv->fence_regs[i]; 2260 struct drm_i915_fence_reg *reg = &dev_priv->fence_regs[i];
2257 i915_gem_write_fence(dev, i, reg->obj); 2261
2262 /*
2263 * Commit delayed tiling changes if we have an object still
2264 * attached to the fence, otherwise just clear the fence.
2265 */
2266 if (reg->obj) {
2267 i915_gem_object_update_fence(reg->obj, reg,
2268 reg->obj->tiling_mode);
2269 } else {
2270 i915_gem_write_fence(dev, i, NULL);
2271 }
2258 } 2272 }
2259} 2273}
2260 2274
@@ -2653,7 +2667,6 @@ static void i965_write_fence_reg(struct drm_device *dev, int reg,
2653 drm_i915_private_t *dev_priv = dev->dev_private; 2667 drm_i915_private_t *dev_priv = dev->dev_private;
2654 int fence_reg; 2668 int fence_reg;
2655 int fence_pitch_shift; 2669 int fence_pitch_shift;
2656 uint64_t val;
2657 2670
2658 if (INTEL_INFO(dev)->gen >= 6) { 2671 if (INTEL_INFO(dev)->gen >= 6) {
2659 fence_reg = FENCE_REG_SANDYBRIDGE_0; 2672 fence_reg = FENCE_REG_SANDYBRIDGE_0;
@@ -2663,8 +2676,23 @@ static void i965_write_fence_reg(struct drm_device *dev, int reg,
2663 fence_pitch_shift = I965_FENCE_PITCH_SHIFT; 2676 fence_pitch_shift = I965_FENCE_PITCH_SHIFT;
2664 } 2677 }
2665 2678
2679 fence_reg += reg * 8;
2680
2681 /* To w/a incoherency with non-atomic 64-bit register updates,
2682 * we split the 64-bit update into two 32-bit writes. In order
2683 * for a partial fence not to be evaluated between writes, we
2684 * precede the update with write to turn off the fence register,
2685 * and only enable the fence as the last step.
2686 *
2687 * For extra levels of paranoia, we make sure each step lands
2688 * before applying the next step.
2689 */
2690 I915_WRITE(fence_reg, 0);
2691 POSTING_READ(fence_reg);
2692
2666 if (obj) { 2693 if (obj) {
2667 u32 size = obj->gtt_space->size; 2694 u32 size = obj->gtt_space->size;
2695 uint64_t val;
2668 2696
2669 val = (uint64_t)((obj->gtt_offset + size - 4096) & 2697 val = (uint64_t)((obj->gtt_offset + size - 4096) &
2670 0xfffff000) << 32; 2698 0xfffff000) << 32;
@@ -2673,12 +2701,16 @@ static void i965_write_fence_reg(struct drm_device *dev, int reg,
2673 if (obj->tiling_mode == I915_TILING_Y) 2701 if (obj->tiling_mode == I915_TILING_Y)
2674 val |= 1 << I965_FENCE_TILING_Y_SHIFT; 2702 val |= 1 << I965_FENCE_TILING_Y_SHIFT;
2675 val |= I965_FENCE_REG_VALID; 2703 val |= I965_FENCE_REG_VALID;
2676 } else
2677 val = 0;
2678 2704
2679 fence_reg += reg * 8; 2705 I915_WRITE(fence_reg + 4, val >> 32);
2680 I915_WRITE64(fence_reg, val); 2706 POSTING_READ(fence_reg + 4);
2681 POSTING_READ(fence_reg); 2707
2708 I915_WRITE(fence_reg + 0, val);
2709 POSTING_READ(fence_reg);
2710 } else {
2711 I915_WRITE(fence_reg + 4, 0);
2712 POSTING_READ(fence_reg + 4);
2713 }
2682} 2714}
2683 2715
2684static void i915_write_fence_reg(struct drm_device *dev, int reg, 2716static void i915_write_fence_reg(struct drm_device *dev, int reg,
@@ -2773,6 +2805,10 @@ static void i915_gem_write_fence(struct drm_device *dev, int reg,
2773 if (i915_gem_object_needs_mb(dev_priv->fence_regs[reg].obj)) 2805 if (i915_gem_object_needs_mb(dev_priv->fence_regs[reg].obj))
2774 mb(); 2806 mb();
2775 2807
2808 WARN(obj && (!obj->stride || !obj->tiling_mode),
2809 "bogus fence setup with stride: 0x%x, tiling mode: %i\n",
2810 obj->stride, obj->tiling_mode);
2811
2776 switch (INTEL_INFO(dev)->gen) { 2812 switch (INTEL_INFO(dev)->gen) {
2777 case 7: 2813 case 7:
2778 case 6: 2814 case 6:
@@ -2796,56 +2832,17 @@ static inline int fence_number(struct drm_i915_private *dev_priv,
2796 return fence - dev_priv->fence_regs; 2832 return fence - dev_priv->fence_regs;
2797} 2833}
2798 2834
2799struct write_fence {
2800 struct drm_device *dev;
2801 struct drm_i915_gem_object *obj;
2802 int fence;
2803};
2804
2805static void i915_gem_write_fence__ipi(void *data)
2806{
2807 struct write_fence *args = data;
2808
2809 /* Required for SNB+ with LLC */
2810 wbinvd();
2811
2812 /* Required for VLV */
2813 i915_gem_write_fence(args->dev, args->fence, args->obj);
2814}
2815
2816static void i915_gem_object_update_fence(struct drm_i915_gem_object *obj, 2835static void i915_gem_object_update_fence(struct drm_i915_gem_object *obj,
2817 struct drm_i915_fence_reg *fence, 2836 struct drm_i915_fence_reg *fence,
2818 bool enable) 2837 bool enable)
2819{ 2838{
2820 struct drm_i915_private *dev_priv = obj->base.dev->dev_private; 2839 struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
2821 struct write_fence args = { 2840 int reg = fence_number(dev_priv, fence);
2822 .dev = obj->base.dev, 2841
2823 .fence = fence_number(dev_priv, fence), 2842 i915_gem_write_fence(obj->base.dev, reg, enable ? obj : NULL);
2824 .obj = enable ? obj : NULL,
2825 };
2826
2827 /* In order to fully serialize access to the fenced region and
2828 * the update to the fence register we need to take extreme
2829 * measures on SNB+. In theory, the write to the fence register
2830 * flushes all memory transactions before, and coupled with the
2831 * mb() placed around the register write we serialise all memory
2832 * operations with respect to the changes in the tiler. Yet, on
2833 * SNB+ we need to take a step further and emit an explicit wbinvd()
2834 * on each processor in order to manually flush all memory
2835 * transactions before updating the fence register.
2836 *
2837 * However, Valleyview complicates matter. There the wbinvd is
2838 * insufficient and unlike SNB/IVB requires the serialising
2839 * register write. (Note that that register write by itself is
2840 * conversely not sufficient for SNB+.) To compromise, we do both.
2841 */
2842 if (INTEL_INFO(args.dev)->gen >= 6)
2843 on_each_cpu(i915_gem_write_fence__ipi, &args, 1);
2844 else
2845 i915_gem_write_fence(args.dev, args.fence, args.obj);
2846 2843
2847 if (enable) { 2844 if (enable) {
2848 obj->fence_reg = args.fence; 2845 obj->fence_reg = reg;
2849 fence->obj = obj; 2846 fence->obj = obj;
2850 list_move_tail(&fence->lru_list, &dev_priv->mm.fence_list); 2847 list_move_tail(&fence->lru_list, &dev_priv->mm.fence_list);
2851 } else { 2848 } else {
@@ -2853,6 +2850,7 @@ static void i915_gem_object_update_fence(struct drm_i915_gem_object *obj,
2853 fence->obj = NULL; 2850 fence->obj = NULL;
2854 list_del_init(&fence->lru_list); 2851 list_del_init(&fence->lru_list);
2855 } 2852 }
2853 obj->fence_dirty = false;
2856} 2854}
2857 2855
2858static int 2856static int
@@ -2982,7 +2980,6 @@ i915_gem_object_get_fence(struct drm_i915_gem_object *obj)
2982 return 0; 2980 return 0;
2983 2981
2984 i915_gem_object_update_fence(obj, reg, enable); 2982 i915_gem_object_update_fence(obj, reg, enable);
2985 obj->fence_dirty = false;
2986 2983
2987 return 0; 2984 return 0;
2988} 2985}
@@ -4611,7 +4608,7 @@ i915_gem_inactive_shrink(struct shrinker *shrinker, struct shrink_control *sc)
4611 list_for_each_entry(obj, &dev_priv->mm.unbound_list, global_list) 4608 list_for_each_entry(obj, &dev_priv->mm.unbound_list, global_list)
4612 if (obj->pages_pin_count == 0) 4609 if (obj->pages_pin_count == 0)
4613 cnt += obj->base.size >> PAGE_SHIFT; 4610 cnt += obj->base.size >> PAGE_SHIFT;
4614 list_for_each_entry(obj, &dev_priv->mm.inactive_list, global_list) 4611 list_for_each_entry(obj, &dev_priv->mm.inactive_list, mm_list)
4615 if (obj->pin_count == 0 && obj->pages_pin_count == 0) 4612 if (obj->pin_count == 0 && obj->pages_pin_count == 0)
4616 cnt += obj->base.size >> PAGE_SHIFT; 4613 cnt += obj->base.size >> PAGE_SHIFT;
4617 4614
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index f2326fc60ac9..6f514297c483 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -1856,10 +1856,16 @@
1856#define CRT_HOTPLUG_DETECT_VOLTAGE_475MV (1 << 2) 1856#define CRT_HOTPLUG_DETECT_VOLTAGE_475MV (1 << 2)
1857 1857
1858#define PORT_HOTPLUG_STAT (dev_priv->info->display_mmio_offset + 0x61114) 1858#define PORT_HOTPLUG_STAT (dev_priv->info->display_mmio_offset + 0x61114)
1859/* HDMI/DP bits are gen4+ */ 1859/*
1860#define PORTB_HOTPLUG_LIVE_STATUS (1 << 29) 1860 * HDMI/DP bits are gen4+
1861 *
1862 * WARNING: Bspec for hpd status bits on gen4 seems to be completely confused.
1863 * Please check the detailed lore in the commit message for for experimental
1864 * evidence.
1865 */
1866#define PORTD_HOTPLUG_LIVE_STATUS (1 << 29)
1861#define PORTC_HOTPLUG_LIVE_STATUS (1 << 28) 1867#define PORTC_HOTPLUG_LIVE_STATUS (1 << 28)
1862#define PORTD_HOTPLUG_LIVE_STATUS (1 << 27) 1868#define PORTB_HOTPLUG_LIVE_STATUS (1 << 27)
1863#define PORTD_HOTPLUG_INT_STATUS (3 << 21) 1869#define PORTD_HOTPLUG_INT_STATUS (3 << 21)
1864#define PORTC_HOTPLUG_INT_STATUS (3 << 19) 1870#define PORTC_HOTPLUG_INT_STATUS (3 << 19)
1865#define PORTB_HOTPLUG_INT_STATUS (3 << 17) 1871#define PORTB_HOTPLUG_INT_STATUS (3 << 17)
diff --git a/drivers/gpu/drm/i915/intel_ddi.c b/drivers/gpu/drm/i915/intel_ddi.c
index 324211ac9c55..b042ee5c4070 100644
--- a/drivers/gpu/drm/i915/intel_ddi.c
+++ b/drivers/gpu/drm/i915/intel_ddi.c
@@ -301,7 +301,7 @@ static void intel_ddi_mode_set(struct drm_encoder *encoder,
301 struct intel_digital_port *intel_dig_port = 301 struct intel_digital_port *intel_dig_port =
302 enc_to_dig_port(encoder); 302 enc_to_dig_port(encoder);
303 303
304 intel_dp->DP = intel_dig_port->port_reversal | 304 intel_dp->DP = intel_dig_port->saved_port_bits |
305 DDI_BUF_CTL_ENABLE | DDI_BUF_EMP_400MV_0DB_HSW; 305 DDI_BUF_CTL_ENABLE | DDI_BUF_EMP_400MV_0DB_HSW;
306 intel_dp->DP |= DDI_PORT_WIDTH(intel_dp->lane_count); 306 intel_dp->DP |= DDI_PORT_WIDTH(intel_dp->lane_count);
307 307
@@ -1109,7 +1109,8 @@ static void intel_enable_ddi(struct intel_encoder *intel_encoder)
1109 * enabling the port. 1109 * enabling the port.
1110 */ 1110 */
1111 I915_WRITE(DDI_BUF_CTL(port), 1111 I915_WRITE(DDI_BUF_CTL(port),
1112 intel_dig_port->port_reversal | DDI_BUF_CTL_ENABLE); 1112 intel_dig_port->saved_port_bits |
1113 DDI_BUF_CTL_ENABLE);
1113 } else if (type == INTEL_OUTPUT_EDP) { 1114 } else if (type == INTEL_OUTPUT_EDP) {
1114 struct intel_dp *intel_dp = enc_to_intel_dp(encoder); 1115 struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
1115 1116
@@ -1347,8 +1348,9 @@ void intel_ddi_init(struct drm_device *dev, enum port port)
1347 intel_encoder->get_config = intel_ddi_get_config; 1348 intel_encoder->get_config = intel_ddi_get_config;
1348 1349
1349 intel_dig_port->port = port; 1350 intel_dig_port->port = port;
1350 intel_dig_port->port_reversal = I915_READ(DDI_BUF_CTL(port)) & 1351 intel_dig_port->saved_port_bits = I915_READ(DDI_BUF_CTL(port)) &
1351 DDI_BUF_PORT_REVERSAL; 1352 (DDI_BUF_PORT_REVERSAL |
1353 DDI_A_4_LANES);
1352 intel_dig_port->dp.output_reg = DDI_BUF_CTL(port); 1354 intel_dig_port->dp.output_reg = DDI_BUF_CTL(port);
1353 1355
1354 intel_encoder->type = INTEL_OUTPUT_UNKNOWN; 1356 intel_encoder->type = INTEL_OUTPUT_UNKNOWN;
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index 85f3eb74d2b7..e38b45786653 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -4913,22 +4913,19 @@ static void i9xx_get_pfit_config(struct intel_crtc *crtc,
4913 uint32_t tmp; 4913 uint32_t tmp;
4914 4914
4915 tmp = I915_READ(PFIT_CONTROL); 4915 tmp = I915_READ(PFIT_CONTROL);
4916 if (!(tmp & PFIT_ENABLE))
4917 return;
4916 4918
4919 /* Check whether the pfit is attached to our pipe. */
4917 if (INTEL_INFO(dev)->gen < 4) { 4920 if (INTEL_INFO(dev)->gen < 4) {
4918 if (crtc->pipe != PIPE_B) 4921 if (crtc->pipe != PIPE_B)
4919 return; 4922 return;
4920
4921 /* gen2/3 store dither state in pfit control, needs to match */
4922 pipe_config->gmch_pfit.control = tmp & PANEL_8TO6_DITHER_ENABLE;
4923 } else { 4923 } else {
4924 if ((tmp & PFIT_PIPE_MASK) != (crtc->pipe << PFIT_PIPE_SHIFT)) 4924 if ((tmp & PFIT_PIPE_MASK) != (crtc->pipe << PFIT_PIPE_SHIFT))
4925 return; 4925 return;
4926 } 4926 }
4927 4927
4928 if (!(tmp & PFIT_ENABLE)) 4928 pipe_config->gmch_pfit.control = tmp;
4929 return;
4930
4931 pipe_config->gmch_pfit.control = I915_READ(PFIT_CONTROL);
4932 pipe_config->gmch_pfit.pgm_ratios = I915_READ(PFIT_PGM_RATIOS); 4929 pipe_config->gmch_pfit.pgm_ratios = I915_READ(PFIT_PGM_RATIOS);
4933 if (INTEL_INFO(dev)->gen < 5) 4930 if (INTEL_INFO(dev)->gen < 5)
4934 pipe_config->gmch_pfit.lvds_border_bits = 4931 pipe_config->gmch_pfit.lvds_border_bits =
@@ -8272,9 +8269,11 @@ check_crtc_state(struct drm_device *dev)
8272 8269
8273 list_for_each_entry(encoder, &dev->mode_config.encoder_list, 8270 list_for_each_entry(encoder, &dev->mode_config.encoder_list,
8274 base.head) { 8271 base.head) {
8272 enum pipe pipe;
8275 if (encoder->base.crtc != &crtc->base) 8273 if (encoder->base.crtc != &crtc->base)
8276 continue; 8274 continue;
8277 if (encoder->get_config) 8275 if (encoder->get_config &&
8276 encoder->get_hw_state(encoder, &pipe))
8278 encoder->get_config(encoder, &pipe_config); 8277 encoder->get_config(encoder, &pipe_config);
8279 } 8278 }
8280 8279
@@ -8317,6 +8316,8 @@ check_shared_dpll_state(struct drm_device *dev)
8317 pll->active, pll->refcount); 8316 pll->active, pll->refcount);
8318 WARN(pll->active && !pll->on, 8317 WARN(pll->active && !pll->on,
8319 "pll in active use but not on in sw tracking\n"); 8318 "pll in active use but not on in sw tracking\n");
8319 WARN(pll->on && !pll->active,
8320 "pll in on but not on in use in sw tracking\n");
8320 WARN(pll->on != active, 8321 WARN(pll->on != active,
8321 "pll on state mismatch (expected %i, found %i)\n", 8322 "pll on state mismatch (expected %i, found %i)\n",
8322 pll->on, active); 8323 pll->on, active);
@@ -8541,15 +8542,20 @@ static void intel_set_config_restore_state(struct drm_device *dev,
8541} 8542}
8542 8543
8543static bool 8544static bool
8544is_crtc_connector_off(struct drm_crtc *crtc, struct drm_connector *connectors, 8545is_crtc_connector_off(struct drm_mode_set *set)
8545 int num_connectors)
8546{ 8546{
8547 int i; 8547 int i;
8548 8548
8549 for (i = 0; i < num_connectors; i++) 8549 if (set->num_connectors == 0)
8550 if (connectors[i].encoder && 8550 return false;
8551 connectors[i].encoder->crtc == crtc && 8551
8552 connectors[i].dpms != DRM_MODE_DPMS_ON) 8552 if (WARN_ON(set->connectors == NULL))
8553 return false;
8554
8555 for (i = 0; i < set->num_connectors; i++)
8556 if (set->connectors[i]->encoder &&
8557 set->connectors[i]->encoder->crtc == set->crtc &&
8558 set->connectors[i]->dpms != DRM_MODE_DPMS_ON)
8553 return true; 8559 return true;
8554 8560
8555 return false; 8561 return false;
@@ -8562,10 +8568,8 @@ intel_set_config_compute_mode_changes(struct drm_mode_set *set,
8562 8568
8563 /* We should be able to check here if the fb has the same properties 8569 /* We should be able to check here if the fb has the same properties
8564 * and then just flip_or_move it */ 8570 * and then just flip_or_move it */
8565 if (set->connectors != NULL && 8571 if (is_crtc_connector_off(set)) {
8566 is_crtc_connector_off(set->crtc, *set->connectors, 8572 config->mode_changed = true;
8567 set->num_connectors)) {
8568 config->mode_changed = true;
8569 } else if (set->crtc->fb != set->fb) { 8573 } else if (set->crtc->fb != set->fb) {
8570 /* If we have no fb then treat it as a full mode set */ 8574 /* If we have no fb then treat it as a full mode set */
8571 if (set->crtc->fb == NULL) { 8575 if (set->crtc->fb == NULL) {
@@ -9398,6 +9402,17 @@ static void quirk_invert_brightness(struct drm_device *dev)
9398 DRM_INFO("applying inverted panel brightness quirk\n"); 9402 DRM_INFO("applying inverted panel brightness quirk\n");
9399} 9403}
9400 9404
9405/*
9406 * Some machines (Dell XPS13) suffer broken backlight controls if
9407 * BLM_PCH_PWM_ENABLE is set.
9408 */
9409static void quirk_no_pcm_pwm_enable(struct drm_device *dev)
9410{
9411 struct drm_i915_private *dev_priv = dev->dev_private;
9412 dev_priv->quirks |= QUIRK_NO_PCH_PWM_ENABLE;
9413 DRM_INFO("applying no-PCH_PWM_ENABLE quirk\n");
9414}
9415
9401struct intel_quirk { 9416struct intel_quirk {
9402 int device; 9417 int device;
9403 int subsystem_vendor; 9418 int subsystem_vendor;
@@ -9467,6 +9482,11 @@ static struct intel_quirk intel_quirks[] = {
9467 9482
9468 /* Acer Aspire 4736Z */ 9483 /* Acer Aspire 4736Z */
9469 { 0x2a42, 0x1025, 0x0260, quirk_invert_brightness }, 9484 { 0x2a42, 0x1025, 0x0260, quirk_invert_brightness },
9485
9486 /* Dell XPS13 HD Sandy Bridge */
9487 { 0x0116, 0x1028, 0x052e, quirk_no_pcm_pwm_enable },
9488 /* Dell XPS13 HD and XPS13 FHD Ivy Bridge */
9489 { 0x0166, 0x1028, 0x058b, quirk_no_pcm_pwm_enable },
9470}; 9490};
9471 9491
9472static void intel_init_quirks(struct drm_device *dev) 9492static void intel_init_quirks(struct drm_device *dev)
@@ -9817,8 +9837,8 @@ static void intel_modeset_readout_hw_state(struct drm_device *dev)
9817 } 9837 }
9818 pll->refcount = pll->active; 9838 pll->refcount = pll->active;
9819 9839
9820 DRM_DEBUG_KMS("%s hw state readout: refcount %i\n", 9840 DRM_DEBUG_KMS("%s hw state readout: refcount %i, on %i\n",
9821 pll->name, pll->refcount); 9841 pll->name, pll->refcount, pll->on);
9822 } 9842 }
9823 9843
9824 list_for_each_entry(encoder, &dev->mode_config.encoder_list, 9844 list_for_each_entry(encoder, &dev->mode_config.encoder_list,
@@ -9869,6 +9889,7 @@ void intel_modeset_setup_hw_state(struct drm_device *dev,
9869 struct drm_plane *plane; 9889 struct drm_plane *plane;
9870 struct intel_crtc *crtc; 9890 struct intel_crtc *crtc;
9871 struct intel_encoder *encoder; 9891 struct intel_encoder *encoder;
9892 int i;
9872 9893
9873 intel_modeset_readout_hw_state(dev); 9894 intel_modeset_readout_hw_state(dev);
9874 9895
@@ -9884,6 +9905,18 @@ void intel_modeset_setup_hw_state(struct drm_device *dev,
9884 intel_dump_pipe_config(crtc, &crtc->config, "[setup_hw_state]"); 9905 intel_dump_pipe_config(crtc, &crtc->config, "[setup_hw_state]");
9885 } 9906 }
9886 9907
9908 for (i = 0; i < dev_priv->num_shared_dpll; i++) {
9909 struct intel_shared_dpll *pll = &dev_priv->shared_dplls[i];
9910
9911 if (!pll->on || pll->active)
9912 continue;
9913
9914 DRM_DEBUG_KMS("%s enabled but not in use, disabling\n", pll->name);
9915
9916 pll->disable(dev_priv, pll);
9917 pll->on = false;
9918 }
9919
9887 if (force_restore) { 9920 if (force_restore) {
9888 /* 9921 /*
9889 * We need to use raw interfaces for restoring state to avoid 9922 * We need to use raw interfaces for restoring state to avoid
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
index b73971234013..26e162bb3a51 100644
--- a/drivers/gpu/drm/i915/intel_dp.c
+++ b/drivers/gpu/drm/i915/intel_dp.c
@@ -75,7 +75,12 @@ intel_dp_max_link_bw(struct intel_dp *intel_dp)
75 case DP_LINK_BW_1_62: 75 case DP_LINK_BW_1_62:
76 case DP_LINK_BW_2_7: 76 case DP_LINK_BW_2_7:
77 break; 77 break;
78 case DP_LINK_BW_5_4: /* 1.2 capable displays may advertise higher bw */
79 max_link_bw = DP_LINK_BW_2_7;
80 break;
78 default: 81 default:
82 WARN(1, "invalid max DP link bw val %x, using 1.62Gbps\n",
83 max_link_bw);
79 max_link_bw = DP_LINK_BW_1_62; 84 max_link_bw = DP_LINK_BW_1_62;
80 break; 85 break;
81 } 86 }
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
index c8c9b6f48230..b7d6e09456ce 100644
--- a/drivers/gpu/drm/i915/intel_drv.h
+++ b/drivers/gpu/drm/i915/intel_drv.h
@@ -504,7 +504,7 @@ struct intel_dp {
504struct intel_digital_port { 504struct intel_digital_port {
505 struct intel_encoder base; 505 struct intel_encoder base;
506 enum port port; 506 enum port port;
507 u32 port_reversal; 507 u32 saved_port_bits;
508 struct intel_dp dp; 508 struct intel_dp dp;
509 struct intel_hdmi hdmi; 509 struct intel_hdmi hdmi;
510}; 510};
diff --git a/drivers/gpu/drm/i915/intel_hdmi.c b/drivers/gpu/drm/i915/intel_hdmi.c
index 98df2a0c85bd..2fd3fd5b943e 100644
--- a/drivers/gpu/drm/i915/intel_hdmi.c
+++ b/drivers/gpu/drm/i915/intel_hdmi.c
@@ -785,10 +785,22 @@ static void intel_disable_hdmi(struct intel_encoder *encoder)
785 } 785 }
786} 786}
787 787
788static int hdmi_portclock_limit(struct intel_hdmi *hdmi)
789{
790 struct drm_device *dev = intel_hdmi_to_dev(hdmi);
791
792 if (IS_G4X(dev))
793 return 165000;
794 else if (IS_HASWELL(dev))
795 return 300000;
796 else
797 return 225000;
798}
799
788static int intel_hdmi_mode_valid(struct drm_connector *connector, 800static int intel_hdmi_mode_valid(struct drm_connector *connector,
789 struct drm_display_mode *mode) 801 struct drm_display_mode *mode)
790{ 802{
791 if (mode->clock > 165000) 803 if (mode->clock > hdmi_portclock_limit(intel_attached_hdmi(connector)))
792 return MODE_CLOCK_HIGH; 804 return MODE_CLOCK_HIGH;
793 if (mode->clock < 20000) 805 if (mode->clock < 20000)
794 return MODE_CLOCK_LOW; 806 return MODE_CLOCK_LOW;
@@ -806,6 +818,7 @@ bool intel_hdmi_compute_config(struct intel_encoder *encoder,
806 struct drm_device *dev = encoder->base.dev; 818 struct drm_device *dev = encoder->base.dev;
807 struct drm_display_mode *adjusted_mode = &pipe_config->adjusted_mode; 819 struct drm_display_mode *adjusted_mode = &pipe_config->adjusted_mode;
808 int clock_12bpc = pipe_config->requested_mode.clock * 3 / 2; 820 int clock_12bpc = pipe_config->requested_mode.clock * 3 / 2;
821 int portclock_limit = hdmi_portclock_limit(intel_hdmi);
809 int desired_bpp; 822 int desired_bpp;
810 823
811 if (intel_hdmi->color_range_auto) { 824 if (intel_hdmi->color_range_auto) {
@@ -829,7 +842,7 @@ bool intel_hdmi_compute_config(struct intel_encoder *encoder,
829 * outputs. We also need to check that the higher clock still fits 842 * outputs. We also need to check that the higher clock still fits
830 * within limits. 843 * within limits.
831 */ 844 */
832 if (pipe_config->pipe_bpp > 8*3 && clock_12bpc <= 225000 845 if (pipe_config->pipe_bpp > 8*3 && clock_12bpc <= portclock_limit
833 && HAS_PCH_SPLIT(dev)) { 846 && HAS_PCH_SPLIT(dev)) {
834 DRM_DEBUG_KMS("picking bpc to 12 for HDMI output\n"); 847 DRM_DEBUG_KMS("picking bpc to 12 for HDMI output\n");
835 desired_bpp = 12*3; 848 desired_bpp = 12*3;
@@ -846,7 +859,7 @@ bool intel_hdmi_compute_config(struct intel_encoder *encoder,
846 pipe_config->pipe_bpp = desired_bpp; 859 pipe_config->pipe_bpp = desired_bpp;
847 } 860 }
848 861
849 if (adjusted_mode->clock > 225000) { 862 if (adjusted_mode->clock > portclock_limit) {
850 DRM_DEBUG_KMS("too high HDMI clock, rejecting mode\n"); 863 DRM_DEBUG_KMS("too high HDMI clock, rejecting mode\n");
851 return false; 864 return false;
852 } 865 }
diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c
index 021e8daa022d..61348eae2f04 100644
--- a/drivers/gpu/drm/i915/intel_lvds.c
+++ b/drivers/gpu/drm/i915/intel_lvds.c
@@ -109,6 +109,13 @@ static void intel_lvds_get_config(struct intel_encoder *encoder,
109 flags |= DRM_MODE_FLAG_PVSYNC; 109 flags |= DRM_MODE_FLAG_PVSYNC;
110 110
111 pipe_config->adjusted_mode.flags |= flags; 111 pipe_config->adjusted_mode.flags |= flags;
112
113 /* gen2/3 store dither state in pfit control, needs to match */
114 if (INTEL_INFO(dev)->gen < 4) {
115 tmp = I915_READ(PFIT_CONTROL);
116
117 pipe_config->gmch_pfit.control |= tmp & PANEL_8TO6_DITHER_ENABLE;
118 }
112} 119}
113 120
114/* The LVDS pin pair needs to be on before the DPLLs are enabled. 121/* The LVDS pin pair needs to be on before the DPLLs are enabled.
@@ -290,14 +297,11 @@ static bool intel_lvds_compute_config(struct intel_encoder *intel_encoder,
290 297
291 intel_pch_panel_fitting(intel_crtc, pipe_config, 298 intel_pch_panel_fitting(intel_crtc, pipe_config,
292 intel_connector->panel.fitting_mode); 299 intel_connector->panel.fitting_mode);
293 return true;
294 } else { 300 } else {
295 intel_gmch_panel_fitting(intel_crtc, pipe_config, 301 intel_gmch_panel_fitting(intel_crtc, pipe_config,
296 intel_connector->panel.fitting_mode); 302 intel_connector->panel.fitting_mode);
297 }
298 303
299 drm_mode_set_crtcinfo(adjusted_mode, 0); 304 }
300 pipe_config->timings_set = true;
301 305
302 /* 306 /*
303 * XXX: It would be nice to support lower refresh rates on the 307 * XXX: It would be nice to support lower refresh rates on the
diff --git a/drivers/gpu/drm/i915/intel_panel.c b/drivers/gpu/drm/i915/intel_panel.c
index 80bea1d3209f..5950888ae1d0 100644
--- a/drivers/gpu/drm/i915/intel_panel.c
+++ b/drivers/gpu/drm/i915/intel_panel.c
@@ -194,6 +194,9 @@ void intel_gmch_panel_fitting(struct intel_crtc *intel_crtc,
194 adjusted_mode->vdisplay == mode->vdisplay) 194 adjusted_mode->vdisplay == mode->vdisplay)
195 goto out; 195 goto out;
196 196
197 drm_mode_set_crtcinfo(adjusted_mode, 0);
198 pipe_config->timings_set = true;
199
197 switch (fitting_mode) { 200 switch (fitting_mode) {
198 case DRM_MODE_SCALE_CENTER: 201 case DRM_MODE_SCALE_CENTER:
199 /* 202 /*
@@ -494,8 +497,11 @@ void intel_panel_set_backlight(struct drm_device *dev, u32 level, u32 max)
494 goto out; 497 goto out;
495 } 498 }
496 499
497 /* scale to hardware */ 500 /* scale to hardware, but be careful to not overflow */
498 level = level * freq / max; 501 if (freq < max)
502 level = level * freq / max;
503 else
504 level = freq / max * level;
499 505
500 dev_priv->backlight.level = level; 506 dev_priv->backlight.level = level;
501 if (dev_priv->backlight.device) 507 if (dev_priv->backlight.device)
@@ -512,6 +518,17 @@ void intel_panel_disable_backlight(struct drm_device *dev)
512 struct drm_i915_private *dev_priv = dev->dev_private; 518 struct drm_i915_private *dev_priv = dev->dev_private;
513 unsigned long flags; 519 unsigned long flags;
514 520
521 /*
522 * Do not disable backlight on the vgaswitcheroo path. When switching
523 * away from i915, the other client may depend on i915 to handle the
524 * backlight. This will leave the backlight on unnecessarily when
525 * another client is not activated.
526 */
527 if (dev->switch_power_state == DRM_SWITCH_POWER_CHANGING) {
528 DRM_DEBUG_DRIVER("Skipping backlight disable on vga switch\n");
529 return;
530 }
531
515 spin_lock_irqsave(&dev_priv->backlight.lock, flags); 532 spin_lock_irqsave(&dev_priv->backlight.lock, flags);
516 533
517 dev_priv->backlight.enabled = false; 534 dev_priv->backlight.enabled = false;
@@ -580,7 +597,8 @@ void intel_panel_enable_backlight(struct drm_device *dev,
580 POSTING_READ(reg); 597 POSTING_READ(reg);
581 I915_WRITE(reg, tmp | BLM_PWM_ENABLE); 598 I915_WRITE(reg, tmp | BLM_PWM_ENABLE);
582 599
583 if (HAS_PCH_SPLIT(dev)) { 600 if (HAS_PCH_SPLIT(dev) &&
601 !(dev_priv->quirks & QUIRK_NO_PCH_PWM_ENABLE)) {
584 tmp = I915_READ(BLC_PWM_PCH_CTL1); 602 tmp = I915_READ(BLC_PWM_PCH_CTL1);
585 tmp |= BLM_PCH_PWM_ENABLE; 603 tmp |= BLM_PCH_PWM_ENABLE;
586 tmp &= ~BLM_PCH_OVERRIDE_ENABLE; 604 tmp &= ~BLM_PCH_OVERRIDE_ENABLE;
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
index ccbdd83f5220..b0e4a0bd1313 100644
--- a/drivers/gpu/drm/i915/intel_pm.c
+++ b/drivers/gpu/drm/i915/intel_pm.c
@@ -5063,8 +5063,26 @@ static void __intel_set_power_well(struct drm_device *dev, bool enable)
5063 } 5063 }
5064 } else { 5064 } else {
5065 if (enable_requested) { 5065 if (enable_requested) {
5066 unsigned long irqflags;
5067 enum pipe p;
5068
5066 I915_WRITE(HSW_PWR_WELL_DRIVER, 0); 5069 I915_WRITE(HSW_PWR_WELL_DRIVER, 0);
5070 POSTING_READ(HSW_PWR_WELL_DRIVER);
5067 DRM_DEBUG_KMS("Requesting to disable the power well\n"); 5071 DRM_DEBUG_KMS("Requesting to disable the power well\n");
5072
5073 /*
5074 * After this, the registers on the pipes that are part
5075 * of the power well will become zero, so we have to
5076 * adjust our counters according to that.
5077 *
5078 * FIXME: Should we do this in general in
5079 * drm_vblank_post_modeset?
5080 */
5081 spin_lock_irqsave(&dev->vbl_lock, irqflags);
5082 for_each_pipe(p)
5083 if (p != PIPE_A)
5084 dev->last_vblank[p] = 0;
5085 spin_unlock_irqrestore(&dev->vbl_lock, irqflags);
5068 } 5086 }
5069 } 5087 }
5070} 5088}
@@ -5476,7 +5494,7 @@ static void vlv_force_wake_put(struct drm_i915_private *dev_priv)
5476 gen6_gt_check_fifodbg(dev_priv); 5494 gen6_gt_check_fifodbg(dev_priv);
5477} 5495}
5478 5496
5479void intel_gt_reset(struct drm_device *dev) 5497void intel_gt_sanitize(struct drm_device *dev)
5480{ 5498{
5481 struct drm_i915_private *dev_priv = dev->dev_private; 5499 struct drm_i915_private *dev_priv = dev->dev_private;
5482 5500
@@ -5487,26 +5505,61 @@ void intel_gt_reset(struct drm_device *dev)
5487 if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev)) 5505 if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev))
5488 __gen6_gt_force_wake_mt_reset(dev_priv); 5506 __gen6_gt_force_wake_mt_reset(dev_priv);
5489 } 5507 }
5508
5509 /* BIOS often leaves RC6 enabled, but disable it for hw init */
5510 if (INTEL_INFO(dev)->gen >= 6)
5511 intel_disable_gt_powersave(dev);
5490} 5512}
5491 5513
5492void intel_gt_init(struct drm_device *dev) 5514void intel_gt_init(struct drm_device *dev)
5493{ 5515{
5494 struct drm_i915_private *dev_priv = dev->dev_private; 5516 struct drm_i915_private *dev_priv = dev->dev_private;
5495 5517
5496 spin_lock_init(&dev_priv->gt_lock);
5497
5498 intel_gt_reset(dev);
5499
5500 if (IS_VALLEYVIEW(dev)) { 5518 if (IS_VALLEYVIEW(dev)) {
5501 dev_priv->gt.force_wake_get = vlv_force_wake_get; 5519 dev_priv->gt.force_wake_get = vlv_force_wake_get;
5502 dev_priv->gt.force_wake_put = vlv_force_wake_put; 5520 dev_priv->gt.force_wake_put = vlv_force_wake_put;
5503 } else if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev)) { 5521 } else if (IS_HASWELL(dev)) {
5504 dev_priv->gt.force_wake_get = __gen6_gt_force_wake_mt_get; 5522 dev_priv->gt.force_wake_get = __gen6_gt_force_wake_mt_get;
5505 dev_priv->gt.force_wake_put = __gen6_gt_force_wake_mt_put; 5523 dev_priv->gt.force_wake_put = __gen6_gt_force_wake_mt_put;
5524 } else if (IS_IVYBRIDGE(dev)) {
5525 u32 ecobus;
5526
5527 /* IVB configs may use multi-threaded forcewake */
5528
5529 /* A small trick here - if the bios hasn't configured
5530 * MT forcewake, and if the device is in RC6, then
5531 * force_wake_mt_get will not wake the device and the
5532 * ECOBUS read will return zero. Which will be
5533 * (correctly) interpreted by the test below as MT
5534 * forcewake being disabled.
5535 */
5536 mutex_lock(&dev->struct_mutex);
5537 __gen6_gt_force_wake_mt_get(dev_priv);
5538 ecobus = I915_READ_NOTRACE(ECOBUS);
5539 __gen6_gt_force_wake_mt_put(dev_priv);
5540 mutex_unlock(&dev->struct_mutex);
5541
5542 if (ecobus & FORCEWAKE_MT_ENABLE) {
5543 dev_priv->gt.force_wake_get =
5544 __gen6_gt_force_wake_mt_get;
5545 dev_priv->gt.force_wake_put =
5546 __gen6_gt_force_wake_mt_put;
5547 } else {
5548 DRM_INFO("No MT forcewake available on Ivybridge, this can result in issues\n");
5549 DRM_INFO("when using vblank-synced partial screen updates.\n");
5550 dev_priv->gt.force_wake_get = __gen6_gt_force_wake_get;
5551 dev_priv->gt.force_wake_put = __gen6_gt_force_wake_put;
5552 }
5506 } else if (IS_GEN6(dev)) { 5553 } else if (IS_GEN6(dev)) {
5507 dev_priv->gt.force_wake_get = __gen6_gt_force_wake_get; 5554 dev_priv->gt.force_wake_get = __gen6_gt_force_wake_get;
5508 dev_priv->gt.force_wake_put = __gen6_gt_force_wake_put; 5555 dev_priv->gt.force_wake_put = __gen6_gt_force_wake_put;
5509 } 5556 }
5557}
5558
5559void intel_pm_init(struct drm_device *dev)
5560{
5561 struct drm_i915_private *dev_priv = dev->dev_private;
5562
5510 INIT_DELAYED_WORK(&dev_priv->rps.delayed_resume_work, 5563 INIT_DELAYED_WORK(&dev_priv->rps.delayed_resume_work,
5511 intel_gen6_powersave_work); 5564 intel_gen6_powersave_work);
5512} 5565}
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index e51ab552046c..664118d8c1d6 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -379,6 +379,17 @@ u32 intel_ring_get_active_head(struct intel_ring_buffer *ring)
379 return I915_READ(acthd_reg); 379 return I915_READ(acthd_reg);
380} 380}
381 381
382static void ring_setup_phys_status_page(struct intel_ring_buffer *ring)
383{
384 struct drm_i915_private *dev_priv = ring->dev->dev_private;
385 u32 addr;
386
387 addr = dev_priv->status_page_dmah->busaddr;
388 if (INTEL_INFO(ring->dev)->gen >= 4)
389 addr |= (dev_priv->status_page_dmah->busaddr >> 28) & 0xf0;
390 I915_WRITE(HWS_PGA, addr);
391}
392
382static int init_ring_common(struct intel_ring_buffer *ring) 393static int init_ring_common(struct intel_ring_buffer *ring)
383{ 394{
384 struct drm_device *dev = ring->dev; 395 struct drm_device *dev = ring->dev;
@@ -390,6 +401,11 @@ static int init_ring_common(struct intel_ring_buffer *ring)
390 if (HAS_FORCE_WAKE(dev)) 401 if (HAS_FORCE_WAKE(dev))
391 gen6_gt_force_wake_get(dev_priv); 402 gen6_gt_force_wake_get(dev_priv);
392 403
404 if (I915_NEED_GFX_HWS(dev))
405 intel_ring_setup_status_page(ring);
406 else
407 ring_setup_phys_status_page(ring);
408
393 /* Stop the ring if it's running. */ 409 /* Stop the ring if it's running. */
394 I915_WRITE_CTL(ring, 0); 410 I915_WRITE_CTL(ring, 0);
395 I915_WRITE_HEAD(ring, 0); 411 I915_WRITE_HEAD(ring, 0);
@@ -518,9 +534,6 @@ cleanup_pipe_control(struct intel_ring_buffer *ring)
518 struct pipe_control *pc = ring->private; 534 struct pipe_control *pc = ring->private;
519 struct drm_i915_gem_object *obj; 535 struct drm_i915_gem_object *obj;
520 536
521 if (!ring->private)
522 return;
523
524 obj = pc->obj; 537 obj = pc->obj;
525 538
526 kunmap(sg_page(obj->pages->sgl)); 539 kunmap(sg_page(obj->pages->sgl));
@@ -528,7 +541,6 @@ cleanup_pipe_control(struct intel_ring_buffer *ring)
528 drm_gem_object_unreference(&obj->base); 541 drm_gem_object_unreference(&obj->base);
529 542
530 kfree(pc); 543 kfree(pc);
531 ring->private = NULL;
532} 544}
533 545
534static int init_render_ring(struct intel_ring_buffer *ring) 546static int init_render_ring(struct intel_ring_buffer *ring)
@@ -601,7 +613,10 @@ static void render_ring_cleanup(struct intel_ring_buffer *ring)
601 if (HAS_BROKEN_CS_TLB(dev)) 613 if (HAS_BROKEN_CS_TLB(dev))
602 drm_gem_object_unreference(to_gem_object(ring->private)); 614 drm_gem_object_unreference(to_gem_object(ring->private));
603 615
604 cleanup_pipe_control(ring); 616 if (INTEL_INFO(dev)->gen >= 5)
617 cleanup_pipe_control(ring);
618
619 ring->private = NULL;
605} 620}
606 621
607static void 622static void
@@ -1223,7 +1238,6 @@ static int init_status_page(struct intel_ring_buffer *ring)
1223 ring->status_page.obj = obj; 1238 ring->status_page.obj = obj;
1224 memset(ring->status_page.page_addr, 0, PAGE_SIZE); 1239 memset(ring->status_page.page_addr, 0, PAGE_SIZE);
1225 1240
1226 intel_ring_setup_status_page(ring);
1227 DRM_DEBUG_DRIVER("%s hws offset: 0x%08x\n", 1241 DRM_DEBUG_DRIVER("%s hws offset: 0x%08x\n",
1228 ring->name, ring->status_page.gfx_addr); 1242 ring->name, ring->status_page.gfx_addr);
1229 1243
@@ -1237,10 +1251,9 @@ err:
1237 return ret; 1251 return ret;
1238} 1252}
1239 1253
1240static int init_phys_hws_pga(struct intel_ring_buffer *ring) 1254static int init_phys_status_page(struct intel_ring_buffer *ring)
1241{ 1255{
1242 struct drm_i915_private *dev_priv = ring->dev->dev_private; 1256 struct drm_i915_private *dev_priv = ring->dev->dev_private;
1243 u32 addr;
1244 1257
1245 if (!dev_priv->status_page_dmah) { 1258 if (!dev_priv->status_page_dmah) {
1246 dev_priv->status_page_dmah = 1259 dev_priv->status_page_dmah =
@@ -1249,11 +1262,6 @@ static int init_phys_hws_pga(struct intel_ring_buffer *ring)
1249 return -ENOMEM; 1262 return -ENOMEM;
1250 } 1263 }
1251 1264
1252 addr = dev_priv->status_page_dmah->busaddr;
1253 if (INTEL_INFO(ring->dev)->gen >= 4)
1254 addr |= (dev_priv->status_page_dmah->busaddr >> 28) & 0xf0;
1255 I915_WRITE(HWS_PGA, addr);
1256
1257 ring->status_page.page_addr = dev_priv->status_page_dmah->vaddr; 1265 ring->status_page.page_addr = dev_priv->status_page_dmah->vaddr;
1258 memset(ring->status_page.page_addr, 0, PAGE_SIZE); 1266 memset(ring->status_page.page_addr, 0, PAGE_SIZE);
1259 1267
@@ -1281,7 +1289,7 @@ static int intel_init_ring_buffer(struct drm_device *dev,
1281 return ret; 1289 return ret;
1282 } else { 1290 } else {
1283 BUG_ON(ring->id != RCS); 1291 BUG_ON(ring->id != RCS);
1284 ret = init_phys_hws_pga(ring); 1292 ret = init_phys_status_page(ring);
1285 if (ret) 1293 if (ret)
1286 return ret; 1294 return ret;
1287 } 1295 }
@@ -1893,7 +1901,7 @@ int intel_render_ring_init_dri(struct drm_device *dev, u64 start, u32 size)
1893 } 1901 }
1894 1902
1895 if (!I915_NEED_GFX_HWS(dev)) { 1903 if (!I915_NEED_GFX_HWS(dev)) {
1896 ret = init_phys_hws_pga(ring); 1904 ret = init_phys_status_page(ring);
1897 if (ret) 1905 if (ret)
1898 return ret; 1906 return ret;
1899 } 1907 }
diff --git a/drivers/gpu/drm/mgag200/mgag200_mode.c b/drivers/gpu/drm/mgag200/mgag200_mode.c
index 251784aa2225..503a414cbdad 100644
--- a/drivers/gpu/drm/mgag200/mgag200_mode.c
+++ b/drivers/gpu/drm/mgag200/mgag200_mode.c
@@ -29,6 +29,7 @@ static void mga_crtc_load_lut(struct drm_crtc *crtc)
29 struct mga_crtc *mga_crtc = to_mga_crtc(crtc); 29 struct mga_crtc *mga_crtc = to_mga_crtc(crtc);
30 struct drm_device *dev = crtc->dev; 30 struct drm_device *dev = crtc->dev;
31 struct mga_device *mdev = dev->dev_private; 31 struct mga_device *mdev = dev->dev_private;
32 struct drm_framebuffer *fb = crtc->fb;
32 int i; 33 int i;
33 34
34 if (!crtc->enabled) 35 if (!crtc->enabled)
@@ -36,6 +37,28 @@ static void mga_crtc_load_lut(struct drm_crtc *crtc)
36 37
37 WREG8(DAC_INDEX + MGA1064_INDEX, 0); 38 WREG8(DAC_INDEX + MGA1064_INDEX, 0);
38 39
40 if (fb && fb->bits_per_pixel == 16) {
41 int inc = (fb->depth == 15) ? 8 : 4;
42 u8 r, b;
43 for (i = 0; i < MGAG200_LUT_SIZE; i += inc) {
44 if (fb->depth == 16) {
45 if (i > (MGAG200_LUT_SIZE >> 1)) {
46 r = b = 0;
47 } else {
48 r = mga_crtc->lut_r[i << 1];
49 b = mga_crtc->lut_b[i << 1];
50 }
51 } else {
52 r = mga_crtc->lut_r[i];
53 b = mga_crtc->lut_b[i];
54 }
55 /* VGA registers */
56 WREG8(DAC_INDEX + MGA1064_COL_PAL, r);
57 WREG8(DAC_INDEX + MGA1064_COL_PAL, mga_crtc->lut_g[i]);
58 WREG8(DAC_INDEX + MGA1064_COL_PAL, b);
59 }
60 return;
61 }
39 for (i = 0; i < MGAG200_LUT_SIZE; i++) { 62 for (i = 0; i < MGAG200_LUT_SIZE; i++) {
40 /* VGA registers */ 63 /* VGA registers */
41 WREG8(DAC_INDEX + MGA1064_COL_PAL, mga_crtc->lut_r[i]); 64 WREG8(DAC_INDEX + MGA1064_COL_PAL, mga_crtc->lut_r[i]);
@@ -877,7 +900,7 @@ static int mga_crtc_mode_set(struct drm_crtc *crtc,
877 900
878 pitch = crtc->fb->pitches[0] / (crtc->fb->bits_per_pixel / 8); 901 pitch = crtc->fb->pitches[0] / (crtc->fb->bits_per_pixel / 8);
879 if (crtc->fb->bits_per_pixel == 24) 902 if (crtc->fb->bits_per_pixel == 24)
880 pitch = pitch >> (4 - bppshift); 903 pitch = (pitch * 3) >> (4 - bppshift);
881 else 904 else
882 pitch = pitch >> (4 - bppshift); 905 pitch = pitch >> (4 - bppshift);
883 906
@@ -1251,6 +1274,24 @@ static void mga_crtc_destroy(struct drm_crtc *crtc)
1251 kfree(mga_crtc); 1274 kfree(mga_crtc);
1252} 1275}
1253 1276
1277static void mga_crtc_disable(struct drm_crtc *crtc)
1278{
1279 int ret;
1280 DRM_DEBUG_KMS("\n");
1281 mga_crtc_dpms(crtc, DRM_MODE_DPMS_OFF);
1282 if (crtc->fb) {
1283 struct mga_framebuffer *mga_fb = to_mga_framebuffer(crtc->fb);
1284 struct drm_gem_object *obj = mga_fb->obj;
1285 struct mgag200_bo *bo = gem_to_mga_bo(obj);
1286 ret = mgag200_bo_reserve(bo, false);
1287 if (ret)
1288 return;
1289 mgag200_bo_push_sysram(bo);
1290 mgag200_bo_unreserve(bo);
1291 }
1292 crtc->fb = NULL;
1293}
1294
1254/* These provide the minimum set of functions required to handle a CRTC */ 1295/* These provide the minimum set of functions required to handle a CRTC */
1255static const struct drm_crtc_funcs mga_crtc_funcs = { 1296static const struct drm_crtc_funcs mga_crtc_funcs = {
1256 .cursor_set = mga_crtc_cursor_set, 1297 .cursor_set = mga_crtc_cursor_set,
@@ -1261,6 +1302,7 @@ static const struct drm_crtc_funcs mga_crtc_funcs = {
1261}; 1302};
1262 1303
1263static const struct drm_crtc_helper_funcs mga_helper_funcs = { 1304static const struct drm_crtc_helper_funcs mga_helper_funcs = {
1305 .disable = mga_crtc_disable,
1264 .dpms = mga_crtc_dpms, 1306 .dpms = mga_crtc_dpms,
1265 .mode_fixup = mga_crtc_mode_fixup, 1307 .mode_fixup = mga_crtc_mode_fixup,
1266 .mode_set = mga_crtc_mode_set, 1308 .mode_set = mga_crtc_mode_set,
@@ -1581,6 +1623,8 @@ static struct drm_connector *mga_vga_init(struct drm_device *dev)
1581 1623
1582 drm_connector_helper_add(connector, &mga_vga_connector_helper_funcs); 1624 drm_connector_helper_add(connector, &mga_vga_connector_helper_funcs);
1583 1625
1626 drm_sysfs_connector_add(connector);
1627
1584 mga_connector->i2c = mgag200_i2c_create(dev); 1628 mga_connector->i2c = mgag200_i2c_create(dev);
1585 if (!mga_connector->i2c) 1629 if (!mga_connector->i2c)
1586 DRM_ERROR("failed to add ddc bus\n"); 1630 DRM_ERROR("failed to add ddc bus\n");
diff --git a/drivers/gpu/drm/mgag200/mgag200_ttm.c b/drivers/gpu/drm/mgag200/mgag200_ttm.c
index 3acb2b044c7b..d70e4a92773b 100644
--- a/drivers/gpu/drm/mgag200/mgag200_ttm.c
+++ b/drivers/gpu/drm/mgag200/mgag200_ttm.c
@@ -323,6 +323,7 @@ int mgag200_bo_create(struct drm_device *dev, int size, int align,
323 323
324 mgabo->gem.driver_private = NULL; 324 mgabo->gem.driver_private = NULL;
325 mgabo->bo.bdev = &mdev->ttm.bdev; 325 mgabo->bo.bdev = &mdev->ttm.bdev;
326 mgabo->bo.bdev->dev_mapping = dev->dev_mapping;
326 327
327 mgag200_ttm_placement(mgabo, TTM_PL_FLAG_VRAM | TTM_PL_FLAG_SYSTEM); 328 mgag200_ttm_placement(mgabo, TTM_PL_FLAG_VRAM | TTM_PL_FLAG_SYSTEM);
328 329
@@ -353,6 +354,7 @@ int mgag200_bo_pin(struct mgag200_bo *bo, u32 pl_flag, u64 *gpu_addr)
353 bo->pin_count++; 354 bo->pin_count++;
354 if (gpu_addr) 355 if (gpu_addr)
355 *gpu_addr = mgag200_bo_gpu_offset(bo); 356 *gpu_addr = mgag200_bo_gpu_offset(bo);
357 return 0;
356 } 358 }
357 359
358 mgag200_ttm_placement(bo, pl_flag); 360 mgag200_ttm_placement(bo, pl_flag);
diff --git a/drivers/gpu/drm/nouveau/core/engine/bsp/nvc0.c b/drivers/gpu/drm/nouveau/core/engine/bsp/nvc0.c
index 262c9f5f5f60..ce860de43e61 100644
--- a/drivers/gpu/drm/nouveau/core/engine/bsp/nvc0.c
+++ b/drivers/gpu/drm/nouveau/core/engine/bsp/nvc0.c
@@ -90,6 +90,7 @@ nvc0_bsp_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
90 return ret; 90 return ret;
91 91
92 nv_subdev(priv)->unit = 0x00008000; 92 nv_subdev(priv)->unit = 0x00008000;
93 nv_subdev(priv)->intr = nouveau_falcon_intr;
93 nv_engine(priv)->cclass = &nvc0_bsp_cclass; 94 nv_engine(priv)->cclass = &nvc0_bsp_cclass;
94 nv_engine(priv)->sclass = nvc0_bsp_sclass; 95 nv_engine(priv)->sclass = nvc0_bsp_sclass;
95 return 0; 96 return 0;
diff --git a/drivers/gpu/drm/nouveau/core/engine/bsp/nve0.c b/drivers/gpu/drm/nouveau/core/engine/bsp/nve0.c
index c46882c83982..ba6aeca0285e 100644
--- a/drivers/gpu/drm/nouveau/core/engine/bsp/nve0.c
+++ b/drivers/gpu/drm/nouveau/core/engine/bsp/nve0.c
@@ -90,6 +90,7 @@ nve0_bsp_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
90 return ret; 90 return ret;
91 91
92 nv_subdev(priv)->unit = 0x00008000; 92 nv_subdev(priv)->unit = 0x00008000;
93 nv_subdev(priv)->intr = nouveau_falcon_intr;
93 nv_engine(priv)->cclass = &nve0_bsp_cclass; 94 nv_engine(priv)->cclass = &nve0_bsp_cclass;
94 nv_engine(priv)->sclass = nve0_bsp_sclass; 95 nv_engine(priv)->sclass = nve0_bsp_sclass;
95 return 0; 96 return 0;
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/hdanva3.c b/drivers/gpu/drm/nouveau/core/engine/disp/hdanva3.c
index 373dbcc523b2..a19e7d79b847 100644
--- a/drivers/gpu/drm/nouveau/core/engine/disp/hdanva3.c
+++ b/drivers/gpu/drm/nouveau/core/engine/disp/hdanva3.c
@@ -36,6 +36,8 @@ nva3_hda_eld(struct nv50_disp_priv *priv, int or, u8 *data, u32 size)
36 if (data && data[0]) { 36 if (data && data[0]) {
37 for (i = 0; i < size; i++) 37 for (i = 0; i < size; i++)
38 nv_wr32(priv, 0x61c440 + soff, (i << 8) | data[i]); 38 nv_wr32(priv, 0x61c440 + soff, (i << 8) | data[i]);
39 for (; i < 0x60; i++)
40 nv_wr32(priv, 0x61c440 + soff, (i << 8));
39 nv_mask(priv, 0x61c448 + soff, 0x80000003, 0x80000003); 41 nv_mask(priv, 0x61c448 + soff, 0x80000003, 0x80000003);
40 } else 42 } else
41 if (data) { 43 if (data) {
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/hdanvd0.c b/drivers/gpu/drm/nouveau/core/engine/disp/hdanvd0.c
index dc57e24fc1df..717639386ced 100644
--- a/drivers/gpu/drm/nouveau/core/engine/disp/hdanvd0.c
+++ b/drivers/gpu/drm/nouveau/core/engine/disp/hdanvd0.c
@@ -41,6 +41,8 @@ nvd0_hda_eld(struct nv50_disp_priv *priv, int or, u8 *data, u32 size)
41 if (data && data[0]) { 41 if (data && data[0]) {
42 for (i = 0; i < size; i++) 42 for (i = 0; i < size; i++)
43 nv_wr32(priv, 0x10ec00 + soff, (i << 8) | data[i]); 43 nv_wr32(priv, 0x10ec00 + soff, (i << 8) | data[i]);
44 for (; i < 0x60; i++)
45 nv_wr32(priv, 0x10ec00 + soff, (i << 8));
44 nv_mask(priv, 0x10ec10 + soff, 0x80000003, 0x80000003); 46 nv_mask(priv, 0x10ec10 + soff, 0x80000003, 0x80000003);
45 } else 47 } else
46 if (data) { 48 if (data) {
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/sornv50.c b/drivers/gpu/drm/nouveau/core/engine/disp/sornv50.c
index ab1e918469a8..526b75242899 100644
--- a/drivers/gpu/drm/nouveau/core/engine/disp/sornv50.c
+++ b/drivers/gpu/drm/nouveau/core/engine/disp/sornv50.c
@@ -47,14 +47,8 @@ int
47nv50_sor_mthd(struct nouveau_object *object, u32 mthd, void *args, u32 size) 47nv50_sor_mthd(struct nouveau_object *object, u32 mthd, void *args, u32 size)
48{ 48{
49 struct nv50_disp_priv *priv = (void *)object->engine; 49 struct nv50_disp_priv *priv = (void *)object->engine;
50 struct nouveau_bios *bios = nouveau_bios(priv);
51 const u16 type = (mthd & NV50_DISP_SOR_MTHD_TYPE) >> 12;
52 const u8 head = (mthd & NV50_DISP_SOR_MTHD_HEAD) >> 3; 50 const u8 head = (mthd & NV50_DISP_SOR_MTHD_HEAD) >> 3;
53 const u8 link = (mthd & NV50_DISP_SOR_MTHD_LINK) >> 2;
54 const u8 or = (mthd & NV50_DISP_SOR_MTHD_OR); 51 const u8 or = (mthd & NV50_DISP_SOR_MTHD_OR);
55 const u16 mask = (0x0100 << head) | (0x0040 << link) | (0x0001 << or);
56 struct dcb_output outp;
57 u8 ver, hdr;
58 u32 data; 52 u32 data;
59 int ret = -EINVAL; 53 int ret = -EINVAL;
60 54
@@ -62,8 +56,6 @@ nv50_sor_mthd(struct nouveau_object *object, u32 mthd, void *args, u32 size)
62 return -EINVAL; 56 return -EINVAL;
63 data = *(u32 *)args; 57 data = *(u32 *)args;
64 58
65 if (type && !dcb_outp_match(bios, type, mask, &ver, &hdr, &outp))
66 return -ENODEV;
67 59
68 switch (mthd & ~0x3f) { 60 switch (mthd & ~0x3f) {
69 case NV50_DISP_SOR_PWR: 61 case NV50_DISP_SOR_PWR:
diff --git a/drivers/gpu/drm/nouveau/core/engine/falcon.c b/drivers/gpu/drm/nouveau/core/engine/falcon.c
index 3c7a31f7590e..e03fc8e4dc1d 100644
--- a/drivers/gpu/drm/nouveau/core/engine/falcon.c
+++ b/drivers/gpu/drm/nouveau/core/engine/falcon.c
@@ -23,6 +23,25 @@
23#include <engine/falcon.h> 23#include <engine/falcon.h>
24#include <subdev/timer.h> 24#include <subdev/timer.h>
25 25
26void
27nouveau_falcon_intr(struct nouveau_subdev *subdev)
28{
29 struct nouveau_falcon *falcon = (void *)subdev;
30 u32 dispatch = nv_ro32(falcon, 0x01c);
31 u32 intr = nv_ro32(falcon, 0x008) & dispatch & ~(dispatch >> 16);
32
33 if (intr & 0x00000010) {
34 nv_debug(falcon, "ucode halted\n");
35 nv_wo32(falcon, 0x004, 0x00000010);
36 intr &= ~0x00000010;
37 }
38
39 if (intr) {
40 nv_error(falcon, "unhandled intr 0x%08x\n", intr);
41 nv_wo32(falcon, 0x004, intr);
42 }
43}
44
26u32 45u32
27_nouveau_falcon_rd32(struct nouveau_object *object, u64 addr) 46_nouveau_falcon_rd32(struct nouveau_object *object, u64 addr)
28{ 47{
diff --git a/drivers/gpu/drm/nouveau/core/engine/mpeg/nv31.c b/drivers/gpu/drm/nouveau/core/engine/mpeg/nv31.c
index 49ecbb859b25..c19004301309 100644
--- a/drivers/gpu/drm/nouveau/core/engine/mpeg/nv31.c
+++ b/drivers/gpu/drm/nouveau/core/engine/mpeg/nv31.c
@@ -265,8 +265,8 @@ nv31_mpeg_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
265int 265int
266nv31_mpeg_init(struct nouveau_object *object) 266nv31_mpeg_init(struct nouveau_object *object)
267{ 267{
268 struct nouveau_engine *engine = nv_engine(object->engine); 268 struct nouveau_engine *engine = nv_engine(object);
269 struct nv31_mpeg_priv *priv = (void *)engine; 269 struct nv31_mpeg_priv *priv = (void *)object;
270 struct nouveau_fb *pfb = nouveau_fb(object); 270 struct nouveau_fb *pfb = nouveau_fb(object);
271 int ret, i; 271 int ret, i;
272 272
@@ -284,7 +284,10 @@ nv31_mpeg_init(struct nouveau_object *object)
284 /* PMPEG init */ 284 /* PMPEG init */
285 nv_wr32(priv, 0x00b32c, 0x00000000); 285 nv_wr32(priv, 0x00b32c, 0x00000000);
286 nv_wr32(priv, 0x00b314, 0x00000100); 286 nv_wr32(priv, 0x00b314, 0x00000100);
287 nv_wr32(priv, 0x00b220, nv44_graph_class(priv) ? 0x00000044 : 0x00000031); 287 if (nv_device(priv)->chipset >= 0x40 && nv44_graph_class(priv))
288 nv_wr32(priv, 0x00b220, 0x00000044);
289 else
290 nv_wr32(priv, 0x00b220, 0x00000031);
288 nv_wr32(priv, 0x00b300, 0x02001ec1); 291 nv_wr32(priv, 0x00b300, 0x02001ec1);
289 nv_mask(priv, 0x00b32c, 0x00000001, 0x00000001); 292 nv_mask(priv, 0x00b32c, 0x00000001, 0x00000001);
290 293
diff --git a/drivers/gpu/drm/nouveau/core/engine/mpeg/nv40.c b/drivers/gpu/drm/nouveau/core/engine/mpeg/nv40.c
index f7c581ad1991..dd6196072e9c 100644
--- a/drivers/gpu/drm/nouveau/core/engine/mpeg/nv40.c
+++ b/drivers/gpu/drm/nouveau/core/engine/mpeg/nv40.c
@@ -61,6 +61,7 @@ nv40_mpeg_context_ctor(struct nouveau_object *parent,
61 if (ret) 61 if (ret)
62 return ret; 62 return ret;
63 63
64 nv_wo32(&chan->base.base, 0x78, 0x02001ec1);
64 return 0; 65 return 0;
65} 66}
66 67
diff --git a/drivers/gpu/drm/nouveau/core/engine/ppp/nvc0.c b/drivers/gpu/drm/nouveau/core/engine/ppp/nvc0.c
index 98072c1ff360..73719aaa62d6 100644
--- a/drivers/gpu/drm/nouveau/core/engine/ppp/nvc0.c
+++ b/drivers/gpu/drm/nouveau/core/engine/ppp/nvc0.c
@@ -90,6 +90,7 @@ nvc0_ppp_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
90 return ret; 90 return ret;
91 91
92 nv_subdev(priv)->unit = 0x00000002; 92 nv_subdev(priv)->unit = 0x00000002;
93 nv_subdev(priv)->intr = nouveau_falcon_intr;
93 nv_engine(priv)->cclass = &nvc0_ppp_cclass; 94 nv_engine(priv)->cclass = &nvc0_ppp_cclass;
94 nv_engine(priv)->sclass = nvc0_ppp_sclass; 95 nv_engine(priv)->sclass = nvc0_ppp_sclass;
95 return 0; 96 return 0;
diff --git a/drivers/gpu/drm/nouveau/core/engine/vp/nvc0.c b/drivers/gpu/drm/nouveau/core/engine/vp/nvc0.c
index 1879229b60eb..ac1f62aace72 100644
--- a/drivers/gpu/drm/nouveau/core/engine/vp/nvc0.c
+++ b/drivers/gpu/drm/nouveau/core/engine/vp/nvc0.c
@@ -90,6 +90,7 @@ nvc0_vp_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
90 return ret; 90 return ret;
91 91
92 nv_subdev(priv)->unit = 0x00020000; 92 nv_subdev(priv)->unit = 0x00020000;
93 nv_subdev(priv)->intr = nouveau_falcon_intr;
93 nv_engine(priv)->cclass = &nvc0_vp_cclass; 94 nv_engine(priv)->cclass = &nvc0_vp_cclass;
94 nv_engine(priv)->sclass = nvc0_vp_sclass; 95 nv_engine(priv)->sclass = nvc0_vp_sclass;
95 return 0; 96 return 0;
diff --git a/drivers/gpu/drm/nouveau/core/engine/vp/nve0.c b/drivers/gpu/drm/nouveau/core/engine/vp/nve0.c
index d28ecbf7bc49..d4c3108479c9 100644
--- a/drivers/gpu/drm/nouveau/core/engine/vp/nve0.c
+++ b/drivers/gpu/drm/nouveau/core/engine/vp/nve0.c
@@ -90,6 +90,7 @@ nve0_vp_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
90 return ret; 90 return ret;
91 91
92 nv_subdev(priv)->unit = 0x00020000; 92 nv_subdev(priv)->unit = 0x00020000;
93 nv_subdev(priv)->intr = nouveau_falcon_intr;
93 nv_engine(priv)->cclass = &nve0_vp_cclass; 94 nv_engine(priv)->cclass = &nve0_vp_cclass;
94 nv_engine(priv)->sclass = nve0_vp_sclass; 95 nv_engine(priv)->sclass = nve0_vp_sclass;
95 return 0; 96 return 0;
diff --git a/drivers/gpu/drm/nouveau/core/engine/xtensa.c b/drivers/gpu/drm/nouveau/core/engine/xtensa.c
index 0639bc59d0a5..5f6ede7c4892 100644
--- a/drivers/gpu/drm/nouveau/core/engine/xtensa.c
+++ b/drivers/gpu/drm/nouveau/core/engine/xtensa.c
@@ -118,7 +118,13 @@ _nouveau_xtensa_init(struct nouveau_object *object)
118 return ret; 118 return ret;
119 } 119 }
120 120
121 ret = nouveau_gpuobj_new(object, NULL, fw->size, 0x1000, 0, 121 if (fw->size > 0x40000) {
122 nv_warn(xtensa, "firmware %s too large\n", name);
123 release_firmware(fw);
124 return -EINVAL;
125 }
126
127 ret = nouveau_gpuobj_new(object, NULL, 0x40000, 0x1000, 0,
122 &xtensa->gpu_fw); 128 &xtensa->gpu_fw);
123 if (ret) { 129 if (ret) {
124 release_firmware(fw); 130 release_firmware(fw);
diff --git a/drivers/gpu/drm/nouveau/core/include/engine/falcon.h b/drivers/gpu/drm/nouveau/core/include/engine/falcon.h
index 1edec386ab36..181aa7da524d 100644
--- a/drivers/gpu/drm/nouveau/core/include/engine/falcon.h
+++ b/drivers/gpu/drm/nouveau/core/include/engine/falcon.h
@@ -72,6 +72,8 @@ int nouveau_falcon_create_(struct nouveau_object *, struct nouveau_object *,
72 struct nouveau_oclass *, u32, bool, const char *, 72 struct nouveau_oclass *, u32, bool, const char *,
73 const char *, int, void **); 73 const char *, int, void **);
74 74
75void nouveau_falcon_intr(struct nouveau_subdev *subdev);
76
75#define _nouveau_falcon_dtor _nouveau_engine_dtor 77#define _nouveau_falcon_dtor _nouveau_engine_dtor
76int _nouveau_falcon_init(struct nouveau_object *); 78int _nouveau_falcon_init(struct nouveau_object *);
77int _nouveau_falcon_fini(struct nouveau_object *, bool); 79int _nouveau_falcon_fini(struct nouveau_object *, bool);
diff --git a/drivers/gpu/drm/nouveau/core/include/subdev/vm.h b/drivers/gpu/drm/nouveau/core/include/subdev/vm.h
index f2e87b105666..fcf57fa309bf 100644
--- a/drivers/gpu/drm/nouveau/core/include/subdev/vm.h
+++ b/drivers/gpu/drm/nouveau/core/include/subdev/vm.h
@@ -55,7 +55,7 @@ struct nouveau_vma {
55struct nouveau_vm { 55struct nouveau_vm {
56 struct nouveau_vmmgr *vmm; 56 struct nouveau_vmmgr *vmm;
57 struct nouveau_mm mm; 57 struct nouveau_mm mm;
58 int refcount; 58 struct kref refcount;
59 59
60 struct list_head pgd_list; 60 struct list_head pgd_list;
61 atomic_t engref[NVDEV_SUBDEV_NR]; 61 atomic_t engref[NVDEV_SUBDEV_NR];
diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/priv.h b/drivers/gpu/drm/nouveau/core/subdev/fb/priv.h
index 6c974dd83e8b..db9d6ddde52c 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/fb/priv.h
+++ b/drivers/gpu/drm/nouveau/core/subdev/fb/priv.h
@@ -81,7 +81,7 @@ void nv44_fb_tile_prog(struct nouveau_fb *, int, struct nouveau_fb_tile *);
81void nv46_fb_tile_init(struct nouveau_fb *, int i, u32 addr, u32 size, 81void nv46_fb_tile_init(struct nouveau_fb *, int i, u32 addr, u32 size,
82 u32 pitch, u32 flags, struct nouveau_fb_tile *); 82 u32 pitch, u32 flags, struct nouveau_fb_tile *);
83 83
84void nv50_ram_put(struct nouveau_fb *, struct nouveau_mem **); 84void __nv50_ram_put(struct nouveau_fb *, struct nouveau_mem *);
85extern int nv50_fb_memtype[0x80]; 85extern int nv50_fb_memtype[0x80];
86 86
87#endif 87#endif
diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/ramnv50.c b/drivers/gpu/drm/nouveau/core/subdev/fb/ramnv50.c
index af5aa7ee8ad9..903baff77fdd 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/fb/ramnv50.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/fb/ramnv50.c
@@ -27,17 +27,10 @@
27#include "priv.h" 27#include "priv.h"
28 28
29void 29void
30nv50_ram_put(struct nouveau_fb *pfb, struct nouveau_mem **pmem) 30__nv50_ram_put(struct nouveau_fb *pfb, struct nouveau_mem *mem)
31{ 31{
32 struct nouveau_mm_node *this; 32 struct nouveau_mm_node *this;
33 struct nouveau_mem *mem;
34 33
35 mem = *pmem;
36 *pmem = NULL;
37 if (unlikely(mem == NULL))
38 return;
39
40 mutex_lock(&pfb->base.mutex);
41 while (!list_empty(&mem->regions)) { 34 while (!list_empty(&mem->regions)) {
42 this = list_first_entry(&mem->regions, typeof(*this), rl_entry); 35 this = list_first_entry(&mem->regions, typeof(*this), rl_entry);
43 36
@@ -46,6 +39,19 @@ nv50_ram_put(struct nouveau_fb *pfb, struct nouveau_mem **pmem)
46 } 39 }
47 40
48 nouveau_mm_free(&pfb->tags, &mem->tag); 41 nouveau_mm_free(&pfb->tags, &mem->tag);
42}
43
44void
45nv50_ram_put(struct nouveau_fb *pfb, struct nouveau_mem **pmem)
46{
47 struct nouveau_mem *mem = *pmem;
48
49 *pmem = NULL;
50 if (unlikely(mem == NULL))
51 return;
52
53 mutex_lock(&pfb->base.mutex);
54 __nv50_ram_put(pfb, mem);
49 mutex_unlock(&pfb->base.mutex); 55 mutex_unlock(&pfb->base.mutex);
50 56
51 kfree(mem); 57 kfree(mem);
diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/ramnvc0.c b/drivers/gpu/drm/nouveau/core/subdev/fb/ramnvc0.c
index 9c3634acbb9d..cf97c4de4a6b 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/fb/ramnvc0.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/fb/ramnvc0.c
@@ -33,11 +33,19 @@ void
33nvc0_ram_put(struct nouveau_fb *pfb, struct nouveau_mem **pmem) 33nvc0_ram_put(struct nouveau_fb *pfb, struct nouveau_mem **pmem)
34{ 34{
35 struct nouveau_ltcg *ltcg = nouveau_ltcg(pfb); 35 struct nouveau_ltcg *ltcg = nouveau_ltcg(pfb);
36 struct nouveau_mem *mem = *pmem;
36 37
37 if ((*pmem)->tag) 38 *pmem = NULL;
38 ltcg->tags_free(ltcg, &(*pmem)->tag); 39 if (unlikely(mem == NULL))
40 return;
39 41
40 nv50_ram_put(pfb, pmem); 42 mutex_lock(&pfb->base.mutex);
43 if (mem->tag)
44 ltcg->tags_free(ltcg, &mem->tag);
45 __nv50_ram_put(pfb, mem);
46 mutex_unlock(&pfb->base.mutex);
47
48 kfree(mem);
41} 49}
42 50
43int 51int
diff --git a/drivers/gpu/drm/nouveau/core/subdev/gpio/nv50.c b/drivers/gpu/drm/nouveau/core/subdev/gpio/nv50.c
index bf489dcf46e2..c4c1d415e7fe 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/gpio/nv50.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/gpio/nv50.c
@@ -103,7 +103,7 @@ nv50_gpio_intr(struct nouveau_subdev *subdev)
103 int i; 103 int i;
104 104
105 intr0 = nv_rd32(priv, 0xe054) & nv_rd32(priv, 0xe050); 105 intr0 = nv_rd32(priv, 0xe054) & nv_rd32(priv, 0xe050);
106 if (nv_device(priv)->chipset >= 0x90) 106 if (nv_device(priv)->chipset > 0x92)
107 intr1 = nv_rd32(priv, 0xe074) & nv_rd32(priv, 0xe070); 107 intr1 = nv_rd32(priv, 0xe074) & nv_rd32(priv, 0xe070);
108 108
109 hi = (intr0 & 0x0000ffff) | (intr1 << 16); 109 hi = (intr0 & 0x0000ffff) | (intr1 << 16);
@@ -115,7 +115,7 @@ nv50_gpio_intr(struct nouveau_subdev *subdev)
115 } 115 }
116 116
117 nv_wr32(priv, 0xe054, intr0); 117 nv_wr32(priv, 0xe054, intr0);
118 if (nv_device(priv)->chipset >= 0x90) 118 if (nv_device(priv)->chipset > 0x92)
119 nv_wr32(priv, 0xe074, intr1); 119 nv_wr32(priv, 0xe074, intr1);
120} 120}
121 121
@@ -146,7 +146,7 @@ nv50_gpio_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
146 int ret; 146 int ret;
147 147
148 ret = nouveau_gpio_create(parent, engine, oclass, 148 ret = nouveau_gpio_create(parent, engine, oclass,
149 nv_device(parent)->chipset >= 0x90 ? 32 : 16, 149 nv_device(parent)->chipset > 0x92 ? 32 : 16,
150 &priv); 150 &priv);
151 *pobject = nv_object(priv); 151 *pobject = nv_object(priv);
152 if (ret) 152 if (ret)
@@ -182,7 +182,7 @@ nv50_gpio_init(struct nouveau_object *object)
182 /* disable, and ack any pending gpio interrupts */ 182 /* disable, and ack any pending gpio interrupts */
183 nv_wr32(priv, 0xe050, 0x00000000); 183 nv_wr32(priv, 0xe050, 0x00000000);
184 nv_wr32(priv, 0xe054, 0xffffffff); 184 nv_wr32(priv, 0xe054, 0xffffffff);
185 if (nv_device(priv)->chipset >= 0x90) { 185 if (nv_device(priv)->chipset > 0x92) {
186 nv_wr32(priv, 0xe070, 0x00000000); 186 nv_wr32(priv, 0xe070, 0x00000000);
187 nv_wr32(priv, 0xe074, 0xffffffff); 187 nv_wr32(priv, 0xe074, 0xffffffff);
188 } 188 }
@@ -195,7 +195,7 @@ nv50_gpio_fini(struct nouveau_object *object, bool suspend)
195{ 195{
196 struct nv50_gpio_priv *priv = (void *)object; 196 struct nv50_gpio_priv *priv = (void *)object;
197 nv_wr32(priv, 0xe050, 0x00000000); 197 nv_wr32(priv, 0xe050, 0x00000000);
198 if (nv_device(priv)->chipset >= 0x90) 198 if (nv_device(priv)->chipset > 0x92)
199 nv_wr32(priv, 0xe070, 0x00000000); 199 nv_wr32(priv, 0xe070, 0x00000000);
200 return nouveau_gpio_fini(&priv->base, suspend); 200 return nouveau_gpio_fini(&priv->base, suspend);
201} 201}
diff --git a/drivers/gpu/drm/nouveau/core/subdev/mc/nv50.c b/drivers/gpu/drm/nouveau/core/subdev/mc/nv50.c
index 0cb322a5e72c..f25fc5fc7dd1 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/mc/nv50.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/mc/nv50.c
@@ -41,7 +41,7 @@ nv50_mc_intr[] = {
41 { 0x04000000, NVDEV_ENGINE_DISP }, 41 { 0x04000000, NVDEV_ENGINE_DISP },
42 { 0x10000000, NVDEV_SUBDEV_BUS }, 42 { 0x10000000, NVDEV_SUBDEV_BUS },
43 { 0x80000000, NVDEV_ENGINE_SW }, 43 { 0x80000000, NVDEV_ENGINE_SW },
44 { 0x0000d101, NVDEV_SUBDEV_FB }, 44 { 0x0002d101, NVDEV_SUBDEV_FB },
45 {}, 45 {},
46}; 46};
47 47
diff --git a/drivers/gpu/drm/nouveau/core/subdev/vm/base.c b/drivers/gpu/drm/nouveau/core/subdev/vm/base.c
index 67fcb6c852ac..ef3133e7575c 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/vm/base.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/vm/base.c
@@ -361,7 +361,7 @@ nouveau_vm_create(struct nouveau_vmmgr *vmm, u64 offset, u64 length,
361 361
362 INIT_LIST_HEAD(&vm->pgd_list); 362 INIT_LIST_HEAD(&vm->pgd_list);
363 vm->vmm = vmm; 363 vm->vmm = vmm;
364 vm->refcount = 1; 364 kref_init(&vm->refcount);
365 vm->fpde = offset >> (vmm->pgt_bits + 12); 365 vm->fpde = offset >> (vmm->pgt_bits + 12);
366 vm->lpde = (offset + length - 1) >> (vmm->pgt_bits + 12); 366 vm->lpde = (offset + length - 1) >> (vmm->pgt_bits + 12);
367 367
@@ -441,8 +441,9 @@ nouveau_vm_unlink(struct nouveau_vm *vm, struct nouveau_gpuobj *mpgd)
441} 441}
442 442
443static void 443static void
444nouveau_vm_del(struct nouveau_vm *vm) 444nouveau_vm_del(struct kref *kref)
445{ 445{
446 struct nouveau_vm *vm = container_of(kref, typeof(*vm), refcount);
446 struct nouveau_vm_pgd *vpgd, *tmp; 447 struct nouveau_vm_pgd *vpgd, *tmp;
447 448
448 list_for_each_entry_safe(vpgd, tmp, &vm->pgd_list, head) { 449 list_for_each_entry_safe(vpgd, tmp, &vm->pgd_list, head) {
@@ -458,27 +459,19 @@ int
458nouveau_vm_ref(struct nouveau_vm *ref, struct nouveau_vm **ptr, 459nouveau_vm_ref(struct nouveau_vm *ref, struct nouveau_vm **ptr,
459 struct nouveau_gpuobj *pgd) 460 struct nouveau_gpuobj *pgd)
460{ 461{
461 struct nouveau_vm *vm; 462 if (ref) {
462 int ret; 463 int ret = nouveau_vm_link(ref, pgd);
463
464 vm = ref;
465 if (vm) {
466 ret = nouveau_vm_link(vm, pgd);
467 if (ret) 464 if (ret)
468 return ret; 465 return ret;
469 466
470 vm->refcount++; 467 kref_get(&ref->refcount);
471 } 468 }
472 469
473 vm = *ptr; 470 if (*ptr) {
474 *ptr = ref; 471 nouveau_vm_unlink(*ptr, pgd);
475 472 kref_put(&(*ptr)->refcount, nouveau_vm_del);
476 if (vm) {
477 nouveau_vm_unlink(vm, pgd);
478
479 if (--vm->refcount == 0)
480 nouveau_vm_del(vm);
481 } 473 }
482 474
475 *ptr = ref;
483 return 0; 476 return 0;
484} 477}
diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c
index 4b1afb131380..af20fba3a1a4 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bo.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bo.c
@@ -148,6 +148,7 @@ nouveau_bo_del_ttm(struct ttm_buffer_object *bo)
148 148
149 if (unlikely(nvbo->gem)) 149 if (unlikely(nvbo->gem))
150 DRM_ERROR("bo %p still attached to GEM object\n", bo); 150 DRM_ERROR("bo %p still attached to GEM object\n", bo);
151 WARN_ON(nvbo->pin_refcnt > 0);
151 nv10_bo_put_tile_region(dev, nvbo->tile, NULL); 152 nv10_bo_put_tile_region(dev, nvbo->tile, NULL);
152 kfree(nvbo); 153 kfree(nvbo);
153} 154}
@@ -197,6 +198,17 @@ nouveau_bo_new(struct drm_device *dev, int size, int align,
197 size_t acc_size; 198 size_t acc_size;
198 int ret; 199 int ret;
199 int type = ttm_bo_type_device; 200 int type = ttm_bo_type_device;
201 int lpg_shift = 12;
202 int max_size;
203
204 if (drm->client.base.vm)
205 lpg_shift = drm->client.base.vm->vmm->lpg_shift;
206 max_size = INT_MAX & ~((1 << lpg_shift) - 1);
207
208 if (size <= 0 || size > max_size) {
209 nv_warn(drm, "skipped size %x\n", (u32)size);
210 return -EINVAL;
211 }
200 212
201 if (sg) 213 if (sg)
202 type = ttm_bo_type_sg; 214 type = ttm_bo_type_sg;
@@ -340,13 +352,15 @@ nouveau_bo_unpin(struct nouveau_bo *nvbo)
340{ 352{
341 struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev); 353 struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
342 struct ttm_buffer_object *bo = &nvbo->bo; 354 struct ttm_buffer_object *bo = &nvbo->bo;
343 int ret; 355 int ret, ref;
344 356
345 ret = ttm_bo_reserve(bo, false, false, false, 0); 357 ret = ttm_bo_reserve(bo, false, false, false, 0);
346 if (ret) 358 if (ret)
347 return ret; 359 return ret;
348 360
349 if (--nvbo->pin_refcnt) 361 ref = --nvbo->pin_refcnt;
362 WARN_ON_ONCE(ref < 0);
363 if (ref)
350 goto out; 364 goto out;
351 365
352 nouveau_bo_placement_set(nvbo, bo->mem.placement, 0); 366 nouveau_bo_placement_set(nvbo, bo->mem.placement, 0);
@@ -578,7 +592,7 @@ nve0_bo_move_init(struct nouveau_channel *chan, u32 handle)
578 int ret = RING_SPACE(chan, 2); 592 int ret = RING_SPACE(chan, 2);
579 if (ret == 0) { 593 if (ret == 0) {
580 BEGIN_NVC0(chan, NvSubCopy, 0x0000, 1); 594 BEGIN_NVC0(chan, NvSubCopy, 0x0000, 1);
581 OUT_RING (chan, handle); 595 OUT_RING (chan, handle & 0x0000ffff);
582 FIRE_RING (chan); 596 FIRE_RING (chan);
583 } 597 }
584 return ret; 598 return ret;
@@ -973,7 +987,7 @@ nouveau_bo_move_m2mf(struct ttm_buffer_object *bo, int evict, bool intr,
973 struct ttm_mem_reg *old_mem = &bo->mem; 987 struct ttm_mem_reg *old_mem = &bo->mem;
974 int ret; 988 int ret;
975 989
976 mutex_lock(&chan->cli->mutex); 990 mutex_lock_nested(&chan->cli->mutex, SINGLE_DEPTH_NESTING);
977 991
978 /* create temporary vmas for the transfer and attach them to the 992 /* create temporary vmas for the transfer and attach them to the
979 * old nouveau_mem node, these will get cleaned up after ttm has 993 * old nouveau_mem node, these will get cleaned up after ttm has
@@ -1014,7 +1028,7 @@ nouveau_bo_move_init(struct nouveau_drm *drm)
1014 struct ttm_mem_reg *, struct ttm_mem_reg *); 1028 struct ttm_mem_reg *, struct ttm_mem_reg *);
1015 int (*init)(struct nouveau_channel *, u32 handle); 1029 int (*init)(struct nouveau_channel *, u32 handle);
1016 } _methods[] = { 1030 } _methods[] = {
1017 { "COPY", 0, 0xa0b5, nve0_bo_move_copy, nve0_bo_move_init }, 1031 { "COPY", 4, 0xa0b5, nve0_bo_move_copy, nve0_bo_move_init },
1018 { "GRCE", 0, 0xa0b5, nve0_bo_move_copy, nvc0_bo_move_init }, 1032 { "GRCE", 0, 0xa0b5, nve0_bo_move_copy, nvc0_bo_move_init },
1019 { "COPY1", 5, 0x90b8, nvc0_bo_move_copy, nvc0_bo_move_init }, 1033 { "COPY1", 5, 0x90b8, nvc0_bo_move_copy, nvc0_bo_move_init },
1020 { "COPY0", 4, 0x90b5, nvc0_bo_move_copy, nvc0_bo_move_init }, 1034 { "COPY0", 4, 0x90b5, nvc0_bo_move_copy, nvc0_bo_move_init },
@@ -1034,7 +1048,7 @@ nouveau_bo_move_init(struct nouveau_drm *drm)
1034 struct nouveau_channel *chan; 1048 struct nouveau_channel *chan;
1035 u32 handle = (mthd->engine << 16) | mthd->oclass; 1049 u32 handle = (mthd->engine << 16) | mthd->oclass;
1036 1050
1037 if (mthd->init == nve0_bo_move_init) 1051 if (mthd->engine)
1038 chan = drm->cechan; 1052 chan = drm->cechan;
1039 else 1053 else
1040 chan = drm->channel; 1054 chan = drm->channel;
diff --git a/drivers/gpu/drm/nouveau/nouveau_display.c b/drivers/gpu/drm/nouveau/nouveau_display.c
index 708b2d1c0037..907d20ef6d4d 100644
--- a/drivers/gpu/drm/nouveau/nouveau_display.c
+++ b/drivers/gpu/drm/nouveau/nouveau_display.c
@@ -138,7 +138,7 @@ nouveau_user_framebuffer_create(struct drm_device *dev,
138{ 138{
139 struct nouveau_framebuffer *nouveau_fb; 139 struct nouveau_framebuffer *nouveau_fb;
140 struct drm_gem_object *gem; 140 struct drm_gem_object *gem;
141 int ret; 141 int ret = -ENOMEM;
142 142
143 gem = drm_gem_object_lookup(dev, file_priv, mode_cmd->handles[0]); 143 gem = drm_gem_object_lookup(dev, file_priv, mode_cmd->handles[0]);
144 if (!gem) 144 if (!gem)
@@ -146,15 +146,19 @@ nouveau_user_framebuffer_create(struct drm_device *dev,
146 146
147 nouveau_fb = kzalloc(sizeof(struct nouveau_framebuffer), GFP_KERNEL); 147 nouveau_fb = kzalloc(sizeof(struct nouveau_framebuffer), GFP_KERNEL);
148 if (!nouveau_fb) 148 if (!nouveau_fb)
149 return ERR_PTR(-ENOMEM); 149 goto err_unref;
150 150
151 ret = nouveau_framebuffer_init(dev, nouveau_fb, mode_cmd, nouveau_gem_object(gem)); 151 ret = nouveau_framebuffer_init(dev, nouveau_fb, mode_cmd, nouveau_gem_object(gem));
152 if (ret) { 152 if (ret)
153 drm_gem_object_unreference(gem); 153 goto err;
154 return ERR_PTR(ret);
155 }
156 154
157 return &nouveau_fb->base; 155 return &nouveau_fb->base;
156
157err:
158 kfree(nouveau_fb);
159err_unref:
160 drm_gem_object_unreference(gem);
161 return ERR_PTR(ret);
158} 162}
159 163
160static const struct drm_mode_config_funcs nouveau_mode_config_funcs = { 164static const struct drm_mode_config_funcs nouveau_mode_config_funcs = {
@@ -524,9 +528,12 @@ nouveau_crtc_page_flip(struct drm_crtc *crtc, struct drm_framebuffer *fb,
524 struct nouveau_page_flip_state *s; 528 struct nouveau_page_flip_state *s;
525 struct nouveau_channel *chan = NULL; 529 struct nouveau_channel *chan = NULL;
526 struct nouveau_fence *fence; 530 struct nouveau_fence *fence;
527 struct list_head res; 531 struct ttm_validate_buffer resv[2] = {
528 struct ttm_validate_buffer res_val[2]; 532 { .bo = &old_bo->bo },
533 { .bo = &new_bo->bo },
534 };
529 struct ww_acquire_ctx ticket; 535 struct ww_acquire_ctx ticket;
536 LIST_HEAD(res);
530 int ret; 537 int ret;
531 538
532 if (!drm->channel) 539 if (!drm->channel)
@@ -545,27 +552,19 @@ nouveau_crtc_page_flip(struct drm_crtc *crtc, struct drm_framebuffer *fb,
545 chan = drm->channel; 552 chan = drm->channel;
546 spin_unlock(&old_bo->bo.bdev->fence_lock); 553 spin_unlock(&old_bo->bo.bdev->fence_lock);
547 554
548 mutex_lock(&chan->cli->mutex);
549
550 if (new_bo != old_bo) { 555 if (new_bo != old_bo) {
551 ret = nouveau_bo_pin(new_bo, TTM_PL_FLAG_VRAM); 556 ret = nouveau_bo_pin(new_bo, TTM_PL_FLAG_VRAM);
552 if (likely(!ret)) { 557 if (ret)
553 res_val[0].bo = &old_bo->bo; 558 goto fail_free;
554 res_val[1].bo = &new_bo->bo;
555 INIT_LIST_HEAD(&res);
556 list_add_tail(&res_val[0].head, &res);
557 list_add_tail(&res_val[1].head, &res);
558 ret = ttm_eu_reserve_buffers(&ticket, &res);
559 if (ret)
560 nouveau_bo_unpin(new_bo);
561 }
562 } else
563 ret = ttm_bo_reserve(&new_bo->bo, false, false, false, 0);
564 559
565 if (ret) { 560 list_add(&resv[1].head, &res);
566 mutex_unlock(&chan->cli->mutex);
567 goto fail_free;
568 } 561 }
562 list_add(&resv[0].head, &res);
563
564 mutex_lock(&chan->cli->mutex);
565 ret = ttm_eu_reserve_buffers(&ticket, &res);
566 if (ret)
567 goto fail_unpin;
569 568
570 /* Initialize a page flip struct */ 569 /* Initialize a page flip struct */
571 *s = (struct nouveau_page_flip_state) 570 *s = (struct nouveau_page_flip_state)
@@ -576,10 +575,8 @@ nouveau_crtc_page_flip(struct drm_crtc *crtc, struct drm_framebuffer *fb,
576 /* Emit a page flip */ 575 /* Emit a page flip */
577 if (nv_device(drm->device)->card_type >= NV_50) { 576 if (nv_device(drm->device)->card_type >= NV_50) {
578 ret = nv50_display_flip_next(crtc, fb, chan, 0); 577 ret = nv50_display_flip_next(crtc, fb, chan, 0);
579 if (ret) { 578 if (ret)
580 mutex_unlock(&chan->cli->mutex);
581 goto fail_unreserve; 579 goto fail_unreserve;
582 }
583 } 580 }
584 581
585 ret = nouveau_page_flip_emit(chan, old_bo, new_bo, s, &fence); 582 ret = nouveau_page_flip_emit(chan, old_bo, new_bo, s, &fence);
@@ -590,22 +587,18 @@ nouveau_crtc_page_flip(struct drm_crtc *crtc, struct drm_framebuffer *fb,
590 /* Update the crtc struct and cleanup */ 587 /* Update the crtc struct and cleanup */
591 crtc->fb = fb; 588 crtc->fb = fb;
592 589
593 if (old_bo != new_bo) { 590 ttm_eu_fence_buffer_objects(&ticket, &res, fence);
594 ttm_eu_fence_buffer_objects(&ticket, &res, fence); 591 if (old_bo != new_bo)
595 nouveau_bo_unpin(old_bo); 592 nouveau_bo_unpin(old_bo);
596 } else {
597 nouveau_bo_fence(new_bo, fence);
598 ttm_bo_unreserve(&new_bo->bo);
599 }
600 nouveau_fence_unref(&fence); 593 nouveau_fence_unref(&fence);
601 return 0; 594 return 0;
602 595
603fail_unreserve: 596fail_unreserve:
604 if (old_bo != new_bo) { 597 ttm_eu_backoff_reservation(&ticket, &res);
605 ttm_eu_backoff_reservation(&ticket, &res); 598fail_unpin:
599 mutex_unlock(&chan->cli->mutex);
600 if (old_bo != new_bo)
606 nouveau_bo_unpin(new_bo); 601 nouveau_bo_unpin(new_bo);
607 } else
608 ttm_bo_unreserve(&new_bo->bo);
609fail_free: 602fail_free:
610 kfree(s); 603 kfree(s);
611 return ret; 604 return ret;
diff --git a/drivers/gpu/drm/nouveau/nouveau_drm.c b/drivers/gpu/drm/nouveau/nouveau_drm.c
index 218a4b522fe5..61972668fd05 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_drm.c
@@ -192,6 +192,18 @@ nouveau_accel_init(struct nouveau_drm *drm)
192 192
193 arg0 = NVE0_CHANNEL_IND_ENGINE_GR; 193 arg0 = NVE0_CHANNEL_IND_ENGINE_GR;
194 arg1 = 1; 194 arg1 = 1;
195 } else
196 if (device->chipset >= 0xa3 &&
197 device->chipset != 0xaa &&
198 device->chipset != 0xac) {
199 ret = nouveau_channel_new(drm, &drm->client, NVDRM_DEVICE,
200 NVDRM_CHAN + 1, NvDmaFB, NvDmaTT,
201 &drm->cechan);
202 if (ret)
203 NV_ERROR(drm, "failed to create ce channel, %d\n", ret);
204
205 arg0 = NvDmaFB;
206 arg1 = NvDmaTT;
195 } else { 207 } else {
196 arg0 = NvDmaFB; 208 arg0 = NvDmaFB;
197 arg1 = NvDmaTT; 209 arg1 = NvDmaTT;
@@ -284,8 +296,6 @@ static int nouveau_drm_probe(struct pci_dev *pdev,
284 return 0; 296 return 0;
285} 297}
286 298
287static struct lock_class_key drm_client_lock_class_key;
288
289static int 299static int
290nouveau_drm_load(struct drm_device *dev, unsigned long flags) 300nouveau_drm_load(struct drm_device *dev, unsigned long flags)
291{ 301{
@@ -297,7 +307,6 @@ nouveau_drm_load(struct drm_device *dev, unsigned long flags)
297 ret = nouveau_cli_create(pdev, "DRM", sizeof(*drm), (void**)&drm); 307 ret = nouveau_cli_create(pdev, "DRM", sizeof(*drm), (void**)&drm);
298 if (ret) 308 if (ret)
299 return ret; 309 return ret;
300 lockdep_set_class(&drm->client.mutex, &drm_client_lock_class_key);
301 310
302 dev->dev_private = drm; 311 dev->dev_private = drm;
303 drm->dev = dev; 312 drm->dev = dev;
diff --git a/drivers/gpu/drm/nouveau/nouveau_fbcon.c b/drivers/gpu/drm/nouveau/nouveau_fbcon.c
index 9352010030e9..8f6d63d7edd3 100644
--- a/drivers/gpu/drm/nouveau/nouveau_fbcon.c
+++ b/drivers/gpu/drm/nouveau/nouveau_fbcon.c
@@ -385,6 +385,7 @@ out_unlock:
385 mutex_unlock(&dev->struct_mutex); 385 mutex_unlock(&dev->struct_mutex);
386 if (chan) 386 if (chan)
387 nouveau_bo_vma_del(nvbo, &fbcon->nouveau_fb.vma); 387 nouveau_bo_vma_del(nvbo, &fbcon->nouveau_fb.vma);
388 nouveau_bo_unmap(nvbo);
388out_unpin: 389out_unpin:
389 nouveau_bo_unpin(nvbo); 390 nouveau_bo_unpin(nvbo);
390out_unref: 391out_unref:
@@ -397,7 +398,8 @@ void
397nouveau_fbcon_output_poll_changed(struct drm_device *dev) 398nouveau_fbcon_output_poll_changed(struct drm_device *dev)
398{ 399{
399 struct nouveau_drm *drm = nouveau_drm(dev); 400 struct nouveau_drm *drm = nouveau_drm(dev);
400 drm_fb_helper_hotplug_event(&drm->fbcon->helper); 401 if (drm->fbcon)
402 drm_fb_helper_hotplug_event(&drm->fbcon->helper);
401} 403}
402 404
403static int 405static int
diff --git a/drivers/gpu/drm/nouveau/nouveau_fence.c b/drivers/gpu/drm/nouveau/nouveau_fence.c
index 1680d9187bab..be3149932c2d 100644
--- a/drivers/gpu/drm/nouveau/nouveau_fence.c
+++ b/drivers/gpu/drm/nouveau/nouveau_fence.c
@@ -143,7 +143,7 @@ nouveau_fence_emit(struct nouveau_fence *fence, struct nouveau_channel *chan)
143 int ret; 143 int ret;
144 144
145 fence->channel = chan; 145 fence->channel = chan;
146 fence->timeout = jiffies + (3 * DRM_HZ); 146 fence->timeout = jiffies + (15 * DRM_HZ);
147 fence->sequence = ++fctx->sequence; 147 fence->sequence = ++fctx->sequence;
148 148
149 ret = fctx->emit(fence); 149 ret = fctx->emit(fence);
diff --git a/drivers/gpu/drm/nouveau/nouveau_gem.c b/drivers/gpu/drm/nouveau/nouveau_gem.c
index e72d09c068a8..830cb7bad922 100644
--- a/drivers/gpu/drm/nouveau/nouveau_gem.c
+++ b/drivers/gpu/drm/nouveau/nouveau_gem.c
@@ -50,12 +50,6 @@ nouveau_gem_object_del(struct drm_gem_object *gem)
50 return; 50 return;
51 nvbo->gem = NULL; 51 nvbo->gem = NULL;
52 52
53 /* Lockdep hates you for doing reserve with gem object lock held */
54 if (WARN_ON_ONCE(nvbo->pin_refcnt)) {
55 nvbo->pin_refcnt = 1;
56 nouveau_bo_unpin(nvbo);
57 }
58
59 if (gem->import_attach) 53 if (gem->import_attach)
60 drm_prime_gem_destroy(gem, nvbo->bo.sg); 54 drm_prime_gem_destroy(gem, nvbo->bo.sg);
61 55
diff --git a/drivers/gpu/drm/nouveau/nv17_fence.c b/drivers/gpu/drm/nouveau/nv17_fence.c
index 8e47a9bae8c3..22aa9963ea6f 100644
--- a/drivers/gpu/drm/nouveau/nv17_fence.c
+++ b/drivers/gpu/drm/nouveau/nv17_fence.c
@@ -76,7 +76,7 @@ nv17_fence_context_new(struct nouveau_channel *chan)
76 struct ttm_mem_reg *mem = &priv->bo->bo.mem; 76 struct ttm_mem_reg *mem = &priv->bo->bo.mem;
77 struct nouveau_object *object; 77 struct nouveau_object *object;
78 u32 start = mem->start * PAGE_SIZE; 78 u32 start = mem->start * PAGE_SIZE;
79 u32 limit = mem->start + mem->size - 1; 79 u32 limit = start + mem->size - 1;
80 int ret = 0; 80 int ret = 0;
81 81
82 fctx = chan->fence = kzalloc(sizeof(*fctx), GFP_KERNEL); 82 fctx = chan->fence = kzalloc(sizeof(*fctx), GFP_KERNEL);
diff --git a/drivers/gpu/drm/nouveau/nv50_display.c b/drivers/gpu/drm/nouveau/nv50_display.c
index 54dc6355b0c2..8b40a36c1b57 100644
--- a/drivers/gpu/drm/nouveau/nv50_display.c
+++ b/drivers/gpu/drm/nouveau/nv50_display.c
@@ -355,6 +355,7 @@ struct nv50_oimm {
355 355
356struct nv50_head { 356struct nv50_head {
357 struct nouveau_crtc base; 357 struct nouveau_crtc base;
358 struct nouveau_bo *image;
358 struct nv50_curs curs; 359 struct nv50_curs curs;
359 struct nv50_sync sync; 360 struct nv50_sync sync;
360 struct nv50_ovly ovly; 361 struct nv50_ovly ovly;
@@ -517,9 +518,10 @@ nv50_display_flip_next(struct drm_crtc *crtc, struct drm_framebuffer *fb,
517{ 518{
518 struct nouveau_framebuffer *nv_fb = nouveau_framebuffer(fb); 519 struct nouveau_framebuffer *nv_fb = nouveau_framebuffer(fb);
519 struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc); 520 struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
521 struct nv50_head *head = nv50_head(crtc);
520 struct nv50_sync *sync = nv50_sync(crtc); 522 struct nv50_sync *sync = nv50_sync(crtc);
521 int head = nv_crtc->index, ret;
522 u32 *push; 523 u32 *push;
524 int ret;
523 525
524 swap_interval <<= 4; 526 swap_interval <<= 4;
525 if (swap_interval == 0) 527 if (swap_interval == 0)
@@ -537,7 +539,7 @@ nv50_display_flip_next(struct drm_crtc *crtc, struct drm_framebuffer *fb,
537 return ret; 539 return ret;
538 540
539 BEGIN_NV04(chan, 0, NV11_SUBCHAN_DMA_SEMAPHORE, 2); 541 BEGIN_NV04(chan, 0, NV11_SUBCHAN_DMA_SEMAPHORE, 2);
540 OUT_RING (chan, NvEvoSema0 + head); 542 OUT_RING (chan, NvEvoSema0 + nv_crtc->index);
541 OUT_RING (chan, sync->addr ^ 0x10); 543 OUT_RING (chan, sync->addr ^ 0x10);
542 BEGIN_NV04(chan, 0, NV11_SUBCHAN_SEMAPHORE_RELEASE, 1); 544 BEGIN_NV04(chan, 0, NV11_SUBCHAN_SEMAPHORE_RELEASE, 1);
543 OUT_RING (chan, sync->data + 1); 545 OUT_RING (chan, sync->data + 1);
@@ -546,7 +548,7 @@ nv50_display_flip_next(struct drm_crtc *crtc, struct drm_framebuffer *fb,
546 OUT_RING (chan, sync->data); 548 OUT_RING (chan, sync->data);
547 } else 549 } else
548 if (chan && nv_mclass(chan->object) < NVC0_CHANNEL_IND_CLASS) { 550 if (chan && nv_mclass(chan->object) < NVC0_CHANNEL_IND_CLASS) {
549 u64 addr = nv84_fence_crtc(chan, head) + sync->addr; 551 u64 addr = nv84_fence_crtc(chan, nv_crtc->index) + sync->addr;
550 ret = RING_SPACE(chan, 12); 552 ret = RING_SPACE(chan, 12);
551 if (ret) 553 if (ret)
552 return ret; 554 return ret;
@@ -565,7 +567,7 @@ nv50_display_flip_next(struct drm_crtc *crtc, struct drm_framebuffer *fb,
565 OUT_RING (chan, NV84_SUBCHAN_SEMAPHORE_TRIGGER_ACQUIRE_EQUAL); 567 OUT_RING (chan, NV84_SUBCHAN_SEMAPHORE_TRIGGER_ACQUIRE_EQUAL);
566 } else 568 } else
567 if (chan) { 569 if (chan) {
568 u64 addr = nv84_fence_crtc(chan, head) + sync->addr; 570 u64 addr = nv84_fence_crtc(chan, nv_crtc->index) + sync->addr;
569 ret = RING_SPACE(chan, 10); 571 ret = RING_SPACE(chan, 10);
570 if (ret) 572 if (ret)
571 return ret; 573 return ret;
@@ -630,6 +632,8 @@ nv50_display_flip_next(struct drm_crtc *crtc, struct drm_framebuffer *fb,
630 evo_mthd(push, 0x0080, 1); 632 evo_mthd(push, 0x0080, 1);
631 evo_data(push, 0x00000000); 633 evo_data(push, 0x00000000);
632 evo_kick(push, sync); 634 evo_kick(push, sync);
635
636 nouveau_bo_ref(nv_fb->nvbo, &head->image);
633 return 0; 637 return 0;
634} 638}
635 639
@@ -1038,18 +1042,17 @@ static int
1038nv50_crtc_swap_fbs(struct drm_crtc *crtc, struct drm_framebuffer *old_fb) 1042nv50_crtc_swap_fbs(struct drm_crtc *crtc, struct drm_framebuffer *old_fb)
1039{ 1043{
1040 struct nouveau_framebuffer *nvfb = nouveau_framebuffer(crtc->fb); 1044 struct nouveau_framebuffer *nvfb = nouveau_framebuffer(crtc->fb);
1045 struct nv50_head *head = nv50_head(crtc);
1041 int ret; 1046 int ret;
1042 1047
1043 ret = nouveau_bo_pin(nvfb->nvbo, TTM_PL_FLAG_VRAM); 1048 ret = nouveau_bo_pin(nvfb->nvbo, TTM_PL_FLAG_VRAM);
1044 if (ret) 1049 if (ret == 0) {
1045 return ret; 1050 if (head->image)
1046 1051 nouveau_bo_unpin(head->image);
1047 if (old_fb) { 1052 nouveau_bo_ref(nvfb->nvbo, &head->image);
1048 nvfb = nouveau_framebuffer(old_fb);
1049 nouveau_bo_unpin(nvfb->nvbo);
1050 } 1053 }
1051 1054
1052 return 0; 1055 return ret;
1053} 1056}
1054 1057
1055static int 1058static int
@@ -1198,6 +1201,15 @@ nv50_crtc_lut_load(struct drm_crtc *crtc)
1198 } 1201 }
1199} 1202}
1200 1203
1204static void
1205nv50_crtc_disable(struct drm_crtc *crtc)
1206{
1207 struct nv50_head *head = nv50_head(crtc);
1208 if (head->image)
1209 nouveau_bo_unpin(head->image);
1210 nouveau_bo_ref(NULL, &head->image);
1211}
1212
1201static int 1213static int
1202nv50_crtc_cursor_set(struct drm_crtc *crtc, struct drm_file *file_priv, 1214nv50_crtc_cursor_set(struct drm_crtc *crtc, struct drm_file *file_priv,
1203 uint32_t handle, uint32_t width, uint32_t height) 1215 uint32_t handle, uint32_t width, uint32_t height)
@@ -1271,18 +1283,29 @@ nv50_crtc_destroy(struct drm_crtc *crtc)
1271 struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc); 1283 struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
1272 struct nv50_disp *disp = nv50_disp(crtc->dev); 1284 struct nv50_disp *disp = nv50_disp(crtc->dev);
1273 struct nv50_head *head = nv50_head(crtc); 1285 struct nv50_head *head = nv50_head(crtc);
1286
1274 nv50_dmac_destroy(disp->core, &head->ovly.base); 1287 nv50_dmac_destroy(disp->core, &head->ovly.base);
1275 nv50_pioc_destroy(disp->core, &head->oimm.base); 1288 nv50_pioc_destroy(disp->core, &head->oimm.base);
1276 nv50_dmac_destroy(disp->core, &head->sync.base); 1289 nv50_dmac_destroy(disp->core, &head->sync.base);
1277 nv50_pioc_destroy(disp->core, &head->curs.base); 1290 nv50_pioc_destroy(disp->core, &head->curs.base);
1291
1292 /*XXX: this shouldn't be necessary, but the core doesn't call
1293 * disconnect() during the cleanup paths
1294 */
1295 if (head->image)
1296 nouveau_bo_unpin(head->image);
1297 nouveau_bo_ref(NULL, &head->image);
1298
1278 nouveau_bo_unmap(nv_crtc->cursor.nvbo); 1299 nouveau_bo_unmap(nv_crtc->cursor.nvbo);
1279 if (nv_crtc->cursor.nvbo) 1300 if (nv_crtc->cursor.nvbo)
1280 nouveau_bo_unpin(nv_crtc->cursor.nvbo); 1301 nouveau_bo_unpin(nv_crtc->cursor.nvbo);
1281 nouveau_bo_ref(NULL, &nv_crtc->cursor.nvbo); 1302 nouveau_bo_ref(NULL, &nv_crtc->cursor.nvbo);
1303
1282 nouveau_bo_unmap(nv_crtc->lut.nvbo); 1304 nouveau_bo_unmap(nv_crtc->lut.nvbo);
1283 if (nv_crtc->lut.nvbo) 1305 if (nv_crtc->lut.nvbo)
1284 nouveau_bo_unpin(nv_crtc->lut.nvbo); 1306 nouveau_bo_unpin(nv_crtc->lut.nvbo);
1285 nouveau_bo_ref(NULL, &nv_crtc->lut.nvbo); 1307 nouveau_bo_ref(NULL, &nv_crtc->lut.nvbo);
1308
1286 drm_crtc_cleanup(crtc); 1309 drm_crtc_cleanup(crtc);
1287 kfree(crtc); 1310 kfree(crtc);
1288} 1311}
@@ -1296,6 +1319,7 @@ static const struct drm_crtc_helper_funcs nv50_crtc_hfunc = {
1296 .mode_set_base = nv50_crtc_mode_set_base, 1319 .mode_set_base = nv50_crtc_mode_set_base,
1297 .mode_set_base_atomic = nv50_crtc_mode_set_base_atomic, 1320 .mode_set_base_atomic = nv50_crtc_mode_set_base_atomic,
1298 .load_lut = nv50_crtc_lut_load, 1321 .load_lut = nv50_crtc_lut_load,
1322 .disable = nv50_crtc_disable,
1299}; 1323};
1300 1324
1301static const struct drm_crtc_funcs nv50_crtc_func = { 1325static const struct drm_crtc_funcs nv50_crtc_func = {
diff --git a/drivers/gpu/drm/nouveau/nv50_fence.c b/drivers/gpu/drm/nouveau/nv50_fence.c
index f9701e567db8..0ee363840035 100644
--- a/drivers/gpu/drm/nouveau/nv50_fence.c
+++ b/drivers/gpu/drm/nouveau/nv50_fence.c
@@ -39,6 +39,8 @@ nv50_fence_context_new(struct nouveau_channel *chan)
39 struct nv10_fence_chan *fctx; 39 struct nv10_fence_chan *fctx;
40 struct ttm_mem_reg *mem = &priv->bo->bo.mem; 40 struct ttm_mem_reg *mem = &priv->bo->bo.mem;
41 struct nouveau_object *object; 41 struct nouveau_object *object;
42 u32 start = mem->start * PAGE_SIZE;
43 u32 limit = start + mem->size - 1;
42 int ret, i; 44 int ret, i;
43 45
44 fctx = chan->fence = kzalloc(sizeof(*fctx), GFP_KERNEL); 46 fctx = chan->fence = kzalloc(sizeof(*fctx), GFP_KERNEL);
@@ -51,26 +53,28 @@ nv50_fence_context_new(struct nouveau_channel *chan)
51 fctx->base.sync = nv17_fence_sync; 53 fctx->base.sync = nv17_fence_sync;
52 54
53 ret = nouveau_object_new(nv_object(chan->cli), chan->handle, 55 ret = nouveau_object_new(nv_object(chan->cli), chan->handle,
54 NvSema, 0x0002, 56 NvSema, 0x003d,
55 &(struct nv_dma_class) { 57 &(struct nv_dma_class) {
56 .flags = NV_DMA_TARGET_VRAM | 58 .flags = NV_DMA_TARGET_VRAM |
57 NV_DMA_ACCESS_RDWR, 59 NV_DMA_ACCESS_RDWR,
58 .start = mem->start * PAGE_SIZE, 60 .start = start,
59 .limit = mem->size - 1, 61 .limit = limit,
60 }, sizeof(struct nv_dma_class), 62 }, sizeof(struct nv_dma_class),
61 &object); 63 &object);
62 64
63 /* dma objects for display sync channel semaphore blocks */ 65 /* dma objects for display sync channel semaphore blocks */
64 for (i = 0; !ret && i < dev->mode_config.num_crtc; i++) { 66 for (i = 0; !ret && i < dev->mode_config.num_crtc; i++) {
65 struct nouveau_bo *bo = nv50_display_crtc_sema(dev, i); 67 struct nouveau_bo *bo = nv50_display_crtc_sema(dev, i);
68 u32 start = bo->bo.mem.start * PAGE_SIZE;
69 u32 limit = start + bo->bo.mem.size - 1;
66 70
67 ret = nouveau_object_new(nv_object(chan->cli), chan->handle, 71 ret = nouveau_object_new(nv_object(chan->cli), chan->handle,
68 NvEvoSema0 + i, 0x003d, 72 NvEvoSema0 + i, 0x003d,
69 &(struct nv_dma_class) { 73 &(struct nv_dma_class) {
70 .flags = NV_DMA_TARGET_VRAM | 74 .flags = NV_DMA_TARGET_VRAM |
71 NV_DMA_ACCESS_RDWR, 75 NV_DMA_ACCESS_RDWR,
72 .start = bo->bo.offset, 76 .start = start,
73 .limit = bo->bo.offset + 0xfff, 77 .limit = limit,
74 }, sizeof(struct nv_dma_class), 78 }, sizeof(struct nv_dma_class),
75 &object); 79 &object);
76 } 80 }
diff --git a/drivers/gpu/drm/qxl/qxl_cmd.c b/drivers/gpu/drm/qxl/qxl_cmd.c
index 93c2f2cceb51..eb89653a7a17 100644
--- a/drivers/gpu/drm/qxl/qxl_cmd.c
+++ b/drivers/gpu/drm/qxl/qxl_cmd.c
@@ -179,9 +179,10 @@ qxl_push_command_ring_release(struct qxl_device *qdev, struct qxl_release *relea
179 uint32_t type, bool interruptible) 179 uint32_t type, bool interruptible)
180{ 180{
181 struct qxl_command cmd; 181 struct qxl_command cmd;
182 struct qxl_bo_list *entry = list_first_entry(&release->bos, struct qxl_bo_list, tv.head);
182 183
183 cmd.type = type; 184 cmd.type = type;
184 cmd.data = qxl_bo_physical_address(qdev, release->bos[0], release->release_offset); 185 cmd.data = qxl_bo_physical_address(qdev, to_qxl_bo(entry->tv.bo), release->release_offset);
185 186
186 return qxl_ring_push(qdev->command_ring, &cmd, interruptible); 187 return qxl_ring_push(qdev->command_ring, &cmd, interruptible);
187} 188}
@@ -191,9 +192,10 @@ qxl_push_cursor_ring_release(struct qxl_device *qdev, struct qxl_release *releas
191 uint32_t type, bool interruptible) 192 uint32_t type, bool interruptible)
192{ 193{
193 struct qxl_command cmd; 194 struct qxl_command cmd;
195 struct qxl_bo_list *entry = list_first_entry(&release->bos, struct qxl_bo_list, tv.head);
194 196
195 cmd.type = type; 197 cmd.type = type;
196 cmd.data = qxl_bo_physical_address(qdev, release->bos[0], release->release_offset); 198 cmd.data = qxl_bo_physical_address(qdev, to_qxl_bo(entry->tv.bo), release->release_offset);
197 199
198 return qxl_ring_push(qdev->cursor_ring, &cmd, interruptible); 200 return qxl_ring_push(qdev->cursor_ring, &cmd, interruptible);
199} 201}
@@ -214,7 +216,6 @@ int qxl_garbage_collect(struct qxl_device *qdev)
214 struct qxl_release *release; 216 struct qxl_release *release;
215 uint64_t id, next_id; 217 uint64_t id, next_id;
216 int i = 0; 218 int i = 0;
217 int ret;
218 union qxl_release_info *info; 219 union qxl_release_info *info;
219 220
220 while (qxl_ring_pop(qdev->release_ring, &id)) { 221 while (qxl_ring_pop(qdev->release_ring, &id)) {
@@ -224,17 +225,10 @@ int qxl_garbage_collect(struct qxl_device *qdev)
224 if (release == NULL) 225 if (release == NULL)
225 break; 226 break;
226 227
227 ret = qxl_release_reserve(qdev, release, false);
228 if (ret) {
229 qxl_io_log(qdev, "failed to reserve release on garbage collect %lld\n", id);
230 DRM_ERROR("failed to reserve release %lld\n", id);
231 }
232
233 info = qxl_release_map(qdev, release); 228 info = qxl_release_map(qdev, release);
234 next_id = info->next; 229 next_id = info->next;
235 qxl_release_unmap(qdev, release, info); 230 qxl_release_unmap(qdev, release, info);
236 231
237 qxl_release_unreserve(qdev, release);
238 QXL_INFO(qdev, "popped %lld, next %lld\n", id, 232 QXL_INFO(qdev, "popped %lld, next %lld\n", id,
239 next_id); 233 next_id);
240 234
@@ -259,27 +253,29 @@ int qxl_garbage_collect(struct qxl_device *qdev)
259 return i; 253 return i;
260} 254}
261 255
262int qxl_alloc_bo_reserved(struct qxl_device *qdev, unsigned long size, 256int qxl_alloc_bo_reserved(struct qxl_device *qdev,
257 struct qxl_release *release,
258 unsigned long size,
263 struct qxl_bo **_bo) 259 struct qxl_bo **_bo)
264{ 260{
265 struct qxl_bo *bo; 261 struct qxl_bo *bo;
266 int ret; 262 int ret;
267 263
268 ret = qxl_bo_create(qdev, size, false /* not kernel - device */, 264 ret = qxl_bo_create(qdev, size, false /* not kernel - device */,
269 QXL_GEM_DOMAIN_VRAM, NULL, &bo); 265 false, QXL_GEM_DOMAIN_VRAM, NULL, &bo);
270 if (ret) { 266 if (ret) {
271 DRM_ERROR("failed to allocate VRAM BO\n"); 267 DRM_ERROR("failed to allocate VRAM BO\n");
272 return ret; 268 return ret;
273 } 269 }
274 ret = qxl_bo_reserve(bo, false); 270 ret = qxl_release_list_add(release, bo);
275 if (unlikely(ret != 0)) 271 if (ret)
276 goto out_unref; 272 goto out_unref;
277 273
278 *_bo = bo; 274 *_bo = bo;
279 return 0; 275 return 0;
280out_unref: 276out_unref:
281 qxl_bo_unref(&bo); 277 qxl_bo_unref(&bo);
282 return 0; 278 return ret;
283} 279}
284 280
285static int wait_for_io_cmd_user(struct qxl_device *qdev, uint8_t val, long port, bool intr) 281static int wait_for_io_cmd_user(struct qxl_device *qdev, uint8_t val, long port, bool intr)
@@ -503,6 +499,10 @@ int qxl_hw_surface_alloc(struct qxl_device *qdev,
503 if (ret) 499 if (ret)
504 return ret; 500 return ret;
505 501
502 ret = qxl_release_reserve_list(release, true);
503 if (ret)
504 return ret;
505
506 cmd = (struct qxl_surface_cmd *)qxl_release_map(qdev, release); 506 cmd = (struct qxl_surface_cmd *)qxl_release_map(qdev, release);
507 cmd->type = QXL_SURFACE_CMD_CREATE; 507 cmd->type = QXL_SURFACE_CMD_CREATE;
508 cmd->u.surface_create.format = surf->surf.format; 508 cmd->u.surface_create.format = surf->surf.format;
@@ -524,14 +524,11 @@ int qxl_hw_surface_alloc(struct qxl_device *qdev,
524 524
525 surf->surf_create = release; 525 surf->surf_create = release;
526 526
527 /* no need to add a release to the fence for this bo, 527 /* no need to add a release to the fence for this surface bo,
528 since it is only released when we ask to destroy the surface 528 since it is only released when we ask to destroy the surface
529 and it would never signal otherwise */ 529 and it would never signal otherwise */
530 qxl_fence_releaseable(qdev, release);
531
532 qxl_push_command_ring_release(qdev, release, QXL_CMD_SURFACE, false); 530 qxl_push_command_ring_release(qdev, release, QXL_CMD_SURFACE, false);
533 531 qxl_release_fence_buffer_objects(release);
534 qxl_release_unreserve(qdev, release);
535 532
536 surf->hw_surf_alloc = true; 533 surf->hw_surf_alloc = true;
537 spin_lock(&qdev->surf_id_idr_lock); 534 spin_lock(&qdev->surf_id_idr_lock);
@@ -573,12 +570,9 @@ int qxl_hw_surface_dealloc(struct qxl_device *qdev,
573 cmd->surface_id = id; 570 cmd->surface_id = id;
574 qxl_release_unmap(qdev, release, &cmd->release_info); 571 qxl_release_unmap(qdev, release, &cmd->release_info);
575 572
576 qxl_fence_releaseable(qdev, release);
577
578 qxl_push_command_ring_release(qdev, release, QXL_CMD_SURFACE, false); 573 qxl_push_command_ring_release(qdev, release, QXL_CMD_SURFACE, false);
579 574
580 qxl_release_unreserve(qdev, release); 575 qxl_release_fence_buffer_objects(release);
581
582 576
583 return 0; 577 return 0;
584} 578}
diff --git a/drivers/gpu/drm/qxl/qxl_display.c b/drivers/gpu/drm/qxl/qxl_display.c
index f76f5dd7bfc4..835caba026d3 100644
--- a/drivers/gpu/drm/qxl/qxl_display.c
+++ b/drivers/gpu/drm/qxl/qxl_display.c
@@ -179,7 +179,7 @@ static void qxl_crtc_destroy(struct drm_crtc *crtc)
179 kfree(qxl_crtc); 179 kfree(qxl_crtc);
180} 180}
181 181
182static void 182static int
183qxl_hide_cursor(struct qxl_device *qdev) 183qxl_hide_cursor(struct qxl_device *qdev)
184{ 184{
185 struct qxl_release *release; 185 struct qxl_release *release;
@@ -188,14 +188,22 @@ qxl_hide_cursor(struct qxl_device *qdev)
188 188
189 ret = qxl_alloc_release_reserved(qdev, sizeof(*cmd), QXL_RELEASE_CURSOR_CMD, 189 ret = qxl_alloc_release_reserved(qdev, sizeof(*cmd), QXL_RELEASE_CURSOR_CMD,
190 &release, NULL); 190 &release, NULL);
191 if (ret)
192 return ret;
193
194 ret = qxl_release_reserve_list(release, true);
195 if (ret) {
196 qxl_release_free(qdev, release);
197 return ret;
198 }
191 199
192 cmd = (struct qxl_cursor_cmd *)qxl_release_map(qdev, release); 200 cmd = (struct qxl_cursor_cmd *)qxl_release_map(qdev, release);
193 cmd->type = QXL_CURSOR_HIDE; 201 cmd->type = QXL_CURSOR_HIDE;
194 qxl_release_unmap(qdev, release, &cmd->release_info); 202 qxl_release_unmap(qdev, release, &cmd->release_info);
195 203
196 qxl_fence_releaseable(qdev, release);
197 qxl_push_cursor_ring_release(qdev, release, QXL_CMD_CURSOR, false); 204 qxl_push_cursor_ring_release(qdev, release, QXL_CMD_CURSOR, false);
198 qxl_release_unreserve(qdev, release); 205 qxl_release_fence_buffer_objects(release);
206 return 0;
199} 207}
200 208
201static int qxl_crtc_cursor_set2(struct drm_crtc *crtc, 209static int qxl_crtc_cursor_set2(struct drm_crtc *crtc,
@@ -216,10 +224,8 @@ static int qxl_crtc_cursor_set2(struct drm_crtc *crtc,
216 224
217 int size = 64*64*4; 225 int size = 64*64*4;
218 int ret = 0; 226 int ret = 0;
219 if (!handle) { 227 if (!handle)
220 qxl_hide_cursor(qdev); 228 return qxl_hide_cursor(qdev);
221 return 0;
222 }
223 229
224 obj = drm_gem_object_lookup(crtc->dev, file_priv, handle); 230 obj = drm_gem_object_lookup(crtc->dev, file_priv, handle);
225 if (!obj) { 231 if (!obj) {
@@ -234,8 +240,9 @@ static int qxl_crtc_cursor_set2(struct drm_crtc *crtc,
234 goto out_unref; 240 goto out_unref;
235 241
236 ret = qxl_bo_pin(user_bo, QXL_GEM_DOMAIN_CPU, NULL); 242 ret = qxl_bo_pin(user_bo, QXL_GEM_DOMAIN_CPU, NULL);
243 qxl_bo_unreserve(user_bo);
237 if (ret) 244 if (ret)
238 goto out_unreserve; 245 goto out_unref;
239 246
240 ret = qxl_bo_kmap(user_bo, &user_ptr); 247 ret = qxl_bo_kmap(user_bo, &user_ptr);
241 if (ret) 248 if (ret)
@@ -246,14 +253,20 @@ static int qxl_crtc_cursor_set2(struct drm_crtc *crtc,
246 &release, NULL); 253 &release, NULL);
247 if (ret) 254 if (ret)
248 goto out_kunmap; 255 goto out_kunmap;
249 ret = qxl_alloc_bo_reserved(qdev, sizeof(struct qxl_cursor) + size, 256
250 &cursor_bo); 257 ret = qxl_alloc_bo_reserved(qdev, release, sizeof(struct qxl_cursor) + size,
258 &cursor_bo);
251 if (ret) 259 if (ret)
252 goto out_free_release; 260 goto out_free_release;
253 ret = qxl_bo_kmap(cursor_bo, (void **)&cursor); 261
262 ret = qxl_release_reserve_list(release, false);
254 if (ret) 263 if (ret)
255 goto out_free_bo; 264 goto out_free_bo;
256 265
266 ret = qxl_bo_kmap(cursor_bo, (void **)&cursor);
267 if (ret)
268 goto out_backoff;
269
257 cursor->header.unique = 0; 270 cursor->header.unique = 0;
258 cursor->header.type = SPICE_CURSOR_TYPE_ALPHA; 271 cursor->header.type = SPICE_CURSOR_TYPE_ALPHA;
259 cursor->header.width = 64; 272 cursor->header.width = 64;
@@ -269,11 +282,7 @@ static int qxl_crtc_cursor_set2(struct drm_crtc *crtc,
269 282
270 qxl_bo_kunmap(cursor_bo); 283 qxl_bo_kunmap(cursor_bo);
271 284
272 /* finish with the userspace bo */
273 qxl_bo_kunmap(user_bo); 285 qxl_bo_kunmap(user_bo);
274 qxl_bo_unpin(user_bo);
275 qxl_bo_unreserve(user_bo);
276 drm_gem_object_unreference_unlocked(obj);
277 286
278 cmd = (struct qxl_cursor_cmd *)qxl_release_map(qdev, release); 287 cmd = (struct qxl_cursor_cmd *)qxl_release_map(qdev, release);
279 cmd->type = QXL_CURSOR_SET; 288 cmd->type = QXL_CURSOR_SET;
@@ -281,30 +290,35 @@ static int qxl_crtc_cursor_set2(struct drm_crtc *crtc,
281 cmd->u.set.position.y = qcrtc->cur_y; 290 cmd->u.set.position.y = qcrtc->cur_y;
282 291
283 cmd->u.set.shape = qxl_bo_physical_address(qdev, cursor_bo, 0); 292 cmd->u.set.shape = qxl_bo_physical_address(qdev, cursor_bo, 0);
284 qxl_release_add_res(qdev, release, cursor_bo);
285 293
286 cmd->u.set.visible = 1; 294 cmd->u.set.visible = 1;
287 qxl_release_unmap(qdev, release, &cmd->release_info); 295 qxl_release_unmap(qdev, release, &cmd->release_info);
288 296
289 qxl_fence_releaseable(qdev, release);
290 qxl_push_cursor_ring_release(qdev, release, QXL_CMD_CURSOR, false); 297 qxl_push_cursor_ring_release(qdev, release, QXL_CMD_CURSOR, false);
291 qxl_release_unreserve(qdev, release); 298 qxl_release_fence_buffer_objects(release);
299
300 /* finish with the userspace bo */
301 ret = qxl_bo_reserve(user_bo, false);
302 if (!ret) {
303 qxl_bo_unpin(user_bo);
304 qxl_bo_unreserve(user_bo);
305 }
306 drm_gem_object_unreference_unlocked(obj);
292 307
293 qxl_bo_unreserve(cursor_bo);
294 qxl_bo_unref(&cursor_bo); 308 qxl_bo_unref(&cursor_bo);
295 309
296 return ret; 310 return ret;
311
312out_backoff:
313 qxl_release_backoff_reserve_list(release);
297out_free_bo: 314out_free_bo:
298 qxl_bo_unref(&cursor_bo); 315 qxl_bo_unref(&cursor_bo);
299out_free_release: 316out_free_release:
300 qxl_release_unreserve(qdev, release);
301 qxl_release_free(qdev, release); 317 qxl_release_free(qdev, release);
302out_kunmap: 318out_kunmap:
303 qxl_bo_kunmap(user_bo); 319 qxl_bo_kunmap(user_bo);
304out_unpin: 320out_unpin:
305 qxl_bo_unpin(user_bo); 321 qxl_bo_unpin(user_bo);
306out_unreserve:
307 qxl_bo_unreserve(user_bo);
308out_unref: 322out_unref:
309 drm_gem_object_unreference_unlocked(obj); 323 drm_gem_object_unreference_unlocked(obj);
310 return ret; 324 return ret;
@@ -322,6 +336,14 @@ static int qxl_crtc_cursor_move(struct drm_crtc *crtc,
322 336
323 ret = qxl_alloc_release_reserved(qdev, sizeof(*cmd), QXL_RELEASE_CURSOR_CMD, 337 ret = qxl_alloc_release_reserved(qdev, sizeof(*cmd), QXL_RELEASE_CURSOR_CMD,
324 &release, NULL); 338 &release, NULL);
339 if (ret)
340 return ret;
341
342 ret = qxl_release_reserve_list(release, true);
343 if (ret) {
344 qxl_release_free(qdev, release);
345 return ret;
346 }
325 347
326 qcrtc->cur_x = x; 348 qcrtc->cur_x = x;
327 qcrtc->cur_y = y; 349 qcrtc->cur_y = y;
@@ -332,9 +354,9 @@ static int qxl_crtc_cursor_move(struct drm_crtc *crtc,
332 cmd->u.position.y = qcrtc->cur_y; 354 cmd->u.position.y = qcrtc->cur_y;
333 qxl_release_unmap(qdev, release, &cmd->release_info); 355 qxl_release_unmap(qdev, release, &cmd->release_info);
334 356
335 qxl_fence_releaseable(qdev, release);
336 qxl_push_cursor_ring_release(qdev, release, QXL_CMD_CURSOR, false); 357 qxl_push_cursor_ring_release(qdev, release, QXL_CMD_CURSOR, false);
337 qxl_release_unreserve(qdev, release); 358 qxl_release_fence_buffer_objects(release);
359
338 return 0; 360 return 0;
339} 361}
340 362
diff --git a/drivers/gpu/drm/qxl/qxl_draw.c b/drivers/gpu/drm/qxl/qxl_draw.c
index 3c8c3dbf9378..56e1d633875e 100644
--- a/drivers/gpu/drm/qxl/qxl_draw.c
+++ b/drivers/gpu/drm/qxl/qxl_draw.c
@@ -23,25 +23,29 @@
23#include "qxl_drv.h" 23#include "qxl_drv.h"
24#include "qxl_object.h" 24#include "qxl_object.h"
25 25
26static int alloc_clips(struct qxl_device *qdev,
27 struct qxl_release *release,
28 unsigned num_clips,
29 struct qxl_bo **clips_bo)
30{
31 int size = sizeof(struct qxl_clip_rects) + sizeof(struct qxl_rect) * num_clips;
32
33 return qxl_alloc_bo_reserved(qdev, release, size, clips_bo);
34}
35
26/* returns a pointer to the already allocated qxl_rect array inside 36/* returns a pointer to the already allocated qxl_rect array inside
27 * the qxl_clip_rects. This is *not* the same as the memory allocated 37 * the qxl_clip_rects. This is *not* the same as the memory allocated
28 * on the device, it is offset to qxl_clip_rects.chunk.data */ 38 * on the device, it is offset to qxl_clip_rects.chunk.data */
29static struct qxl_rect *drawable_set_clipping(struct qxl_device *qdev, 39static struct qxl_rect *drawable_set_clipping(struct qxl_device *qdev,
30 struct qxl_drawable *drawable, 40 struct qxl_drawable *drawable,
31 unsigned num_clips, 41 unsigned num_clips,
32 struct qxl_bo **clips_bo, 42 struct qxl_bo *clips_bo)
33 struct qxl_release *release)
34{ 43{
35 struct qxl_clip_rects *dev_clips; 44 struct qxl_clip_rects *dev_clips;
36 int ret; 45 int ret;
37 int size = sizeof(*dev_clips) + sizeof(struct qxl_rect) * num_clips;
38 ret = qxl_alloc_bo_reserved(qdev, size, clips_bo);
39 if (ret)
40 return NULL;
41 46
42 ret = qxl_bo_kmap(*clips_bo, (void **)&dev_clips); 47 ret = qxl_bo_kmap(clips_bo, (void **)&dev_clips);
43 if (ret) { 48 if (ret) {
44 qxl_bo_unref(clips_bo);
45 return NULL; 49 return NULL;
46 } 50 }
47 dev_clips->num_rects = num_clips; 51 dev_clips->num_rects = num_clips;
@@ -52,20 +56,34 @@ static struct qxl_rect *drawable_set_clipping(struct qxl_device *qdev,
52} 56}
53 57
54static int 58static int
59alloc_drawable(struct qxl_device *qdev, struct qxl_release **release)
60{
61 int ret;
62 ret = qxl_alloc_release_reserved(qdev, sizeof(struct qxl_drawable),
63 QXL_RELEASE_DRAWABLE, release,
64 NULL);
65 return ret;
66}
67
68static void
69free_drawable(struct qxl_device *qdev, struct qxl_release *release)
70{
71 qxl_release_free(qdev, release);
72}
73
74/* release needs to be reserved at this point */
75static int
55make_drawable(struct qxl_device *qdev, int surface, uint8_t type, 76make_drawable(struct qxl_device *qdev, int surface, uint8_t type,
56 const struct qxl_rect *rect, 77 const struct qxl_rect *rect,
57 struct qxl_release **release) 78 struct qxl_release *release)
58{ 79{
59 struct qxl_drawable *drawable; 80 struct qxl_drawable *drawable;
60 int i, ret; 81 int i;
61 82
62 ret = qxl_alloc_release_reserved(qdev, sizeof(*drawable), 83 drawable = (struct qxl_drawable *)qxl_release_map(qdev, release);
63 QXL_RELEASE_DRAWABLE, release, 84 if (!drawable)
64 NULL); 85 return -ENOMEM;
65 if (ret)
66 return ret;
67 86
68 drawable = (struct qxl_drawable *)qxl_release_map(qdev, *release);
69 drawable->type = type; 87 drawable->type = type;
70 88
71 drawable->surface_id = surface; /* Only primary for now */ 89 drawable->surface_id = surface; /* Only primary for now */
@@ -91,14 +109,23 @@ make_drawable(struct qxl_device *qdev, int surface, uint8_t type,
91 drawable->bbox = *rect; 109 drawable->bbox = *rect;
92 110
93 drawable->mm_time = qdev->rom->mm_clock; 111 drawable->mm_time = qdev->rom->mm_clock;
94 qxl_release_unmap(qdev, *release, &drawable->release_info); 112 qxl_release_unmap(qdev, release, &drawable->release_info);
95 return 0; 113 return 0;
96} 114}
97 115
98static int qxl_palette_create_1bit(struct qxl_bo **palette_bo, 116static int alloc_palette_object(struct qxl_device *qdev,
117 struct qxl_release *release,
118 struct qxl_bo **palette_bo)
119{
120 return qxl_alloc_bo_reserved(qdev, release,
121 sizeof(struct qxl_palette) + sizeof(uint32_t) * 2,
122 palette_bo);
123}
124
125static int qxl_palette_create_1bit(struct qxl_bo *palette_bo,
126 struct qxl_release *release,
99 const struct qxl_fb_image *qxl_fb_image) 127 const struct qxl_fb_image *qxl_fb_image)
100{ 128{
101 struct qxl_device *qdev = qxl_fb_image->qdev;
102 const struct fb_image *fb_image = &qxl_fb_image->fb_image; 129 const struct fb_image *fb_image = &qxl_fb_image->fb_image;
103 uint32_t visual = qxl_fb_image->visual; 130 uint32_t visual = qxl_fb_image->visual;
104 const uint32_t *pseudo_palette = qxl_fb_image->pseudo_palette; 131 const uint32_t *pseudo_palette = qxl_fb_image->pseudo_palette;
@@ -108,12 +135,7 @@ static int qxl_palette_create_1bit(struct qxl_bo **palette_bo,
108 static uint64_t unique; /* we make no attempt to actually set this 135 static uint64_t unique; /* we make no attempt to actually set this
109 * correctly globaly, since that would require 136 * correctly globaly, since that would require
110 * tracking all of our palettes. */ 137 * tracking all of our palettes. */
111 138 ret = qxl_bo_kmap(palette_bo, (void **)&pal);
112 ret = qxl_alloc_bo_reserved(qdev,
113 sizeof(struct qxl_palette) + sizeof(uint32_t) * 2,
114 palette_bo);
115
116 ret = qxl_bo_kmap(*palette_bo, (void **)&pal);
117 pal->num_ents = 2; 139 pal->num_ents = 2;
118 pal->unique = unique++; 140 pal->unique = unique++;
119 if (visual == FB_VISUAL_TRUECOLOR || visual == FB_VISUAL_DIRECTCOLOR) { 141 if (visual == FB_VISUAL_TRUECOLOR || visual == FB_VISUAL_DIRECTCOLOR) {
@@ -126,7 +148,7 @@ static int qxl_palette_create_1bit(struct qxl_bo **palette_bo,
126 } 148 }
127 pal->ents[0] = bgcolor; 149 pal->ents[0] = bgcolor;
128 pal->ents[1] = fgcolor; 150 pal->ents[1] = fgcolor;
129 qxl_bo_kunmap(*palette_bo); 151 qxl_bo_kunmap(palette_bo);
130 return 0; 152 return 0;
131} 153}
132 154
@@ -144,44 +166,63 @@ void qxl_draw_opaque_fb(const struct qxl_fb_image *qxl_fb_image,
144 const char *src = fb_image->data; 166 const char *src = fb_image->data;
145 int depth = fb_image->depth; 167 int depth = fb_image->depth;
146 struct qxl_release *release; 168 struct qxl_release *release;
147 struct qxl_bo *image_bo;
148 struct qxl_image *image; 169 struct qxl_image *image;
149 int ret; 170 int ret;
150 171 struct qxl_drm_image *dimage;
172 struct qxl_bo *palette_bo = NULL;
151 if (stride == 0) 173 if (stride == 0)
152 stride = depth * width / 8; 174 stride = depth * width / 8;
153 175
176 ret = alloc_drawable(qdev, &release);
177 if (ret)
178 return;
179
180 ret = qxl_image_alloc_objects(qdev, release,
181 &dimage,
182 height, stride);
183 if (ret)
184 goto out_free_drawable;
185
186 if (depth == 1) {
187 ret = alloc_palette_object(qdev, release, &palette_bo);
188 if (ret)
189 goto out_free_image;
190 }
191
192 /* do a reservation run over all the objects we just allocated */
193 ret = qxl_release_reserve_list(release, true);
194 if (ret)
195 goto out_free_palette;
196
154 rect.left = x; 197 rect.left = x;
155 rect.right = x + width; 198 rect.right = x + width;
156 rect.top = y; 199 rect.top = y;
157 rect.bottom = y + height; 200 rect.bottom = y + height;
158 201
159 ret = make_drawable(qdev, 0, QXL_DRAW_COPY, &rect, &release); 202 ret = make_drawable(qdev, 0, QXL_DRAW_COPY, &rect, release);
160 if (ret) 203 if (ret) {
161 return; 204 qxl_release_backoff_reserve_list(release);
205 goto out_free_palette;
206 }
162 207
163 ret = qxl_image_create(qdev, release, &image_bo, 208 ret = qxl_image_init(qdev, release, dimage,
164 (const uint8_t *)src, 0, 0, 209 (const uint8_t *)src, 0, 0,
165 width, height, depth, stride); 210 width, height, depth, stride);
166 if (ret) { 211 if (ret) {
167 qxl_release_unreserve(qdev, release); 212 qxl_release_backoff_reserve_list(release);
168 qxl_release_free(qdev, release); 213 qxl_release_free(qdev, release);
169 return; 214 return;
170 } 215 }
171 216
172 if (depth == 1) { 217 if (depth == 1) {
173 struct qxl_bo *palette_bo;
174 void *ptr; 218 void *ptr;
175 ret = qxl_palette_create_1bit(&palette_bo, qxl_fb_image); 219 ret = qxl_palette_create_1bit(palette_bo, release, qxl_fb_image);
176 qxl_release_add_res(qdev, release, palette_bo);
177 220
178 ptr = qxl_bo_kmap_atomic_page(qdev, image_bo, 0); 221 ptr = qxl_bo_kmap_atomic_page(qdev, dimage->bo, 0);
179 image = ptr; 222 image = ptr;
180 image->u.bitmap.palette = 223 image->u.bitmap.palette =
181 qxl_bo_physical_address(qdev, palette_bo, 0); 224 qxl_bo_physical_address(qdev, palette_bo, 0);
182 qxl_bo_kunmap_atomic_page(qdev, image_bo, ptr); 225 qxl_bo_kunmap_atomic_page(qdev, dimage->bo, ptr);
183 qxl_bo_unreserve(palette_bo);
184 qxl_bo_unref(&palette_bo);
185 } 226 }
186 227
187 drawable = (struct qxl_drawable *)qxl_release_map(qdev, release); 228 drawable = (struct qxl_drawable *)qxl_release_map(qdev, release);
@@ -199,16 +240,20 @@ void qxl_draw_opaque_fb(const struct qxl_fb_image *qxl_fb_image,
199 drawable->u.copy.mask.bitmap = 0; 240 drawable->u.copy.mask.bitmap = 0;
200 241
201 drawable->u.copy.src_bitmap = 242 drawable->u.copy.src_bitmap =
202 qxl_bo_physical_address(qdev, image_bo, 0); 243 qxl_bo_physical_address(qdev, dimage->bo, 0);
203 qxl_release_unmap(qdev, release, &drawable->release_info); 244 qxl_release_unmap(qdev, release, &drawable->release_info);
204 245
205 qxl_release_add_res(qdev, release, image_bo);
206 qxl_bo_unreserve(image_bo);
207 qxl_bo_unref(&image_bo);
208
209 qxl_fence_releaseable(qdev, release);
210 qxl_push_command_ring_release(qdev, release, QXL_CMD_DRAW, false); 246 qxl_push_command_ring_release(qdev, release, QXL_CMD_DRAW, false);
211 qxl_release_unreserve(qdev, release); 247 qxl_release_fence_buffer_objects(release);
248
249out_free_palette:
250 if (palette_bo)
251 qxl_bo_unref(&palette_bo);
252out_free_image:
253 qxl_image_free_objects(qdev, dimage);
254out_free_drawable:
255 if (ret)
256 free_drawable(qdev, release);
212} 257}
213 258
214/* push a draw command using the given clipping rectangles as 259/* push a draw command using the given clipping rectangles as
@@ -243,10 +288,14 @@ void qxl_draw_dirty_fb(struct qxl_device *qdev,
243 int depth = qxl_fb->base.bits_per_pixel; 288 int depth = qxl_fb->base.bits_per_pixel;
244 uint8_t *surface_base; 289 uint8_t *surface_base;
245 struct qxl_release *release; 290 struct qxl_release *release;
246 struct qxl_bo *image_bo;
247 struct qxl_bo *clips_bo; 291 struct qxl_bo *clips_bo;
292 struct qxl_drm_image *dimage;
248 int ret; 293 int ret;
249 294
295 ret = alloc_drawable(qdev, &release);
296 if (ret)
297 return;
298
250 left = clips->x1; 299 left = clips->x1;
251 right = clips->x2; 300 right = clips->x2;
252 top = clips->y1; 301 top = clips->y1;
@@ -263,36 +312,52 @@ void qxl_draw_dirty_fb(struct qxl_device *qdev,
263 312
264 width = right - left; 313 width = right - left;
265 height = bottom - top; 314 height = bottom - top;
315
316 ret = alloc_clips(qdev, release, num_clips, &clips_bo);
317 if (ret)
318 goto out_free_drawable;
319
320 ret = qxl_image_alloc_objects(qdev, release,
321 &dimage,
322 height, stride);
323 if (ret)
324 goto out_free_clips;
325
326 /* do a reservation run over all the objects we just allocated */
327 ret = qxl_release_reserve_list(release, true);
328 if (ret)
329 goto out_free_image;
330
266 drawable_rect.left = left; 331 drawable_rect.left = left;
267 drawable_rect.right = right; 332 drawable_rect.right = right;
268 drawable_rect.top = top; 333 drawable_rect.top = top;
269 drawable_rect.bottom = bottom; 334 drawable_rect.bottom = bottom;
335
270 ret = make_drawable(qdev, 0, QXL_DRAW_COPY, &drawable_rect, 336 ret = make_drawable(qdev, 0, QXL_DRAW_COPY, &drawable_rect,
271 &release); 337 release);
272 if (ret) 338 if (ret)
273 return; 339 goto out_release_backoff;
274 340
275 ret = qxl_bo_kmap(bo, (void **)&surface_base); 341 ret = qxl_bo_kmap(bo, (void **)&surface_base);
276 if (ret) 342 if (ret)
277 goto out_unref; 343 goto out_release_backoff;
278 344
279 ret = qxl_image_create(qdev, release, &image_bo, surface_base, 345
280 left, top, width, height, depth, stride); 346 ret = qxl_image_init(qdev, release, dimage, surface_base,
347 left, top, width, height, depth, stride);
281 qxl_bo_kunmap(bo); 348 qxl_bo_kunmap(bo);
282 if (ret) 349 if (ret)
283 goto out_unref; 350 goto out_release_backoff;
351
352 rects = drawable_set_clipping(qdev, drawable, num_clips, clips_bo);
353 if (!rects)
354 goto out_release_backoff;
284 355
285 rects = drawable_set_clipping(qdev, drawable, num_clips, &clips_bo, release);
286 if (!rects) {
287 qxl_bo_unref(&image_bo);
288 goto out_unref;
289 }
290 drawable = (struct qxl_drawable *)qxl_release_map(qdev, release); 356 drawable = (struct qxl_drawable *)qxl_release_map(qdev, release);
291 357
292 drawable->clip.type = SPICE_CLIP_TYPE_RECTS; 358 drawable->clip.type = SPICE_CLIP_TYPE_RECTS;
293 drawable->clip.data = qxl_bo_physical_address(qdev, 359 drawable->clip.data = qxl_bo_physical_address(qdev,
294 clips_bo, 0); 360 clips_bo, 0);
295 qxl_release_add_res(qdev, release, clips_bo);
296 361
297 drawable->u.copy.src_area.top = 0; 362 drawable->u.copy.src_area.top = 0;
298 drawable->u.copy.src_area.bottom = height; 363 drawable->u.copy.src_area.bottom = height;
@@ -306,11 +371,9 @@ void qxl_draw_dirty_fb(struct qxl_device *qdev,
306 drawable->u.copy.mask.pos.y = 0; 371 drawable->u.copy.mask.pos.y = 0;
307 drawable->u.copy.mask.bitmap = 0; 372 drawable->u.copy.mask.bitmap = 0;
308 373
309 drawable->u.copy.src_bitmap = qxl_bo_physical_address(qdev, image_bo, 0); 374 drawable->u.copy.src_bitmap = qxl_bo_physical_address(qdev, dimage->bo, 0);
310 qxl_release_unmap(qdev, release, &drawable->release_info); 375 qxl_release_unmap(qdev, release, &drawable->release_info);
311 qxl_release_add_res(qdev, release, image_bo); 376
312 qxl_bo_unreserve(image_bo);
313 qxl_bo_unref(&image_bo);
314 clips_ptr = clips; 377 clips_ptr = clips;
315 for (i = 0; i < num_clips; i++, clips_ptr += inc) { 378 for (i = 0; i < num_clips; i++, clips_ptr += inc) {
316 rects[i].left = clips_ptr->x1; 379 rects[i].left = clips_ptr->x1;
@@ -319,17 +382,22 @@ void qxl_draw_dirty_fb(struct qxl_device *qdev,
319 rects[i].bottom = clips_ptr->y2; 382 rects[i].bottom = clips_ptr->y2;
320 } 383 }
321 qxl_bo_kunmap(clips_bo); 384 qxl_bo_kunmap(clips_bo);
322 qxl_bo_unreserve(clips_bo);
323 qxl_bo_unref(&clips_bo);
324 385
325 qxl_fence_releaseable(qdev, release);
326 qxl_push_command_ring_release(qdev, release, QXL_CMD_DRAW, false); 386 qxl_push_command_ring_release(qdev, release, QXL_CMD_DRAW, false);
327 qxl_release_unreserve(qdev, release); 387 qxl_release_fence_buffer_objects(release);
328 return; 388
389out_release_backoff:
390 if (ret)
391 qxl_release_backoff_reserve_list(release);
392out_free_image:
393 qxl_image_free_objects(qdev, dimage);
394out_free_clips:
395 qxl_bo_unref(&clips_bo);
396out_free_drawable:
397 /* only free drawable on error */
398 if (ret)
399 free_drawable(qdev, release);
329 400
330out_unref:
331 qxl_release_unreserve(qdev, release);
332 qxl_release_free(qdev, release);
333} 401}
334 402
335void qxl_draw_copyarea(struct qxl_device *qdev, 403void qxl_draw_copyarea(struct qxl_device *qdev,
@@ -342,22 +410,36 @@ void qxl_draw_copyarea(struct qxl_device *qdev,
342 struct qxl_release *release; 410 struct qxl_release *release;
343 int ret; 411 int ret;
344 412
413 ret = alloc_drawable(qdev, &release);
414 if (ret)
415 return;
416
417 /* do a reservation run over all the objects we just allocated */
418 ret = qxl_release_reserve_list(release, true);
419 if (ret)
420 goto out_free_release;
421
345 rect.left = dx; 422 rect.left = dx;
346 rect.top = dy; 423 rect.top = dy;
347 rect.right = dx + width; 424 rect.right = dx + width;
348 rect.bottom = dy + height; 425 rect.bottom = dy + height;
349 ret = make_drawable(qdev, 0, QXL_COPY_BITS, &rect, &release); 426 ret = make_drawable(qdev, 0, QXL_COPY_BITS, &rect, release);
350 if (ret) 427 if (ret) {
351 return; 428 qxl_release_backoff_reserve_list(release);
429 goto out_free_release;
430 }
352 431
353 drawable = (struct qxl_drawable *)qxl_release_map(qdev, release); 432 drawable = (struct qxl_drawable *)qxl_release_map(qdev, release);
354 drawable->u.copy_bits.src_pos.x = sx; 433 drawable->u.copy_bits.src_pos.x = sx;
355 drawable->u.copy_bits.src_pos.y = sy; 434 drawable->u.copy_bits.src_pos.y = sy;
356
357 qxl_release_unmap(qdev, release, &drawable->release_info); 435 qxl_release_unmap(qdev, release, &drawable->release_info);
358 qxl_fence_releaseable(qdev, release); 436
359 qxl_push_command_ring_release(qdev, release, QXL_CMD_DRAW, false); 437 qxl_push_command_ring_release(qdev, release, QXL_CMD_DRAW, false);
360 qxl_release_unreserve(qdev, release); 438 qxl_release_fence_buffer_objects(release);
439
440out_free_release:
441 if (ret)
442 free_drawable(qdev, release);
361} 443}
362 444
363void qxl_draw_fill(struct qxl_draw_fill *qxl_draw_fill_rec) 445void qxl_draw_fill(struct qxl_draw_fill *qxl_draw_fill_rec)
@@ -370,10 +452,21 @@ void qxl_draw_fill(struct qxl_draw_fill *qxl_draw_fill_rec)
370 struct qxl_release *release; 452 struct qxl_release *release;
371 int ret; 453 int ret;
372 454
373 ret = make_drawable(qdev, 0, QXL_DRAW_FILL, &rect, &release); 455 ret = alloc_drawable(qdev, &release);
374 if (ret) 456 if (ret)
375 return; 457 return;
376 458
459 /* do a reservation run over all the objects we just allocated */
460 ret = qxl_release_reserve_list(release, true);
461 if (ret)
462 goto out_free_release;
463
464 ret = make_drawable(qdev, 0, QXL_DRAW_FILL, &rect, release);
465 if (ret) {
466 qxl_release_backoff_reserve_list(release);
467 goto out_free_release;
468 }
469
377 drawable = (struct qxl_drawable *)qxl_release_map(qdev, release); 470 drawable = (struct qxl_drawable *)qxl_release_map(qdev, release);
378 drawable->u.fill.brush.type = SPICE_BRUSH_TYPE_SOLID; 471 drawable->u.fill.brush.type = SPICE_BRUSH_TYPE_SOLID;
379 drawable->u.fill.brush.u.color = color; 472 drawable->u.fill.brush.u.color = color;
@@ -384,7 +477,11 @@ void qxl_draw_fill(struct qxl_draw_fill *qxl_draw_fill_rec)
384 drawable->u.fill.mask.bitmap = 0; 477 drawable->u.fill.mask.bitmap = 0;
385 478
386 qxl_release_unmap(qdev, release, &drawable->release_info); 479 qxl_release_unmap(qdev, release, &drawable->release_info);
387 qxl_fence_releaseable(qdev, release); 480
388 qxl_push_command_ring_release(qdev, release, QXL_CMD_DRAW, false); 481 qxl_push_command_ring_release(qdev, release, QXL_CMD_DRAW, false);
389 qxl_release_unreserve(qdev, release); 482 qxl_release_fence_buffer_objects(release);
483
484out_free_release:
485 if (ret)
486 free_drawable(qdev, release);
390} 487}
diff --git a/drivers/gpu/drm/qxl/qxl_drv.h b/drivers/gpu/drm/qxl/qxl_drv.h
index aacb791464a3..7e96f4f11738 100644
--- a/drivers/gpu/drm/qxl/qxl_drv.h
+++ b/drivers/gpu/drm/qxl/qxl_drv.h
@@ -42,6 +42,9 @@
42#include <ttm/ttm_placement.h> 42#include <ttm/ttm_placement.h>
43#include <ttm/ttm_module.h> 43#include <ttm/ttm_module.h>
44 44
45/* just for ttm_validate_buffer */
46#include <ttm/ttm_execbuf_util.h>
47
45#include <drm/qxl_drm.h> 48#include <drm/qxl_drm.h>
46#include "qxl_dev.h" 49#include "qxl_dev.h"
47 50
@@ -118,9 +121,9 @@ struct qxl_bo {
118 uint32_t surface_id; 121 uint32_t surface_id;
119 struct qxl_fence fence; /* per bo fence - list of releases */ 122 struct qxl_fence fence; /* per bo fence - list of releases */
120 struct qxl_release *surf_create; 123 struct qxl_release *surf_create;
121 atomic_t reserve_count;
122}; 124};
123#define gem_to_qxl_bo(gobj) container_of((gobj), struct qxl_bo, gem_base) 125#define gem_to_qxl_bo(gobj) container_of((gobj), struct qxl_bo, gem_base)
126#define to_qxl_bo(tobj) container_of((tobj), struct qxl_bo, tbo)
124 127
125struct qxl_gem { 128struct qxl_gem {
126 struct mutex mutex; 129 struct mutex mutex;
@@ -128,12 +131,7 @@ struct qxl_gem {
128}; 131};
129 132
130struct qxl_bo_list { 133struct qxl_bo_list {
131 struct list_head lhead; 134 struct ttm_validate_buffer tv;
132 struct qxl_bo *bo;
133};
134
135struct qxl_reloc_list {
136 struct list_head bos;
137}; 135};
138 136
139struct qxl_crtc { 137struct qxl_crtc {
@@ -195,10 +193,20 @@ enum {
195struct qxl_release { 193struct qxl_release {
196 int id; 194 int id;
197 int type; 195 int type;
198 int bo_count;
199 uint32_t release_offset; 196 uint32_t release_offset;
200 uint32_t surface_release_id; 197 uint32_t surface_release_id;
201 struct qxl_bo *bos[QXL_MAX_RES]; 198 struct ww_acquire_ctx ticket;
199 struct list_head bos;
200};
201
202struct qxl_drm_chunk {
203 struct list_head head;
204 struct qxl_bo *bo;
205};
206
207struct qxl_drm_image {
208 struct qxl_bo *bo;
209 struct list_head chunk_list;
202}; 210};
203 211
204struct qxl_fb_image { 212struct qxl_fb_image {
@@ -314,6 +322,7 @@ struct qxl_device {
314 struct workqueue_struct *gc_queue; 322 struct workqueue_struct *gc_queue;
315 struct work_struct gc_work; 323 struct work_struct gc_work;
316 324
325 struct work_struct fb_work;
317}; 326};
318 327
319/* forward declaration for QXL_INFO_IO */ 328/* forward declaration for QXL_INFO_IO */
@@ -433,12 +442,19 @@ int qxl_mmap(struct file *filp, struct vm_area_struct *vma);
433 442
434/* qxl image */ 443/* qxl image */
435 444
436int qxl_image_create(struct qxl_device *qdev, 445int qxl_image_init(struct qxl_device *qdev,
437 struct qxl_release *release, 446 struct qxl_release *release,
438 struct qxl_bo **image_bo, 447 struct qxl_drm_image *dimage,
439 const uint8_t *data, 448 const uint8_t *data,
440 int x, int y, int width, int height, 449 int x, int y, int width, int height,
441 int depth, int stride); 450 int depth, int stride);
451int
452qxl_image_alloc_objects(struct qxl_device *qdev,
453 struct qxl_release *release,
454 struct qxl_drm_image **image_ptr,
455 int height, int stride);
456void qxl_image_free_objects(struct qxl_device *qdev, struct qxl_drm_image *dimage);
457
442void qxl_update_screen(struct qxl_device *qxl); 458void qxl_update_screen(struct qxl_device *qxl);
443 459
444/* qxl io operations (qxl_cmd.c) */ 460/* qxl io operations (qxl_cmd.c) */
@@ -459,20 +475,15 @@ int qxl_ring_push(struct qxl_ring *ring, const void *new_elt, bool interruptible
459void qxl_io_flush_release(struct qxl_device *qdev); 475void qxl_io_flush_release(struct qxl_device *qdev);
460void qxl_io_flush_surfaces(struct qxl_device *qdev); 476void qxl_io_flush_surfaces(struct qxl_device *qdev);
461 477
462int qxl_release_reserve(struct qxl_device *qdev,
463 struct qxl_release *release, bool no_wait);
464void qxl_release_unreserve(struct qxl_device *qdev,
465 struct qxl_release *release);
466union qxl_release_info *qxl_release_map(struct qxl_device *qdev, 478union qxl_release_info *qxl_release_map(struct qxl_device *qdev,
467 struct qxl_release *release); 479 struct qxl_release *release);
468void qxl_release_unmap(struct qxl_device *qdev, 480void qxl_release_unmap(struct qxl_device *qdev,
469 struct qxl_release *release, 481 struct qxl_release *release,
470 union qxl_release_info *info); 482 union qxl_release_info *info);
471/* 483int qxl_release_list_add(struct qxl_release *release, struct qxl_bo *bo);
472 * qxl_bo_add_resource. 484int qxl_release_reserve_list(struct qxl_release *release, bool no_intr);
473 * 485void qxl_release_backoff_reserve_list(struct qxl_release *release);
474 */ 486void qxl_release_fence_buffer_objects(struct qxl_release *release);
475void qxl_bo_add_resource(struct qxl_bo *main_bo, struct qxl_bo *resource);
476 487
477int qxl_alloc_surface_release_reserved(struct qxl_device *qdev, 488int qxl_alloc_surface_release_reserved(struct qxl_device *qdev,
478 enum qxl_surface_cmd_type surface_cmd_type, 489 enum qxl_surface_cmd_type surface_cmd_type,
@@ -481,15 +492,16 @@ int qxl_alloc_surface_release_reserved(struct qxl_device *qdev,
481int qxl_alloc_release_reserved(struct qxl_device *qdev, unsigned long size, 492int qxl_alloc_release_reserved(struct qxl_device *qdev, unsigned long size,
482 int type, struct qxl_release **release, 493 int type, struct qxl_release **release,
483 struct qxl_bo **rbo); 494 struct qxl_bo **rbo);
484int qxl_fence_releaseable(struct qxl_device *qdev, 495
485 struct qxl_release *release);
486int 496int
487qxl_push_command_ring_release(struct qxl_device *qdev, struct qxl_release *release, 497qxl_push_command_ring_release(struct qxl_device *qdev, struct qxl_release *release,
488 uint32_t type, bool interruptible); 498 uint32_t type, bool interruptible);
489int 499int
490qxl_push_cursor_ring_release(struct qxl_device *qdev, struct qxl_release *release, 500qxl_push_cursor_ring_release(struct qxl_device *qdev, struct qxl_release *release,
491 uint32_t type, bool interruptible); 501 uint32_t type, bool interruptible);
492int qxl_alloc_bo_reserved(struct qxl_device *qdev, unsigned long size, 502int qxl_alloc_bo_reserved(struct qxl_device *qdev,
503 struct qxl_release *release,
504 unsigned long size,
493 struct qxl_bo **_bo); 505 struct qxl_bo **_bo);
494/* qxl drawing commands */ 506/* qxl drawing commands */
495 507
@@ -510,15 +522,9 @@ void qxl_draw_copyarea(struct qxl_device *qdev,
510 u32 sx, u32 sy, 522 u32 sx, u32 sy,
511 u32 dx, u32 dy); 523 u32 dx, u32 dy);
512 524
513uint64_t
514qxl_release_alloc(struct qxl_device *qdev, int type,
515 struct qxl_release **ret);
516
517void qxl_release_free(struct qxl_device *qdev, 525void qxl_release_free(struct qxl_device *qdev,
518 struct qxl_release *release); 526 struct qxl_release *release);
519void qxl_release_add_res(struct qxl_device *qdev, 527
520 struct qxl_release *release,
521 struct qxl_bo *bo);
522/* used by qxl_debugfs_release */ 528/* used by qxl_debugfs_release */
523struct qxl_release *qxl_release_from_id_locked(struct qxl_device *qdev, 529struct qxl_release *qxl_release_from_id_locked(struct qxl_device *qdev,
524 uint64_t id); 530 uint64_t id);
@@ -561,7 +567,7 @@ void qxl_surface_evict(struct qxl_device *qdev, struct qxl_bo *surf, bool freein
561int qxl_update_surface(struct qxl_device *qdev, struct qxl_bo *surf); 567int qxl_update_surface(struct qxl_device *qdev, struct qxl_bo *surf);
562 568
563/* qxl_fence.c */ 569/* qxl_fence.c */
564int qxl_fence_add_release(struct qxl_fence *qfence, uint32_t rel_id); 570void qxl_fence_add_release_locked(struct qxl_fence *qfence, uint32_t rel_id);
565int qxl_fence_remove_release(struct qxl_fence *qfence, uint32_t rel_id); 571int qxl_fence_remove_release(struct qxl_fence *qfence, uint32_t rel_id);
566int qxl_fence_init(struct qxl_device *qdev, struct qxl_fence *qfence); 572int qxl_fence_init(struct qxl_device *qdev, struct qxl_fence *qfence);
567void qxl_fence_fini(struct qxl_fence *qfence); 573void qxl_fence_fini(struct qxl_fence *qfence);
diff --git a/drivers/gpu/drm/qxl/qxl_fb.c b/drivers/gpu/drm/qxl/qxl_fb.c
index 76f39d88d684..88722f233430 100644
--- a/drivers/gpu/drm/qxl/qxl_fb.c
+++ b/drivers/gpu/drm/qxl/qxl_fb.c
@@ -37,12 +37,29 @@
37 37
38#define QXL_DIRTY_DELAY (HZ / 30) 38#define QXL_DIRTY_DELAY (HZ / 30)
39 39
40#define QXL_FB_OP_FILLRECT 1
41#define QXL_FB_OP_COPYAREA 2
42#define QXL_FB_OP_IMAGEBLIT 3
43
44struct qxl_fb_op {
45 struct list_head head;
46 int op_type;
47 union {
48 struct fb_fillrect fr;
49 struct fb_copyarea ca;
50 struct fb_image ib;
51 } op;
52 void *img_data;
53};
54
40struct qxl_fbdev { 55struct qxl_fbdev {
41 struct drm_fb_helper helper; 56 struct drm_fb_helper helper;
42 struct qxl_framebuffer qfb; 57 struct qxl_framebuffer qfb;
43 struct list_head fbdev_list; 58 struct list_head fbdev_list;
44 struct qxl_device *qdev; 59 struct qxl_device *qdev;
45 60
61 spinlock_t delayed_ops_lock;
62 struct list_head delayed_ops;
46 void *shadow; 63 void *shadow;
47 int size; 64 int size;
48 65
@@ -164,8 +181,69 @@ static struct fb_deferred_io qxl_defio = {
164 .deferred_io = qxl_deferred_io, 181 .deferred_io = qxl_deferred_io,
165}; 182};
166 183
167static void qxl_fb_fillrect(struct fb_info *info, 184static void qxl_fb_delayed_fillrect(struct qxl_fbdev *qfbdev,
168 const struct fb_fillrect *fb_rect) 185 const struct fb_fillrect *fb_rect)
186{
187 struct qxl_fb_op *op;
188 unsigned long flags;
189
190 op = kmalloc(sizeof(struct qxl_fb_op), GFP_ATOMIC | __GFP_NOWARN);
191 if (!op)
192 return;
193
194 op->op.fr = *fb_rect;
195 op->img_data = NULL;
196 op->op_type = QXL_FB_OP_FILLRECT;
197
198 spin_lock_irqsave(&qfbdev->delayed_ops_lock, flags);
199 list_add_tail(&op->head, &qfbdev->delayed_ops);
200 spin_unlock_irqrestore(&qfbdev->delayed_ops_lock, flags);
201}
202
203static void qxl_fb_delayed_copyarea(struct qxl_fbdev *qfbdev,
204 const struct fb_copyarea *fb_copy)
205{
206 struct qxl_fb_op *op;
207 unsigned long flags;
208
209 op = kmalloc(sizeof(struct qxl_fb_op), GFP_ATOMIC | __GFP_NOWARN);
210 if (!op)
211 return;
212
213 op->op.ca = *fb_copy;
214 op->img_data = NULL;
215 op->op_type = QXL_FB_OP_COPYAREA;
216
217 spin_lock_irqsave(&qfbdev->delayed_ops_lock, flags);
218 list_add_tail(&op->head, &qfbdev->delayed_ops);
219 spin_unlock_irqrestore(&qfbdev->delayed_ops_lock, flags);
220}
221
222static void qxl_fb_delayed_imageblit(struct qxl_fbdev *qfbdev,
223 const struct fb_image *fb_image)
224{
225 struct qxl_fb_op *op;
226 unsigned long flags;
227 uint32_t size = fb_image->width * fb_image->height * (fb_image->depth >= 8 ? fb_image->depth / 8 : 1);
228
229 op = kmalloc(sizeof(struct qxl_fb_op) + size, GFP_ATOMIC | __GFP_NOWARN);
230 if (!op)
231 return;
232
233 op->op.ib = *fb_image;
234 op->img_data = (void *)(op + 1);
235 op->op_type = QXL_FB_OP_IMAGEBLIT;
236
237 memcpy(op->img_data, fb_image->data, size);
238
239 op->op.ib.data = op->img_data;
240 spin_lock_irqsave(&qfbdev->delayed_ops_lock, flags);
241 list_add_tail(&op->head, &qfbdev->delayed_ops);
242 spin_unlock_irqrestore(&qfbdev->delayed_ops_lock, flags);
243}
244
245static void qxl_fb_fillrect_internal(struct fb_info *info,
246 const struct fb_fillrect *fb_rect)
169{ 247{
170 struct qxl_fbdev *qfbdev = info->par; 248 struct qxl_fbdev *qfbdev = info->par;
171 struct qxl_device *qdev = qfbdev->qdev; 249 struct qxl_device *qdev = qfbdev->qdev;
@@ -203,17 +281,28 @@ static void qxl_fb_fillrect(struct fb_info *info,
203 qxl_draw_fill_rec.rect = rect; 281 qxl_draw_fill_rec.rect = rect;
204 qxl_draw_fill_rec.color = color; 282 qxl_draw_fill_rec.color = color;
205 qxl_draw_fill_rec.rop = rop; 283 qxl_draw_fill_rec.rop = rop;
284
285 qxl_draw_fill(&qxl_draw_fill_rec);
286}
287
288static void qxl_fb_fillrect(struct fb_info *info,
289 const struct fb_fillrect *fb_rect)
290{
291 struct qxl_fbdev *qfbdev = info->par;
292 struct qxl_device *qdev = qfbdev->qdev;
293
206 if (!drm_can_sleep()) { 294 if (!drm_can_sleep()) {
207 qxl_io_log(qdev, 295 qxl_fb_delayed_fillrect(qfbdev, fb_rect);
208 "%s: TODO use RCU, mysterious locks with spin_lock\n", 296 schedule_work(&qdev->fb_work);
209 __func__);
210 return; 297 return;
211 } 298 }
212 qxl_draw_fill(&qxl_draw_fill_rec); 299 /* make sure any previous work is done */
300 flush_work(&qdev->fb_work);
301 qxl_fb_fillrect_internal(info, fb_rect);
213} 302}
214 303
215static void qxl_fb_copyarea(struct fb_info *info, 304static void qxl_fb_copyarea_internal(struct fb_info *info,
216 const struct fb_copyarea *region) 305 const struct fb_copyarea *region)
217{ 306{
218 struct qxl_fbdev *qfbdev = info->par; 307 struct qxl_fbdev *qfbdev = info->par;
219 308
@@ -223,37 +312,89 @@ static void qxl_fb_copyarea(struct fb_info *info,
223 region->dx, region->dy); 312 region->dx, region->dy);
224} 313}
225 314
315static void qxl_fb_copyarea(struct fb_info *info,
316 const struct fb_copyarea *region)
317{
318 struct qxl_fbdev *qfbdev = info->par;
319 struct qxl_device *qdev = qfbdev->qdev;
320
321 if (!drm_can_sleep()) {
322 qxl_fb_delayed_copyarea(qfbdev, region);
323 schedule_work(&qdev->fb_work);
324 return;
325 }
326 /* make sure any previous work is done */
327 flush_work(&qdev->fb_work);
328 qxl_fb_copyarea_internal(info, region);
329}
330
226static void qxl_fb_imageblit_safe(struct qxl_fb_image *qxl_fb_image) 331static void qxl_fb_imageblit_safe(struct qxl_fb_image *qxl_fb_image)
227{ 332{
228 qxl_draw_opaque_fb(qxl_fb_image, 0); 333 qxl_draw_opaque_fb(qxl_fb_image, 0);
229} 334}
230 335
336static void qxl_fb_imageblit_internal(struct fb_info *info,
337 const struct fb_image *image)
338{
339 struct qxl_fbdev *qfbdev = info->par;
340 struct qxl_fb_image qxl_fb_image;
341
342 /* ensure proper order rendering operations - TODO: must do this
343 * for everything. */
344 qxl_fb_image_init(&qxl_fb_image, qfbdev->qdev, info, image);
345 qxl_fb_imageblit_safe(&qxl_fb_image);
346}
347
231static void qxl_fb_imageblit(struct fb_info *info, 348static void qxl_fb_imageblit(struct fb_info *info,
232 const struct fb_image *image) 349 const struct fb_image *image)
233{ 350{
234 struct qxl_fbdev *qfbdev = info->par; 351 struct qxl_fbdev *qfbdev = info->par;
235 struct qxl_device *qdev = qfbdev->qdev; 352 struct qxl_device *qdev = qfbdev->qdev;
236 struct qxl_fb_image qxl_fb_image;
237 353
238 if (!drm_can_sleep()) { 354 if (!drm_can_sleep()) {
239 /* we cannot do any ttm_bo allocation since that will fail on 355 qxl_fb_delayed_imageblit(qfbdev, image);
240 * ioremap_wc..__get_vm_area_node, so queue the work item 356 schedule_work(&qdev->fb_work);
241 * instead This can happen from printk inside an interrupt
242 * context, i.e.: smp_apic_timer_interrupt..check_cpu_stall */
243 qxl_io_log(qdev,
244 "%s: TODO use RCU, mysterious locks with spin_lock\n",
245 __func__);
246 return; 357 return;
247 } 358 }
359 /* make sure any previous work is done */
360 flush_work(&qdev->fb_work);
361 qxl_fb_imageblit_internal(info, image);
362}
248 363
249 /* ensure proper order of rendering operations - TODO: must do this 364static void qxl_fb_work(struct work_struct *work)
250 * for everything. */ 365{
251 qxl_fb_image_init(&qxl_fb_image, qfbdev->qdev, info, image); 366 struct qxl_device *qdev = container_of(work, struct qxl_device, fb_work);
252 qxl_fb_imageblit_safe(&qxl_fb_image); 367 unsigned long flags;
368 struct qxl_fb_op *entry, *tmp;
369 struct qxl_fbdev *qfbdev = qdev->mode_info.qfbdev;
370
371 /* since the irq context just adds entries to the end of the
372 list dropping the lock should be fine, as entry isn't modified
373 in the operation code */
374 spin_lock_irqsave(&qfbdev->delayed_ops_lock, flags);
375 list_for_each_entry_safe(entry, tmp, &qfbdev->delayed_ops, head) {
376 spin_unlock_irqrestore(&qfbdev->delayed_ops_lock, flags);
377 switch (entry->op_type) {
378 case QXL_FB_OP_FILLRECT:
379 qxl_fb_fillrect_internal(qfbdev->helper.fbdev, &entry->op.fr);
380 break;
381 case QXL_FB_OP_COPYAREA:
382 qxl_fb_copyarea_internal(qfbdev->helper.fbdev, &entry->op.ca);
383 break;
384 case QXL_FB_OP_IMAGEBLIT:
385 qxl_fb_imageblit_internal(qfbdev->helper.fbdev, &entry->op.ib);
386 break;
387 }
388 spin_lock_irqsave(&qfbdev->delayed_ops_lock, flags);
389 list_del(&entry->head);
390 kfree(entry);
391 }
392 spin_unlock_irqrestore(&qfbdev->delayed_ops_lock, flags);
253} 393}
254 394
255int qxl_fb_init(struct qxl_device *qdev) 395int qxl_fb_init(struct qxl_device *qdev)
256{ 396{
397 INIT_WORK(&qdev->fb_work, qxl_fb_work);
257 return 0; 398 return 0;
258} 399}
259 400
@@ -536,7 +677,8 @@ int qxl_fbdev_init(struct qxl_device *qdev)
536 qfbdev->qdev = qdev; 677 qfbdev->qdev = qdev;
537 qdev->mode_info.qfbdev = qfbdev; 678 qdev->mode_info.qfbdev = qfbdev;
538 qfbdev->helper.funcs = &qxl_fb_helper_funcs; 679 qfbdev->helper.funcs = &qxl_fb_helper_funcs;
539 680 spin_lock_init(&qfbdev->delayed_ops_lock);
681 INIT_LIST_HEAD(&qfbdev->delayed_ops);
540 ret = drm_fb_helper_init(qdev->ddev, &qfbdev->helper, 682 ret = drm_fb_helper_init(qdev->ddev, &qfbdev->helper,
541 qxl_num_crtc /* num_crtc - QXL supports just 1 */, 683 qxl_num_crtc /* num_crtc - QXL supports just 1 */,
542 QXLFB_CONN_LIMIT); 684 QXLFB_CONN_LIMIT);
diff --git a/drivers/gpu/drm/qxl/qxl_fence.c b/drivers/gpu/drm/qxl/qxl_fence.c
index 63c6715ad385..ae59e91cfb9a 100644
--- a/drivers/gpu/drm/qxl/qxl_fence.c
+++ b/drivers/gpu/drm/qxl/qxl_fence.c
@@ -49,17 +49,11 @@
49 49
50 For some reason every so often qxl hw fails to release, things go wrong. 50 For some reason every so often qxl hw fails to release, things go wrong.
51*/ 51*/
52 52/* must be called with the fence lock held */
53 53void qxl_fence_add_release_locked(struct qxl_fence *qfence, uint32_t rel_id)
54int qxl_fence_add_release(struct qxl_fence *qfence, uint32_t rel_id)
55{ 54{
56 struct qxl_bo *bo = container_of(qfence, struct qxl_bo, fence);
57
58 spin_lock(&bo->tbo.bdev->fence_lock);
59 radix_tree_insert(&qfence->tree, rel_id, qfence); 55 radix_tree_insert(&qfence->tree, rel_id, qfence);
60 qfence->num_active_releases++; 56 qfence->num_active_releases++;
61 spin_unlock(&bo->tbo.bdev->fence_lock);
62 return 0;
63} 57}
64 58
65int qxl_fence_remove_release(struct qxl_fence *qfence, uint32_t rel_id) 59int qxl_fence_remove_release(struct qxl_fence *qfence, uint32_t rel_id)
diff --git a/drivers/gpu/drm/qxl/qxl_gem.c b/drivers/gpu/drm/qxl/qxl_gem.c
index a235693aabba..25e1777fb0a2 100644
--- a/drivers/gpu/drm/qxl/qxl_gem.c
+++ b/drivers/gpu/drm/qxl/qxl_gem.c
@@ -55,7 +55,7 @@ int qxl_gem_object_create(struct qxl_device *qdev, int size,
55 /* At least align on page size */ 55 /* At least align on page size */
56 if (alignment < PAGE_SIZE) 56 if (alignment < PAGE_SIZE)
57 alignment = PAGE_SIZE; 57 alignment = PAGE_SIZE;
58 r = qxl_bo_create(qdev, size, kernel, initial_domain, surf, &qbo); 58 r = qxl_bo_create(qdev, size, kernel, false, initial_domain, surf, &qbo);
59 if (r) { 59 if (r) {
60 if (r != -ERESTARTSYS) 60 if (r != -ERESTARTSYS)
61 DRM_ERROR( 61 DRM_ERROR(
diff --git a/drivers/gpu/drm/qxl/qxl_image.c b/drivers/gpu/drm/qxl/qxl_image.c
index cf856206996b..7fbcc35e8ad3 100644
--- a/drivers/gpu/drm/qxl/qxl_image.c
+++ b/drivers/gpu/drm/qxl/qxl_image.c
@@ -30,31 +30,100 @@
30#include "qxl_object.h" 30#include "qxl_object.h"
31 31
32static int 32static int
33qxl_image_create_helper(struct qxl_device *qdev, 33qxl_allocate_chunk(struct qxl_device *qdev,
34 struct qxl_release *release,
35 struct qxl_drm_image *image,
36 unsigned int chunk_size)
37{
38 struct qxl_drm_chunk *chunk;
39 int ret;
40
41 chunk = kmalloc(sizeof(struct qxl_drm_chunk), GFP_KERNEL);
42 if (!chunk)
43 return -ENOMEM;
44
45 ret = qxl_alloc_bo_reserved(qdev, release, chunk_size, &chunk->bo);
46 if (ret) {
47 kfree(chunk);
48 return ret;
49 }
50
51 list_add_tail(&chunk->head, &image->chunk_list);
52 return 0;
53}
54
55int
56qxl_image_alloc_objects(struct qxl_device *qdev,
34 struct qxl_release *release, 57 struct qxl_release *release,
35 struct qxl_bo **image_bo, 58 struct qxl_drm_image **image_ptr,
36 const uint8_t *data, 59 int height, int stride)
37 int width, int height, 60{
38 int depth, unsigned int hash, 61 struct qxl_drm_image *image;
39 int stride) 62 int ret;
63
64 image = kmalloc(sizeof(struct qxl_drm_image), GFP_KERNEL);
65 if (!image)
66 return -ENOMEM;
67
68 INIT_LIST_HEAD(&image->chunk_list);
69
70 ret = qxl_alloc_bo_reserved(qdev, release, sizeof(struct qxl_image), &image->bo);
71 if (ret) {
72 kfree(image);
73 return ret;
74 }
75
76 ret = qxl_allocate_chunk(qdev, release, image, sizeof(struct qxl_data_chunk) + stride * height);
77 if (ret) {
78 qxl_bo_unref(&image->bo);
79 kfree(image);
80 return ret;
81 }
82 *image_ptr = image;
83 return 0;
84}
85
86void qxl_image_free_objects(struct qxl_device *qdev, struct qxl_drm_image *dimage)
40{ 87{
88 struct qxl_drm_chunk *chunk, *tmp;
89
90 list_for_each_entry_safe(chunk, tmp, &dimage->chunk_list, head) {
91 qxl_bo_unref(&chunk->bo);
92 kfree(chunk);
93 }
94
95 qxl_bo_unref(&dimage->bo);
96 kfree(dimage);
97}
98
99static int
100qxl_image_init_helper(struct qxl_device *qdev,
101 struct qxl_release *release,
102 struct qxl_drm_image *dimage,
103 const uint8_t *data,
104 int width, int height,
105 int depth, unsigned int hash,
106 int stride)
107{
108 struct qxl_drm_chunk *drv_chunk;
41 struct qxl_image *image; 109 struct qxl_image *image;
42 struct qxl_data_chunk *chunk; 110 struct qxl_data_chunk *chunk;
43 int i; 111 int i;
44 int chunk_stride; 112 int chunk_stride;
45 int linesize = width * depth / 8; 113 int linesize = width * depth / 8;
46 struct qxl_bo *chunk_bo; 114 struct qxl_bo *chunk_bo, *image_bo;
47 int ret;
48 void *ptr; 115 void *ptr;
49 /* Chunk */ 116 /* Chunk */
50 /* FIXME: Check integer overflow */ 117 /* FIXME: Check integer overflow */
51 /* TODO: variable number of chunks */ 118 /* TODO: variable number of chunks */
119
120 drv_chunk = list_first_entry(&dimage->chunk_list, struct qxl_drm_chunk, head);
121
122 chunk_bo = drv_chunk->bo;
52 chunk_stride = stride; /* TODO: should use linesize, but it renders 123 chunk_stride = stride; /* TODO: should use linesize, but it renders
53 wrong (check the bitmaps are sent correctly 124 wrong (check the bitmaps are sent correctly
54 first) */ 125 first) */
55 ret = qxl_alloc_bo_reserved(qdev, sizeof(*chunk) + height * chunk_stride, 126
56 &chunk_bo);
57
58 ptr = qxl_bo_kmap_atomic_page(qdev, chunk_bo, 0); 127 ptr = qxl_bo_kmap_atomic_page(qdev, chunk_bo, 0);
59 chunk = ptr; 128 chunk = ptr;
60 chunk->data_size = height * chunk_stride; 129 chunk->data_size = height * chunk_stride;
@@ -102,7 +171,6 @@ qxl_image_create_helper(struct qxl_device *qdev,
102 while (remain > 0) { 171 while (remain > 0) {
103 page_base = out_offset & PAGE_MASK; 172 page_base = out_offset & PAGE_MASK;
104 page_offset = offset_in_page(out_offset); 173 page_offset = offset_in_page(out_offset);
105
106 size = min((int)(PAGE_SIZE - page_offset), remain); 174 size = min((int)(PAGE_SIZE - page_offset), remain);
107 175
108 ptr = qxl_bo_kmap_atomic_page(qdev, chunk_bo, page_base); 176 ptr = qxl_bo_kmap_atomic_page(qdev, chunk_bo, page_base);
@@ -116,14 +184,10 @@ qxl_image_create_helper(struct qxl_device *qdev,
116 } 184 }
117 } 185 }
118 } 186 }
119
120
121 qxl_bo_kunmap(chunk_bo); 187 qxl_bo_kunmap(chunk_bo);
122 188
123 /* Image */ 189 image_bo = dimage->bo;
124 ret = qxl_alloc_bo_reserved(qdev, sizeof(*image), image_bo); 190 ptr = qxl_bo_kmap_atomic_page(qdev, image_bo, 0);
125
126 ptr = qxl_bo_kmap_atomic_page(qdev, *image_bo, 0);
127 image = ptr; 191 image = ptr;
128 192
129 image->descriptor.id = 0; 193 image->descriptor.id = 0;
@@ -154,23 +218,20 @@ qxl_image_create_helper(struct qxl_device *qdev,
154 image->u.bitmap.stride = chunk_stride; 218 image->u.bitmap.stride = chunk_stride;
155 image->u.bitmap.palette = 0; 219 image->u.bitmap.palette = 0;
156 image->u.bitmap.data = qxl_bo_physical_address(qdev, chunk_bo, 0); 220 image->u.bitmap.data = qxl_bo_physical_address(qdev, chunk_bo, 0);
157 qxl_release_add_res(qdev, release, chunk_bo);
158 qxl_bo_unreserve(chunk_bo);
159 qxl_bo_unref(&chunk_bo);
160 221
161 qxl_bo_kunmap_atomic_page(qdev, *image_bo, ptr); 222 qxl_bo_kunmap_atomic_page(qdev, image_bo, ptr);
162 223
163 return 0; 224 return 0;
164} 225}
165 226
166int qxl_image_create(struct qxl_device *qdev, 227int qxl_image_init(struct qxl_device *qdev,
167 struct qxl_release *release, 228 struct qxl_release *release,
168 struct qxl_bo **image_bo, 229 struct qxl_drm_image *dimage,
169 const uint8_t *data, 230 const uint8_t *data,
170 int x, int y, int width, int height, 231 int x, int y, int width, int height,
171 int depth, int stride) 232 int depth, int stride)
172{ 233{
173 data += y * stride + x * (depth / 8); 234 data += y * stride + x * (depth / 8);
174 return qxl_image_create_helper(qdev, release, image_bo, data, 235 return qxl_image_init_helper(qdev, release, dimage, data,
175 width, height, depth, 0, stride); 236 width, height, depth, 0, stride);
176} 237}
diff --git a/drivers/gpu/drm/qxl/qxl_ioctl.c b/drivers/gpu/drm/qxl/qxl_ioctl.c
index 27f45e49250d..6de33563d6f1 100644
--- a/drivers/gpu/drm/qxl/qxl_ioctl.c
+++ b/drivers/gpu/drm/qxl/qxl_ioctl.c
@@ -68,55 +68,60 @@ static int qxl_map_ioctl(struct drm_device *dev, void *data,
68 &qxl_map->offset); 68 &qxl_map->offset);
69} 69}
70 70
71struct qxl_reloc_info {
72 int type;
73 struct qxl_bo *dst_bo;
74 uint32_t dst_offset;
75 struct qxl_bo *src_bo;
76 int src_offset;
77};
78
71/* 79/*
72 * dst must be validated, i.e. whole bo on vram/surfacesram (right now all bo's 80 * dst must be validated, i.e. whole bo on vram/surfacesram (right now all bo's
73 * are on vram). 81 * are on vram).
74 * *(dst + dst_off) = qxl_bo_physical_address(src, src_off) 82 * *(dst + dst_off) = qxl_bo_physical_address(src, src_off)
75 */ 83 */
76static void 84static void
77apply_reloc(struct qxl_device *qdev, struct qxl_bo *dst, uint64_t dst_off, 85apply_reloc(struct qxl_device *qdev, struct qxl_reloc_info *info)
78 struct qxl_bo *src, uint64_t src_off)
79{ 86{
80 void *reloc_page; 87 void *reloc_page;
81 88 reloc_page = qxl_bo_kmap_atomic_page(qdev, info->dst_bo, info->dst_offset & PAGE_MASK);
82 reloc_page = qxl_bo_kmap_atomic_page(qdev, dst, dst_off & PAGE_MASK); 89 *(uint64_t *)(reloc_page + (info->dst_offset & ~PAGE_MASK)) = qxl_bo_physical_address(qdev,
83 *(uint64_t *)(reloc_page + (dst_off & ~PAGE_MASK)) = qxl_bo_physical_address(qdev, 90 info->src_bo,
84 src, src_off); 91 info->src_offset);
85 qxl_bo_kunmap_atomic_page(qdev, dst, reloc_page); 92 qxl_bo_kunmap_atomic_page(qdev, info->dst_bo, reloc_page);
86} 93}
87 94
88static void 95static void
89apply_surf_reloc(struct qxl_device *qdev, struct qxl_bo *dst, uint64_t dst_off, 96apply_surf_reloc(struct qxl_device *qdev, struct qxl_reloc_info *info)
90 struct qxl_bo *src)
91{ 97{
92 uint32_t id = 0; 98 uint32_t id = 0;
93 void *reloc_page; 99 void *reloc_page;
94 100
95 if (src && !src->is_primary) 101 if (info->src_bo && !info->src_bo->is_primary)
96 id = src->surface_id; 102 id = info->src_bo->surface_id;
97 103
98 reloc_page = qxl_bo_kmap_atomic_page(qdev, dst, dst_off & PAGE_MASK); 104 reloc_page = qxl_bo_kmap_atomic_page(qdev, info->dst_bo, info->dst_offset & PAGE_MASK);
99 *(uint32_t *)(reloc_page + (dst_off & ~PAGE_MASK)) = id; 105 *(uint32_t *)(reloc_page + (info->dst_offset & ~PAGE_MASK)) = id;
100 qxl_bo_kunmap_atomic_page(qdev, dst, reloc_page); 106 qxl_bo_kunmap_atomic_page(qdev, info->dst_bo, reloc_page);
101} 107}
102 108
103/* return holding the reference to this object */ 109/* return holding the reference to this object */
104static struct qxl_bo *qxlhw_handle_to_bo(struct qxl_device *qdev, 110static struct qxl_bo *qxlhw_handle_to_bo(struct qxl_device *qdev,
105 struct drm_file *file_priv, uint64_t handle, 111 struct drm_file *file_priv, uint64_t handle,
106 struct qxl_reloc_list *reloc_list) 112 struct qxl_release *release)
107{ 113{
108 struct drm_gem_object *gobj; 114 struct drm_gem_object *gobj;
109 struct qxl_bo *qobj; 115 struct qxl_bo *qobj;
110 int ret; 116 int ret;
111 117
112 gobj = drm_gem_object_lookup(qdev->ddev, file_priv, handle); 118 gobj = drm_gem_object_lookup(qdev->ddev, file_priv, handle);
113 if (!gobj) { 119 if (!gobj)
114 DRM_ERROR("bad bo handle %lld\n", handle);
115 return NULL; 120 return NULL;
116 } 121
117 qobj = gem_to_qxl_bo(gobj); 122 qobj = gem_to_qxl_bo(gobj);
118 123
119 ret = qxl_bo_list_add(reloc_list, qobj); 124 ret = qxl_release_list_add(release, qobj);
120 if (ret) 125 if (ret)
121 return NULL; 126 return NULL;
122 127
@@ -129,151 +134,177 @@ static struct qxl_bo *qxlhw_handle_to_bo(struct qxl_device *qdev,
129 * However, the command as passed from user space must *not* contain the initial 134 * However, the command as passed from user space must *not* contain the initial
130 * QXLReleaseInfo struct (first XXX bytes) 135 * QXLReleaseInfo struct (first XXX bytes)
131 */ 136 */
132static int qxl_execbuffer_ioctl(struct drm_device *dev, void *data, 137static int qxl_process_single_command(struct qxl_device *qdev,
133 struct drm_file *file_priv) 138 struct drm_qxl_command *cmd,
139 struct drm_file *file_priv)
134{ 140{
135 struct qxl_device *qdev = dev->dev_private; 141 struct qxl_reloc_info *reloc_info;
136 struct drm_qxl_execbuffer *execbuffer = data; 142 int release_type;
137 struct drm_qxl_command user_cmd; 143 struct qxl_release *release;
138 int cmd_num; 144 struct qxl_bo *cmd_bo;
139 struct qxl_bo *reloc_src_bo;
140 struct qxl_bo *reloc_dst_bo;
141 struct drm_qxl_reloc reloc;
142 void *fb_cmd; 145 void *fb_cmd;
143 int i, ret; 146 int i, j, ret, num_relocs;
144 struct qxl_reloc_list reloc_list;
145 int unwritten; 147 int unwritten;
146 uint32_t reloc_dst_offset;
147 INIT_LIST_HEAD(&reloc_list.bos);
148 148
149 for (cmd_num = 0; cmd_num < execbuffer->commands_num; ++cmd_num) { 149 switch (cmd->type) {
150 struct qxl_release *release; 150 case QXL_CMD_DRAW:
151 struct qxl_bo *cmd_bo; 151 release_type = QXL_RELEASE_DRAWABLE;
152 int release_type; 152 break;
153 struct drm_qxl_command *commands = 153 case QXL_CMD_SURFACE:
154 (struct drm_qxl_command *)(uintptr_t)execbuffer->commands; 154 case QXL_CMD_CURSOR:
155 default:
156 DRM_DEBUG("Only draw commands in execbuffers\n");
157 return -EINVAL;
158 break;
159 }
155 160
156 if (DRM_COPY_FROM_USER(&user_cmd, &commands[cmd_num], 161 if (cmd->command_size > PAGE_SIZE - sizeof(union qxl_release_info))
157 sizeof(user_cmd))) 162 return -EINVAL;
158 return -EFAULT;
159 switch (user_cmd.type) {
160 case QXL_CMD_DRAW:
161 release_type = QXL_RELEASE_DRAWABLE;
162 break;
163 case QXL_CMD_SURFACE:
164 case QXL_CMD_CURSOR:
165 default:
166 DRM_DEBUG("Only draw commands in execbuffers\n");
167 return -EINVAL;
168 break;
169 }
170 163
171 if (user_cmd.command_size > PAGE_SIZE - sizeof(union qxl_release_info)) 164 if (!access_ok(VERIFY_READ,
172 return -EINVAL; 165 (void *)(unsigned long)cmd->command,
166 cmd->command_size))
167 return -EFAULT;
173 168
174 if (!access_ok(VERIFY_READ, 169 reloc_info = kmalloc(sizeof(struct qxl_reloc_info) * cmd->relocs_num, GFP_KERNEL);
175 (void *)(unsigned long)user_cmd.command, 170 if (!reloc_info)
176 user_cmd.command_size)) 171 return -ENOMEM;
177 return -EFAULT;
178 172
179 ret = qxl_alloc_release_reserved(qdev, 173 ret = qxl_alloc_release_reserved(qdev,
180 sizeof(union qxl_release_info) + 174 sizeof(union qxl_release_info) +
181 user_cmd.command_size, 175 cmd->command_size,
182 release_type, 176 release_type,
183 &release, 177 &release,
184 &cmd_bo); 178 &cmd_bo);
185 if (ret) 179 if (ret)
186 return ret; 180 goto out_free_reloc;
187 181
188 /* TODO copy slow path code from i915 */ 182 /* TODO copy slow path code from i915 */
189 fb_cmd = qxl_bo_kmap_atomic_page(qdev, cmd_bo, (release->release_offset & PAGE_SIZE)); 183 fb_cmd = qxl_bo_kmap_atomic_page(qdev, cmd_bo, (release->release_offset & PAGE_SIZE));
190 unwritten = __copy_from_user_inatomic_nocache(fb_cmd + sizeof(union qxl_release_info) + (release->release_offset & ~PAGE_SIZE), (void *)(unsigned long)user_cmd.command, user_cmd.command_size); 184 unwritten = __copy_from_user_inatomic_nocache(fb_cmd + sizeof(union qxl_release_info) + (release->release_offset & ~PAGE_SIZE), (void *)(unsigned long)cmd->command, cmd->command_size);
191 185
192 { 186 {
193 struct qxl_drawable *draw = fb_cmd; 187 struct qxl_drawable *draw = fb_cmd;
188 draw->mm_time = qdev->rom->mm_clock;
189 }
194 190
195 draw->mm_time = qdev->rom->mm_clock; 191 qxl_bo_kunmap_atomic_page(qdev, cmd_bo, fb_cmd);
196 } 192 if (unwritten) {
197 qxl_bo_kunmap_atomic_page(qdev, cmd_bo, fb_cmd); 193 DRM_ERROR("got unwritten %d\n", unwritten);
198 if (unwritten) { 194 ret = -EFAULT;
199 DRM_ERROR("got unwritten %d\n", unwritten); 195 goto out_free_release;
200 qxl_release_unreserve(qdev, release); 196 }
201 qxl_release_free(qdev, release); 197
202 return -EFAULT; 198 /* fill out reloc info structs */
199 num_relocs = 0;
200 for (i = 0; i < cmd->relocs_num; ++i) {
201 struct drm_qxl_reloc reloc;
202
203 if (DRM_COPY_FROM_USER(&reloc,
204 &((struct drm_qxl_reloc *)(uintptr_t)cmd->relocs)[i],
205 sizeof(reloc))) {
206 ret = -EFAULT;
207 goto out_free_bos;
203 } 208 }
204 209
205 for (i = 0 ; i < user_cmd.relocs_num; ++i) { 210 /* add the bos to the list of bos to validate -
206 if (DRM_COPY_FROM_USER(&reloc, 211 need to validate first then process relocs? */
207 &((struct drm_qxl_reloc *)(uintptr_t)user_cmd.relocs)[i], 212 if (reloc.reloc_type != QXL_RELOC_TYPE_BO && reloc.reloc_type != QXL_RELOC_TYPE_SURF) {
208 sizeof(reloc))) { 213 DRM_DEBUG("unknown reloc type %d\n", reloc_info[i].type);
209 qxl_bo_list_unreserve(&reloc_list, true);
210 qxl_release_unreserve(qdev, release);
211 qxl_release_free(qdev, release);
212 return -EFAULT;
213 }
214 214
215 /* add the bos to the list of bos to validate - 215 ret = -EINVAL;
216 need to validate first then process relocs? */ 216 goto out_free_bos;
217 if (reloc.dst_handle) { 217 }
218 reloc_dst_bo = qxlhw_handle_to_bo(qdev, file_priv, 218 reloc_info[i].type = reloc.reloc_type;
219 reloc.dst_handle, &reloc_list); 219
220 if (!reloc_dst_bo) { 220 if (reloc.dst_handle) {
221 qxl_bo_list_unreserve(&reloc_list, true); 221 reloc_info[i].dst_bo = qxlhw_handle_to_bo(qdev, file_priv,
222 qxl_release_unreserve(qdev, release); 222 reloc.dst_handle, release);
223 qxl_release_free(qdev, release); 223 if (!reloc_info[i].dst_bo) {
224 return -EINVAL; 224 ret = -EINVAL;
225 } 225 reloc_info[i].src_bo = NULL;
226 reloc_dst_offset = 0; 226 goto out_free_bos;
227 } else {
228 reloc_dst_bo = cmd_bo;
229 reloc_dst_offset = release->release_offset;
230 } 227 }
231 228 reloc_info[i].dst_offset = reloc.dst_offset;
232 /* reserve and validate the reloc dst bo */ 229 } else {
233 if (reloc.reloc_type == QXL_RELOC_TYPE_BO || reloc.src_handle > 0) { 230 reloc_info[i].dst_bo = cmd_bo;
234 reloc_src_bo = 231 reloc_info[i].dst_offset = reloc.dst_offset + release->release_offset;
235 qxlhw_handle_to_bo(qdev, file_priv, 232 }
236 reloc.src_handle, &reloc_list); 233 num_relocs++;
237 if (!reloc_src_bo) { 234
238 if (reloc_dst_bo != cmd_bo) 235 /* reserve and validate the reloc dst bo */
239 drm_gem_object_unreference_unlocked(&reloc_dst_bo->gem_base); 236 if (reloc.reloc_type == QXL_RELOC_TYPE_BO || reloc.src_handle > 0) {
240 qxl_bo_list_unreserve(&reloc_list, true); 237 reloc_info[i].src_bo =
241 qxl_release_unreserve(qdev, release); 238 qxlhw_handle_to_bo(qdev, file_priv,
242 qxl_release_free(qdev, release); 239 reloc.src_handle, release);
243 return -EINVAL; 240 if (!reloc_info[i].src_bo) {
244 } 241 if (reloc_info[i].dst_bo != cmd_bo)
245 } else 242 drm_gem_object_unreference_unlocked(&reloc_info[i].dst_bo->gem_base);
246 reloc_src_bo = NULL; 243 ret = -EINVAL;
247 if (reloc.reloc_type == QXL_RELOC_TYPE_BO) { 244 goto out_free_bos;
248 apply_reloc(qdev, reloc_dst_bo, reloc_dst_offset + reloc.dst_offset,
249 reloc_src_bo, reloc.src_offset);
250 } else if (reloc.reloc_type == QXL_RELOC_TYPE_SURF) {
251 apply_surf_reloc(qdev, reloc_dst_bo, reloc_dst_offset + reloc.dst_offset, reloc_src_bo);
252 } else {
253 DRM_ERROR("unknown reloc type %d\n", reloc.reloc_type);
254 return -EINVAL;
255 } 245 }
246 reloc_info[i].src_offset = reloc.src_offset;
247 } else {
248 reloc_info[i].src_bo = NULL;
249 reloc_info[i].src_offset = 0;
250 }
251 }
256 252
257 if (reloc_src_bo && reloc_src_bo != cmd_bo) { 253 /* validate all buffers */
258 qxl_release_add_res(qdev, release, reloc_src_bo); 254 ret = qxl_release_reserve_list(release, false);
259 drm_gem_object_unreference_unlocked(&reloc_src_bo->gem_base); 255 if (ret)
260 } 256 goto out_free_bos;
261 257
262 if (reloc_dst_bo != cmd_bo) 258 for (i = 0; i < cmd->relocs_num; ++i) {
263 drm_gem_object_unreference_unlocked(&reloc_dst_bo->gem_base); 259 if (reloc_info[i].type == QXL_RELOC_TYPE_BO)
264 } 260 apply_reloc(qdev, &reloc_info[i]);
265 qxl_fence_releaseable(qdev, release); 261 else if (reloc_info[i].type == QXL_RELOC_TYPE_SURF)
262 apply_surf_reloc(qdev, &reloc_info[i]);
263 }
266 264
267 ret = qxl_push_command_ring_release(qdev, release, user_cmd.type, true); 265 ret = qxl_push_command_ring_release(qdev, release, cmd->type, true);
268 if (ret == -ERESTARTSYS) { 266 if (ret)
269 qxl_release_unreserve(qdev, release); 267 qxl_release_backoff_reserve_list(release);
270 qxl_release_free(qdev, release); 268 else
271 qxl_bo_list_unreserve(&reloc_list, true); 269 qxl_release_fence_buffer_objects(release);
270
271out_free_bos:
272 for (j = 0; j < num_relocs; j++) {
273 if (reloc_info[j].dst_bo != cmd_bo)
274 drm_gem_object_unreference_unlocked(&reloc_info[j].dst_bo->gem_base);
275 if (reloc_info[j].src_bo && reloc_info[j].src_bo != cmd_bo)
276 drm_gem_object_unreference_unlocked(&reloc_info[j].src_bo->gem_base);
277 }
278out_free_release:
279 if (ret)
280 qxl_release_free(qdev, release);
281out_free_reloc:
282 kfree(reloc_info);
283 return ret;
284}
285
286static int qxl_execbuffer_ioctl(struct drm_device *dev, void *data,
287 struct drm_file *file_priv)
288{
289 struct qxl_device *qdev = dev->dev_private;
290 struct drm_qxl_execbuffer *execbuffer = data;
291 struct drm_qxl_command user_cmd;
292 int cmd_num;
293 int ret;
294
295 for (cmd_num = 0; cmd_num < execbuffer->commands_num; ++cmd_num) {
296
297 struct drm_qxl_command *commands =
298 (struct drm_qxl_command *)(uintptr_t)execbuffer->commands;
299
300 if (DRM_COPY_FROM_USER(&user_cmd, &commands[cmd_num],
301 sizeof(user_cmd)))
302 return -EFAULT;
303
304 ret = qxl_process_single_command(qdev, &user_cmd, file_priv);
305 if (ret)
272 return ret; 306 return ret;
273 }
274 qxl_release_unreserve(qdev, release);
275 } 307 }
276 qxl_bo_list_unreserve(&reloc_list, 0);
277 return 0; 308 return 0;
278} 309}
279 310
@@ -305,7 +336,7 @@ static int qxl_update_area_ioctl(struct drm_device *dev, void *data,
305 goto out; 336 goto out;
306 337
307 if (!qobj->pin_count) { 338 if (!qobj->pin_count) {
308 qxl_ttm_placement_from_domain(qobj, qobj->type); 339 qxl_ttm_placement_from_domain(qobj, qobj->type, false);
309 ret = ttm_bo_validate(&qobj->tbo, &qobj->placement, 340 ret = ttm_bo_validate(&qobj->tbo, &qobj->placement,
310 true, false); 341 true, false);
311 if (unlikely(ret)) 342 if (unlikely(ret))
diff --git a/drivers/gpu/drm/qxl/qxl_object.c b/drivers/gpu/drm/qxl/qxl_object.c
index 1191fe7788c9..aa161cddd87e 100644
--- a/drivers/gpu/drm/qxl/qxl_object.c
+++ b/drivers/gpu/drm/qxl/qxl_object.c
@@ -51,20 +51,21 @@ bool qxl_ttm_bo_is_qxl_bo(struct ttm_buffer_object *bo)
51 return false; 51 return false;
52} 52}
53 53
54void qxl_ttm_placement_from_domain(struct qxl_bo *qbo, u32 domain) 54void qxl_ttm_placement_from_domain(struct qxl_bo *qbo, u32 domain, bool pinned)
55{ 55{
56 u32 c = 0; 56 u32 c = 0;
57 u32 pflag = pinned ? TTM_PL_FLAG_NO_EVICT : 0;
57 58
58 qbo->placement.fpfn = 0; 59 qbo->placement.fpfn = 0;
59 qbo->placement.lpfn = 0; 60 qbo->placement.lpfn = 0;
60 qbo->placement.placement = qbo->placements; 61 qbo->placement.placement = qbo->placements;
61 qbo->placement.busy_placement = qbo->placements; 62 qbo->placement.busy_placement = qbo->placements;
62 if (domain == QXL_GEM_DOMAIN_VRAM) 63 if (domain == QXL_GEM_DOMAIN_VRAM)
63 qbo->placements[c++] = TTM_PL_FLAG_CACHED | TTM_PL_FLAG_VRAM; 64 qbo->placements[c++] = TTM_PL_FLAG_CACHED | TTM_PL_FLAG_VRAM | pflag;
64 if (domain == QXL_GEM_DOMAIN_SURFACE) 65 if (domain == QXL_GEM_DOMAIN_SURFACE)
65 qbo->placements[c++] = TTM_PL_FLAG_CACHED | TTM_PL_FLAG_PRIV0; 66 qbo->placements[c++] = TTM_PL_FLAG_CACHED | TTM_PL_FLAG_PRIV0 | pflag;
66 if (domain == QXL_GEM_DOMAIN_CPU) 67 if (domain == QXL_GEM_DOMAIN_CPU)
67 qbo->placements[c++] = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM; 68 qbo->placements[c++] = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM | pflag;
68 if (!c) 69 if (!c)
69 qbo->placements[c++] = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM; 70 qbo->placements[c++] = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM;
70 qbo->placement.num_placement = c; 71 qbo->placement.num_placement = c;
@@ -73,7 +74,7 @@ void qxl_ttm_placement_from_domain(struct qxl_bo *qbo, u32 domain)
73 74
74 75
75int qxl_bo_create(struct qxl_device *qdev, 76int qxl_bo_create(struct qxl_device *qdev,
76 unsigned long size, bool kernel, u32 domain, 77 unsigned long size, bool kernel, bool pinned, u32 domain,
77 struct qxl_surface *surf, 78 struct qxl_surface *surf,
78 struct qxl_bo **bo_ptr) 79 struct qxl_bo **bo_ptr)
79{ 80{
@@ -99,15 +100,15 @@ int qxl_bo_create(struct qxl_device *qdev,
99 } 100 }
100 bo->gem_base.driver_private = NULL; 101 bo->gem_base.driver_private = NULL;
101 bo->type = domain; 102 bo->type = domain;
102 bo->pin_count = 0; 103 bo->pin_count = pinned ? 1 : 0;
103 bo->surface_id = 0; 104 bo->surface_id = 0;
104 qxl_fence_init(qdev, &bo->fence); 105 qxl_fence_init(qdev, &bo->fence);
105 INIT_LIST_HEAD(&bo->list); 106 INIT_LIST_HEAD(&bo->list);
106 atomic_set(&bo->reserve_count, 0); 107
107 if (surf) 108 if (surf)
108 bo->surf = *surf; 109 bo->surf = *surf;
109 110
110 qxl_ttm_placement_from_domain(bo, domain); 111 qxl_ttm_placement_from_domain(bo, domain, pinned);
111 112
112 r = ttm_bo_init(&qdev->mman.bdev, &bo->tbo, size, type, 113 r = ttm_bo_init(&qdev->mman.bdev, &bo->tbo, size, type,
113 &bo->placement, 0, !kernel, NULL, size, 114 &bo->placement, 0, !kernel, NULL, size,
@@ -228,7 +229,7 @@ struct qxl_bo *qxl_bo_ref(struct qxl_bo *bo)
228int qxl_bo_pin(struct qxl_bo *bo, u32 domain, u64 *gpu_addr) 229int qxl_bo_pin(struct qxl_bo *bo, u32 domain, u64 *gpu_addr)
229{ 230{
230 struct qxl_device *qdev = (struct qxl_device *)bo->gem_base.dev->dev_private; 231 struct qxl_device *qdev = (struct qxl_device *)bo->gem_base.dev->dev_private;
231 int r, i; 232 int r;
232 233
233 if (bo->pin_count) { 234 if (bo->pin_count) {
234 bo->pin_count++; 235 bo->pin_count++;
@@ -236,9 +237,7 @@ int qxl_bo_pin(struct qxl_bo *bo, u32 domain, u64 *gpu_addr)
236 *gpu_addr = qxl_bo_gpu_offset(bo); 237 *gpu_addr = qxl_bo_gpu_offset(bo);
237 return 0; 238 return 0;
238 } 239 }
239 qxl_ttm_placement_from_domain(bo, domain); 240 qxl_ttm_placement_from_domain(bo, domain, true);
240 for (i = 0; i < bo->placement.num_placement; i++)
241 bo->placements[i] |= TTM_PL_FLAG_NO_EVICT;
242 r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false); 241 r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false);
243 if (likely(r == 0)) { 242 if (likely(r == 0)) {
244 bo->pin_count = 1; 243 bo->pin_count = 1;
@@ -317,53 +316,6 @@ int qxl_bo_check_id(struct qxl_device *qdev, struct qxl_bo *bo)
317 return 0; 316 return 0;
318} 317}
319 318
320void qxl_bo_list_unreserve(struct qxl_reloc_list *reloc_list, bool failed)
321{
322 struct qxl_bo_list *entry, *sf;
323
324 list_for_each_entry_safe(entry, sf, &reloc_list->bos, lhead) {
325 qxl_bo_unreserve(entry->bo);
326 list_del(&entry->lhead);
327 kfree(entry);
328 }
329}
330
331int qxl_bo_list_add(struct qxl_reloc_list *reloc_list, struct qxl_bo *bo)
332{
333 struct qxl_bo_list *entry;
334 int ret;
335
336 list_for_each_entry(entry, &reloc_list->bos, lhead) {
337 if (entry->bo == bo)
338 return 0;
339 }
340
341 entry = kmalloc(sizeof(struct qxl_bo_list), GFP_KERNEL);
342 if (!entry)
343 return -ENOMEM;
344
345 entry->bo = bo;
346 list_add(&entry->lhead, &reloc_list->bos);
347
348 ret = qxl_bo_reserve(bo, false);
349 if (ret)
350 return ret;
351
352 if (!bo->pin_count) {
353 qxl_ttm_placement_from_domain(bo, bo->type);
354 ret = ttm_bo_validate(&bo->tbo, &bo->placement,
355 true, false);
356 if (ret)
357 return ret;
358 }
359
360 /* allocate a surface for reserved + validated buffers */
361 ret = qxl_bo_check_id(bo->gem_base.dev->dev_private, bo);
362 if (ret)
363 return ret;
364 return 0;
365}
366
367int qxl_surf_evict(struct qxl_device *qdev) 319int qxl_surf_evict(struct qxl_device *qdev)
368{ 320{
369 return ttm_bo_evict_mm(&qdev->mman.bdev, TTM_PL_PRIV0); 321 return ttm_bo_evict_mm(&qdev->mman.bdev, TTM_PL_PRIV0);
diff --git a/drivers/gpu/drm/qxl/qxl_object.h b/drivers/gpu/drm/qxl/qxl_object.h
index ee7ad79ce781..8cb6167038e5 100644
--- a/drivers/gpu/drm/qxl/qxl_object.h
+++ b/drivers/gpu/drm/qxl/qxl_object.h
@@ -88,7 +88,7 @@ static inline int qxl_bo_wait(struct qxl_bo *bo, u32 *mem_type,
88 88
89extern int qxl_bo_create(struct qxl_device *qdev, 89extern int qxl_bo_create(struct qxl_device *qdev,
90 unsigned long size, 90 unsigned long size,
91 bool kernel, u32 domain, 91 bool kernel, bool pinned, u32 domain,
92 struct qxl_surface *surf, 92 struct qxl_surface *surf,
93 struct qxl_bo **bo_ptr); 93 struct qxl_bo **bo_ptr);
94extern int qxl_bo_kmap(struct qxl_bo *bo, void **ptr); 94extern int qxl_bo_kmap(struct qxl_bo *bo, void **ptr);
@@ -99,9 +99,7 @@ extern struct qxl_bo *qxl_bo_ref(struct qxl_bo *bo);
99extern void qxl_bo_unref(struct qxl_bo **bo); 99extern void qxl_bo_unref(struct qxl_bo **bo);
100extern int qxl_bo_pin(struct qxl_bo *bo, u32 domain, u64 *gpu_addr); 100extern int qxl_bo_pin(struct qxl_bo *bo, u32 domain, u64 *gpu_addr);
101extern int qxl_bo_unpin(struct qxl_bo *bo); 101extern int qxl_bo_unpin(struct qxl_bo *bo);
102extern void qxl_ttm_placement_from_domain(struct qxl_bo *qbo, u32 domain); 102extern void qxl_ttm_placement_from_domain(struct qxl_bo *qbo, u32 domain, bool pinned);
103extern bool qxl_ttm_bo_is_qxl_bo(struct ttm_buffer_object *bo); 103extern bool qxl_ttm_bo_is_qxl_bo(struct ttm_buffer_object *bo);
104 104
105extern int qxl_bo_list_add(struct qxl_reloc_list *reloc_list, struct qxl_bo *bo);
106extern void qxl_bo_list_unreserve(struct qxl_reloc_list *reloc_list, bool failed);
107#endif 105#endif
diff --git a/drivers/gpu/drm/qxl/qxl_release.c b/drivers/gpu/drm/qxl/qxl_release.c
index b443d6751d5f..b61449e52cd5 100644
--- a/drivers/gpu/drm/qxl/qxl_release.c
+++ b/drivers/gpu/drm/qxl/qxl_release.c
@@ -38,7 +38,8 @@
38 38
39static const int release_size_per_bo[] = { RELEASE_SIZE, SURFACE_RELEASE_SIZE, RELEASE_SIZE }; 39static const int release_size_per_bo[] = { RELEASE_SIZE, SURFACE_RELEASE_SIZE, RELEASE_SIZE };
40static const int releases_per_bo[] = { RELEASES_PER_BO, SURFACE_RELEASES_PER_BO, RELEASES_PER_BO }; 40static const int releases_per_bo[] = { RELEASES_PER_BO, SURFACE_RELEASES_PER_BO, RELEASES_PER_BO };
41uint64_t 41
42static uint64_t
42qxl_release_alloc(struct qxl_device *qdev, int type, 43qxl_release_alloc(struct qxl_device *qdev, int type,
43 struct qxl_release **ret) 44 struct qxl_release **ret)
44{ 45{
@@ -53,9 +54,9 @@ qxl_release_alloc(struct qxl_device *qdev, int type,
53 return 0; 54 return 0;
54 } 55 }
55 release->type = type; 56 release->type = type;
56 release->bo_count = 0;
57 release->release_offset = 0; 57 release->release_offset = 0;
58 release->surface_release_id = 0; 58 release->surface_release_id = 0;
59 INIT_LIST_HEAD(&release->bos);
59 60
60 idr_preload(GFP_KERNEL); 61 idr_preload(GFP_KERNEL);
61 spin_lock(&qdev->release_idr_lock); 62 spin_lock(&qdev->release_idr_lock);
@@ -77,20 +78,20 @@ void
77qxl_release_free(struct qxl_device *qdev, 78qxl_release_free(struct qxl_device *qdev,
78 struct qxl_release *release) 79 struct qxl_release *release)
79{ 80{
80 int i; 81 struct qxl_bo_list *entry, *tmp;
81 82 QXL_INFO(qdev, "release %d, type %d\n", release->id,
82 QXL_INFO(qdev, "release %d, type %d, %d bos\n", release->id, 83 release->type);
83 release->type, release->bo_count);
84 84
85 if (release->surface_release_id) 85 if (release->surface_release_id)
86 qxl_surface_id_dealloc(qdev, release->surface_release_id); 86 qxl_surface_id_dealloc(qdev, release->surface_release_id);
87 87
88 for (i = 0 ; i < release->bo_count; ++i) { 88 list_for_each_entry_safe(entry, tmp, &release->bos, tv.head) {
89 struct qxl_bo *bo = to_qxl_bo(entry->tv.bo);
89 QXL_INFO(qdev, "release %llx\n", 90 QXL_INFO(qdev, "release %llx\n",
90 release->bos[i]->tbo.addr_space_offset 91 entry->tv.bo->addr_space_offset
91 - DRM_FILE_OFFSET); 92 - DRM_FILE_OFFSET);
92 qxl_fence_remove_release(&release->bos[i]->fence, release->id); 93 qxl_fence_remove_release(&bo->fence, release->id);
93 qxl_bo_unref(&release->bos[i]); 94 qxl_bo_unref(&bo);
94 } 95 }
95 spin_lock(&qdev->release_idr_lock); 96 spin_lock(&qdev->release_idr_lock);
96 idr_remove(&qdev->release_idr, release->id); 97 idr_remove(&qdev->release_idr, release->id);
@@ -98,83 +99,117 @@ qxl_release_free(struct qxl_device *qdev,
98 kfree(release); 99 kfree(release);
99} 100}
100 101
101void
102qxl_release_add_res(struct qxl_device *qdev, struct qxl_release *release,
103 struct qxl_bo *bo)
104{
105 int i;
106 for (i = 0; i < release->bo_count; i++)
107 if (release->bos[i] == bo)
108 return;
109
110 if (release->bo_count >= QXL_MAX_RES) {
111 DRM_ERROR("exceeded max resource on a qxl_release item\n");
112 return;
113 }
114 release->bos[release->bo_count++] = qxl_bo_ref(bo);
115}
116
117static int qxl_release_bo_alloc(struct qxl_device *qdev, 102static int qxl_release_bo_alloc(struct qxl_device *qdev,
118 struct qxl_bo **bo) 103 struct qxl_bo **bo)
119{ 104{
120 int ret; 105 int ret;
121 ret = qxl_bo_create(qdev, PAGE_SIZE, false, QXL_GEM_DOMAIN_VRAM, NULL, 106 /* pin releases bo's they are too messy to evict */
107 ret = qxl_bo_create(qdev, PAGE_SIZE, false, true,
108 QXL_GEM_DOMAIN_VRAM, NULL,
122 bo); 109 bo);
123 return ret; 110 return ret;
124} 111}
125 112
126int qxl_release_reserve(struct qxl_device *qdev, 113int qxl_release_list_add(struct qxl_release *release, struct qxl_bo *bo)
127 struct qxl_release *release, bool no_wait) 114{
115 struct qxl_bo_list *entry;
116
117 list_for_each_entry(entry, &release->bos, tv.head) {
118 if (entry->tv.bo == &bo->tbo)
119 return 0;
120 }
121
122 entry = kmalloc(sizeof(struct qxl_bo_list), GFP_KERNEL);
123 if (!entry)
124 return -ENOMEM;
125
126 qxl_bo_ref(bo);
127 entry->tv.bo = &bo->tbo;
128 list_add_tail(&entry->tv.head, &release->bos);
129 return 0;
130}
131
132static int qxl_release_validate_bo(struct qxl_bo *bo)
128{ 133{
129 int ret; 134 int ret;
130 if (atomic_inc_return(&release->bos[0]->reserve_count) == 1) { 135
131 ret = qxl_bo_reserve(release->bos[0], no_wait); 136 if (!bo->pin_count) {
137 qxl_ttm_placement_from_domain(bo, bo->type, false);
138 ret = ttm_bo_validate(&bo->tbo, &bo->placement,
139 true, false);
132 if (ret) 140 if (ret)
133 return ret; 141 return ret;
134 } 142 }
143
144 /* allocate a surface for reserved + validated buffers */
145 ret = qxl_bo_check_id(bo->gem_base.dev->dev_private, bo);
146 if (ret)
147 return ret;
148 return 0;
149}
150
151int qxl_release_reserve_list(struct qxl_release *release, bool no_intr)
152{
153 int ret;
154 struct qxl_bo_list *entry;
155
156 /* if only one object on the release its the release itself
157 since these objects are pinned no need to reserve */
158 if (list_is_singular(&release->bos))
159 return 0;
160
161 ret = ttm_eu_reserve_buffers(&release->ticket, &release->bos);
162 if (ret)
163 return ret;
164
165 list_for_each_entry(entry, &release->bos, tv.head) {
166 struct qxl_bo *bo = to_qxl_bo(entry->tv.bo);
167
168 ret = qxl_release_validate_bo(bo);
169 if (ret) {
170 ttm_eu_backoff_reservation(&release->ticket, &release->bos);
171 return ret;
172 }
173 }
135 return 0; 174 return 0;
136} 175}
137 176
138void qxl_release_unreserve(struct qxl_device *qdev, 177void qxl_release_backoff_reserve_list(struct qxl_release *release)
139 struct qxl_release *release)
140{ 178{
141 if (atomic_dec_and_test(&release->bos[0]->reserve_count)) 179 /* if only one object on the release its the release itself
142 qxl_bo_unreserve(release->bos[0]); 180 since these objects are pinned no need to reserve */
181 if (list_is_singular(&release->bos))
182 return;
183
184 ttm_eu_backoff_reservation(&release->ticket, &release->bos);
143} 185}
144 186
187
145int qxl_alloc_surface_release_reserved(struct qxl_device *qdev, 188int qxl_alloc_surface_release_reserved(struct qxl_device *qdev,
146 enum qxl_surface_cmd_type surface_cmd_type, 189 enum qxl_surface_cmd_type surface_cmd_type,
147 struct qxl_release *create_rel, 190 struct qxl_release *create_rel,
148 struct qxl_release **release) 191 struct qxl_release **release)
149{ 192{
150 int ret;
151
152 if (surface_cmd_type == QXL_SURFACE_CMD_DESTROY && create_rel) { 193 if (surface_cmd_type == QXL_SURFACE_CMD_DESTROY && create_rel) {
153 int idr_ret; 194 int idr_ret;
195 struct qxl_bo_list *entry = list_first_entry(&create_rel->bos, struct qxl_bo_list, tv.head);
154 struct qxl_bo *bo; 196 struct qxl_bo *bo;
155 union qxl_release_info *info; 197 union qxl_release_info *info;
156 198
157 /* stash the release after the create command */ 199 /* stash the release after the create command */
158 idr_ret = qxl_release_alloc(qdev, QXL_RELEASE_SURFACE_CMD, release); 200 idr_ret = qxl_release_alloc(qdev, QXL_RELEASE_SURFACE_CMD, release);
159 bo = qxl_bo_ref(create_rel->bos[0]); 201 bo = qxl_bo_ref(to_qxl_bo(entry->tv.bo));
160 202
161 (*release)->release_offset = create_rel->release_offset + 64; 203 (*release)->release_offset = create_rel->release_offset + 64;
162 204
163 qxl_release_add_res(qdev, *release, bo); 205 qxl_release_list_add(*release, bo);
164 206
165 ret = qxl_release_reserve(qdev, *release, false);
166 if (ret) {
167 DRM_ERROR("release reserve failed\n");
168 goto out_unref;
169 }
170 info = qxl_release_map(qdev, *release); 207 info = qxl_release_map(qdev, *release);
171 info->id = idr_ret; 208 info->id = idr_ret;
172 qxl_release_unmap(qdev, *release, info); 209 qxl_release_unmap(qdev, *release, info);
173 210
174
175out_unref:
176 qxl_bo_unref(&bo); 211 qxl_bo_unref(&bo);
177 return ret; 212 return 0;
178 } 213 }
179 214
180 return qxl_alloc_release_reserved(qdev, sizeof(struct qxl_surface_cmd), 215 return qxl_alloc_release_reserved(qdev, sizeof(struct qxl_surface_cmd),
@@ -187,7 +222,7 @@ int qxl_alloc_release_reserved(struct qxl_device *qdev, unsigned long size,
187{ 222{
188 struct qxl_bo *bo; 223 struct qxl_bo *bo;
189 int idr_ret; 224 int idr_ret;
190 int ret; 225 int ret = 0;
191 union qxl_release_info *info; 226 union qxl_release_info *info;
192 int cur_idx; 227 int cur_idx;
193 228
@@ -216,11 +251,6 @@ int qxl_alloc_release_reserved(struct qxl_device *qdev, unsigned long size,
216 mutex_unlock(&qdev->release_mutex); 251 mutex_unlock(&qdev->release_mutex);
217 return ret; 252 return ret;
218 } 253 }
219
220 /* pin releases bo's they are too messy to evict */
221 ret = qxl_bo_reserve(qdev->current_release_bo[cur_idx], false);
222 qxl_bo_pin(qdev->current_release_bo[cur_idx], QXL_GEM_DOMAIN_VRAM, NULL);
223 qxl_bo_unreserve(qdev->current_release_bo[cur_idx]);
224 } 254 }
225 255
226 bo = qxl_bo_ref(qdev->current_release_bo[cur_idx]); 256 bo = qxl_bo_ref(qdev->current_release_bo[cur_idx]);
@@ -231,36 +261,18 @@ int qxl_alloc_release_reserved(struct qxl_device *qdev, unsigned long size,
231 if (rbo) 261 if (rbo)
232 *rbo = bo; 262 *rbo = bo;
233 263
234 qxl_release_add_res(qdev, *release, bo);
235
236 ret = qxl_release_reserve(qdev, *release, false);
237 mutex_unlock(&qdev->release_mutex); 264 mutex_unlock(&qdev->release_mutex);
238 if (ret) 265
239 goto out_unref; 266 qxl_release_list_add(*release, bo);
240 267
241 info = qxl_release_map(qdev, *release); 268 info = qxl_release_map(qdev, *release);
242 info->id = idr_ret; 269 info->id = idr_ret;
243 qxl_release_unmap(qdev, *release, info); 270 qxl_release_unmap(qdev, *release, info);
244 271
245out_unref:
246 qxl_bo_unref(&bo); 272 qxl_bo_unref(&bo);
247 return ret; 273 return ret;
248} 274}
249 275
250int qxl_fence_releaseable(struct qxl_device *qdev,
251 struct qxl_release *release)
252{
253 int i, ret;
254 for (i = 0; i < release->bo_count; i++) {
255 if (!release->bos[i]->tbo.sync_obj)
256 release->bos[i]->tbo.sync_obj = &release->bos[i]->fence;
257 ret = qxl_fence_add_release(&release->bos[i]->fence, release->id);
258 if (ret)
259 return ret;
260 }
261 return 0;
262}
263
264struct qxl_release *qxl_release_from_id_locked(struct qxl_device *qdev, 276struct qxl_release *qxl_release_from_id_locked(struct qxl_device *qdev,
265 uint64_t id) 277 uint64_t id)
266{ 278{
@@ -273,10 +285,7 @@ struct qxl_release *qxl_release_from_id_locked(struct qxl_device *qdev,
273 DRM_ERROR("failed to find id in release_idr\n"); 285 DRM_ERROR("failed to find id in release_idr\n");
274 return NULL; 286 return NULL;
275 } 287 }
276 if (release->bo_count < 1) { 288
277 DRM_ERROR("read a released resource with 0 bos\n");
278 return NULL;
279 }
280 return release; 289 return release;
281} 290}
282 291
@@ -285,9 +294,12 @@ union qxl_release_info *qxl_release_map(struct qxl_device *qdev,
285{ 294{
286 void *ptr; 295 void *ptr;
287 union qxl_release_info *info; 296 union qxl_release_info *info;
288 struct qxl_bo *bo = release->bos[0]; 297 struct qxl_bo_list *entry = list_first_entry(&release->bos, struct qxl_bo_list, tv.head);
298 struct qxl_bo *bo = to_qxl_bo(entry->tv.bo);
289 299
290 ptr = qxl_bo_kmap_atomic_page(qdev, bo, release->release_offset & PAGE_SIZE); 300 ptr = qxl_bo_kmap_atomic_page(qdev, bo, release->release_offset & PAGE_SIZE);
301 if (!ptr)
302 return NULL;
291 info = ptr + (release->release_offset & ~PAGE_SIZE); 303 info = ptr + (release->release_offset & ~PAGE_SIZE);
292 return info; 304 return info;
293} 305}
@@ -296,9 +308,51 @@ void qxl_release_unmap(struct qxl_device *qdev,
296 struct qxl_release *release, 308 struct qxl_release *release,
297 union qxl_release_info *info) 309 union qxl_release_info *info)
298{ 310{
299 struct qxl_bo *bo = release->bos[0]; 311 struct qxl_bo_list *entry = list_first_entry(&release->bos, struct qxl_bo_list, tv.head);
312 struct qxl_bo *bo = to_qxl_bo(entry->tv.bo);
300 void *ptr; 313 void *ptr;
301 314
302 ptr = ((void *)info) - (release->release_offset & ~PAGE_SIZE); 315 ptr = ((void *)info) - (release->release_offset & ~PAGE_SIZE);
303 qxl_bo_kunmap_atomic_page(qdev, bo, ptr); 316 qxl_bo_kunmap_atomic_page(qdev, bo, ptr);
304} 317}
318
319void qxl_release_fence_buffer_objects(struct qxl_release *release)
320{
321 struct ttm_validate_buffer *entry;
322 struct ttm_buffer_object *bo;
323 struct ttm_bo_global *glob;
324 struct ttm_bo_device *bdev;
325 struct ttm_bo_driver *driver;
326 struct qxl_bo *qbo;
327
328 /* if only one object on the release its the release itself
329 since these objects are pinned no need to reserve */
330 if (list_is_singular(&release->bos))
331 return;
332
333 bo = list_first_entry(&release->bos, struct ttm_validate_buffer, head)->bo;
334 bdev = bo->bdev;
335 driver = bdev->driver;
336 glob = bo->glob;
337
338 spin_lock(&glob->lru_lock);
339 spin_lock(&bdev->fence_lock);
340
341 list_for_each_entry(entry, &release->bos, head) {
342 bo = entry->bo;
343 qbo = to_qxl_bo(bo);
344
345 if (!entry->bo->sync_obj)
346 entry->bo->sync_obj = &qbo->fence;
347
348 qxl_fence_add_release_locked(&qbo->fence, release->id);
349
350 ttm_bo_add_to_lru(bo);
351 ww_mutex_unlock(&bo->resv->lock);
352 entry->reserved = false;
353 }
354 spin_unlock(&bdev->fence_lock);
355 spin_unlock(&glob->lru_lock);
356 ww_acquire_fini(&release->ticket);
357}
358
diff --git a/drivers/gpu/drm/qxl/qxl_ttm.c b/drivers/gpu/drm/qxl/qxl_ttm.c
index 489cb8cece4d..1dfd84cda2a1 100644
--- a/drivers/gpu/drm/qxl/qxl_ttm.c
+++ b/drivers/gpu/drm/qxl/qxl_ttm.c
@@ -206,7 +206,7 @@ static void qxl_evict_flags(struct ttm_buffer_object *bo,
206 return; 206 return;
207 } 207 }
208 qbo = container_of(bo, struct qxl_bo, tbo); 208 qbo = container_of(bo, struct qxl_bo, tbo);
209 qxl_ttm_placement_from_domain(qbo, QXL_GEM_DOMAIN_CPU); 209 qxl_ttm_placement_from_domain(qbo, QXL_GEM_DOMAIN_CPU, false);
210 *placement = qbo->placement; 210 *placement = qbo->placement;
211} 211}
212 212
diff --git a/drivers/gpu/drm/radeon/atom.c b/drivers/gpu/drm/radeon/atom.c
index fb441a790f3d..15da7ef344a4 100644
--- a/drivers/gpu/drm/radeon/atom.c
+++ b/drivers/gpu/drm/radeon/atom.c
@@ -1222,12 +1222,17 @@ int atom_execute_table(struct atom_context *ctx, int index, uint32_t * params)
1222 int r; 1222 int r;
1223 1223
1224 mutex_lock(&ctx->mutex); 1224 mutex_lock(&ctx->mutex);
1225 /* reset data block */
1226 ctx->data_block = 0;
1225 /* reset reg block */ 1227 /* reset reg block */
1226 ctx->reg_block = 0; 1228 ctx->reg_block = 0;
1227 /* reset fb window */ 1229 /* reset fb window */
1228 ctx->fb_base = 0; 1230 ctx->fb_base = 0;
1229 /* reset io mode */ 1231 /* reset io mode */
1230 ctx->io_mode = ATOM_IO_MM; 1232 ctx->io_mode = ATOM_IO_MM;
1233 /* reset divmul */
1234 ctx->divmul[0] = 0;
1235 ctx->divmul[1] = 0;
1231 r = atom_execute_table_locked(ctx, index, params); 1236 r = atom_execute_table_locked(ctx, index, params);
1232 mutex_unlock(&ctx->mutex); 1237 mutex_unlock(&ctx->mutex);
1233 return r; 1238 return r;
diff --git a/drivers/gpu/drm/radeon/atombios_dp.c b/drivers/gpu/drm/radeon/atombios_dp.c
index 064023bed480..32501f6ec991 100644
--- a/drivers/gpu/drm/radeon/atombios_dp.c
+++ b/drivers/gpu/drm/radeon/atombios_dp.c
@@ -44,6 +44,41 @@ static char *pre_emph_names[] = {
44}; 44};
45 45
46/***** radeon AUX functions *****/ 46/***** radeon AUX functions *****/
47
48/* Atom needs data in little endian format
49 * so swap as appropriate when copying data to
50 * or from atom. Note that atom operates on
51 * dw units.
52 */
53static void radeon_copy_swap(u8 *dst, u8 *src, u8 num_bytes, bool to_le)
54{
55#ifdef __BIG_ENDIAN
56 u8 src_tmp[20], dst_tmp[20]; /* used for byteswapping */
57 u32 *dst32, *src32;
58 int i;
59
60 memcpy(src_tmp, src, num_bytes);
61 src32 = (u32 *)src_tmp;
62 dst32 = (u32 *)dst_tmp;
63 if (to_le) {
64 for (i = 0; i < ((num_bytes + 3) / 4); i++)
65 dst32[i] = cpu_to_le32(src32[i]);
66 memcpy(dst, dst_tmp, num_bytes);
67 } else {
68 u8 dws = num_bytes & ~3;
69 for (i = 0; i < ((num_bytes + 3) / 4); i++)
70 dst32[i] = le32_to_cpu(src32[i]);
71 memcpy(dst, dst_tmp, dws);
72 if (num_bytes % 4) {
73 for (i = 0; i < (num_bytes % 4); i++)
74 dst[dws+i] = dst_tmp[dws+i];
75 }
76 }
77#else
78 memcpy(dst, src, num_bytes);
79#endif
80}
81
47union aux_channel_transaction { 82union aux_channel_transaction {
48 PROCESS_AUX_CHANNEL_TRANSACTION_PS_ALLOCATION v1; 83 PROCESS_AUX_CHANNEL_TRANSACTION_PS_ALLOCATION v1;
49 PROCESS_AUX_CHANNEL_TRANSACTION_PARAMETERS_V2 v2; 84 PROCESS_AUX_CHANNEL_TRANSACTION_PARAMETERS_V2 v2;
@@ -65,10 +100,10 @@ static int radeon_process_aux_ch(struct radeon_i2c_chan *chan,
65 100
66 base = (unsigned char *)(rdev->mode_info.atom_context->scratch + 1); 101 base = (unsigned char *)(rdev->mode_info.atom_context->scratch + 1);
67 102
68 memcpy(base, send, send_bytes); 103 radeon_copy_swap(base, send, send_bytes, true);
69 104
70 args.v1.lpAuxRequest = 0 + 4; 105 args.v1.lpAuxRequest = cpu_to_le16((u16)(0 + 4));
71 args.v1.lpDataOut = 16 + 4; 106 args.v1.lpDataOut = cpu_to_le16((u16)(16 + 4));
72 args.v1.ucDataOutLen = 0; 107 args.v1.ucDataOutLen = 0;
73 args.v1.ucChannelID = chan->rec.i2c_id; 108 args.v1.ucChannelID = chan->rec.i2c_id;
74 args.v1.ucDelay = delay / 10; 109 args.v1.ucDelay = delay / 10;
@@ -102,7 +137,7 @@ static int radeon_process_aux_ch(struct radeon_i2c_chan *chan,
102 recv_bytes = recv_size; 137 recv_bytes = recv_size;
103 138
104 if (recv && recv_size) 139 if (recv && recv_size)
105 memcpy(recv, base + 16, recv_bytes); 140 radeon_copy_swap(recv, base + 16, recv_bytes, false);
106 141
107 return recv_bytes; 142 return recv_bytes;
108} 143}
diff --git a/drivers/gpu/drm/radeon/btc_dpm.c b/drivers/gpu/drm/radeon/btc_dpm.c
index 0bfd55e08820..9953e1fbc46d 100644
--- a/drivers/gpu/drm/radeon/btc_dpm.c
+++ b/drivers/gpu/drm/radeon/btc_dpm.c
@@ -2548,9 +2548,6 @@ int btc_dpm_init(struct radeon_device *rdev)
2548{ 2548{
2549 struct rv7xx_power_info *pi; 2549 struct rv7xx_power_info *pi;
2550 struct evergreen_power_info *eg_pi; 2550 struct evergreen_power_info *eg_pi;
2551 int index = GetIndexIntoMasterTable(DATA, ASIC_InternalSS_Info);
2552 u16 data_offset, size;
2553 u8 frev, crev;
2554 struct atom_clock_dividers dividers; 2551 struct atom_clock_dividers dividers;
2555 int ret; 2552 int ret;
2556 2553
@@ -2633,16 +2630,7 @@ int btc_dpm_init(struct radeon_device *rdev)
2633 eg_pi->vddci_control = 2630 eg_pi->vddci_control =
2634 radeon_atom_is_voltage_gpio(rdev, SET_VOLTAGE_TYPE_ASIC_VDDCI, 0); 2631 radeon_atom_is_voltage_gpio(rdev, SET_VOLTAGE_TYPE_ASIC_VDDCI, 0);
2635 2632
2636 if (atom_parse_data_header(rdev->mode_info.atom_context, index, &size, 2633 rv770_get_engine_memory_ss(rdev);
2637 &frev, &crev, &data_offset)) {
2638 pi->sclk_ss = true;
2639 pi->mclk_ss = true;
2640 pi->dynamic_ss = true;
2641 } else {
2642 pi->sclk_ss = false;
2643 pi->mclk_ss = false;
2644 pi->dynamic_ss = true;
2645 }
2646 2634
2647 pi->asi = RV770_ASI_DFLT; 2635 pi->asi = RV770_ASI_DFLT;
2648 pi->pasi = CYPRESS_HASI_DFLT; 2636 pi->pasi = CYPRESS_HASI_DFLT;
@@ -2659,8 +2647,7 @@ int btc_dpm_init(struct radeon_device *rdev)
2659 2647
2660 pi->dynamic_pcie_gen2 = true; 2648 pi->dynamic_pcie_gen2 = true;
2661 2649
2662 if (pi->gfx_clock_gating && 2650 if (rdev->pm.int_thermal_type != THERMAL_TYPE_NONE)
2663 (rdev->pm.int_thermal_type != THERMAL_TYPE_NONE))
2664 pi->thermal_protection = true; 2651 pi->thermal_protection = true;
2665 else 2652 else
2666 pi->thermal_protection = false; 2653 pi->thermal_protection = false;
diff --git a/drivers/gpu/drm/radeon/cik.c b/drivers/gpu/drm/radeon/cik.c
index ed1d91025928..8928bd109c16 100644
--- a/drivers/gpu/drm/radeon/cik.c
+++ b/drivers/gpu/drm/radeon/cik.c
@@ -22,7 +22,6 @@
22 * Authors: Alex Deucher 22 * Authors: Alex Deucher
23 */ 23 */
24#include <linux/firmware.h> 24#include <linux/firmware.h>
25#include <linux/platform_device.h>
26#include <linux/slab.h> 25#include <linux/slab.h>
27#include <linux/module.h> 26#include <linux/module.h>
28#include "drmP.h" 27#include "drmP.h"
@@ -742,7 +741,6 @@ static int ci_mc_load_microcode(struct radeon_device *rdev)
742 */ 741 */
743static int cik_init_microcode(struct radeon_device *rdev) 742static int cik_init_microcode(struct radeon_device *rdev)
744{ 743{
745 struct platform_device *pdev;
746 const char *chip_name; 744 const char *chip_name;
747 size_t pfp_req_size, me_req_size, ce_req_size, 745 size_t pfp_req_size, me_req_size, ce_req_size,
748 mec_req_size, rlc_req_size, mc_req_size, 746 mec_req_size, rlc_req_size, mc_req_size,
@@ -752,13 +750,6 @@ static int cik_init_microcode(struct radeon_device *rdev)
752 750
753 DRM_DEBUG("\n"); 751 DRM_DEBUG("\n");
754 752
755 pdev = platform_device_register_simple("radeon_cp", 0, NULL, 0);
756 err = IS_ERR(pdev);
757 if (err) {
758 printk(KERN_ERR "radeon_cp: Failed to register firmware\n");
759 return -EINVAL;
760 }
761
762 switch (rdev->family) { 753 switch (rdev->family) {
763 case CHIP_BONAIRE: 754 case CHIP_BONAIRE:
764 chip_name = "BONAIRE"; 755 chip_name = "BONAIRE";
@@ -794,7 +785,7 @@ static int cik_init_microcode(struct radeon_device *rdev)
794 DRM_INFO("Loading %s Microcode\n", chip_name); 785 DRM_INFO("Loading %s Microcode\n", chip_name);
795 786
796 snprintf(fw_name, sizeof(fw_name), "radeon/%s_pfp.bin", chip_name); 787 snprintf(fw_name, sizeof(fw_name), "radeon/%s_pfp.bin", chip_name);
797 err = request_firmware(&rdev->pfp_fw, fw_name, &pdev->dev); 788 err = request_firmware(&rdev->pfp_fw, fw_name, rdev->dev);
798 if (err) 789 if (err)
799 goto out; 790 goto out;
800 if (rdev->pfp_fw->size != pfp_req_size) { 791 if (rdev->pfp_fw->size != pfp_req_size) {
@@ -806,7 +797,7 @@ static int cik_init_microcode(struct radeon_device *rdev)
806 } 797 }
807 798
808 snprintf(fw_name, sizeof(fw_name), "radeon/%s_me.bin", chip_name); 799 snprintf(fw_name, sizeof(fw_name), "radeon/%s_me.bin", chip_name);
809 err = request_firmware(&rdev->me_fw, fw_name, &pdev->dev); 800 err = request_firmware(&rdev->me_fw, fw_name, rdev->dev);
810 if (err) 801 if (err)
811 goto out; 802 goto out;
812 if (rdev->me_fw->size != me_req_size) { 803 if (rdev->me_fw->size != me_req_size) {
@@ -817,7 +808,7 @@ static int cik_init_microcode(struct radeon_device *rdev)
817 } 808 }
818 809
819 snprintf(fw_name, sizeof(fw_name), "radeon/%s_ce.bin", chip_name); 810 snprintf(fw_name, sizeof(fw_name), "radeon/%s_ce.bin", chip_name);
820 err = request_firmware(&rdev->ce_fw, fw_name, &pdev->dev); 811 err = request_firmware(&rdev->ce_fw, fw_name, rdev->dev);
821 if (err) 812 if (err)
822 goto out; 813 goto out;
823 if (rdev->ce_fw->size != ce_req_size) { 814 if (rdev->ce_fw->size != ce_req_size) {
@@ -828,7 +819,7 @@ static int cik_init_microcode(struct radeon_device *rdev)
828 } 819 }
829 820
830 snprintf(fw_name, sizeof(fw_name), "radeon/%s_mec.bin", chip_name); 821 snprintf(fw_name, sizeof(fw_name), "radeon/%s_mec.bin", chip_name);
831 err = request_firmware(&rdev->mec_fw, fw_name, &pdev->dev); 822 err = request_firmware(&rdev->mec_fw, fw_name, rdev->dev);
832 if (err) 823 if (err)
833 goto out; 824 goto out;
834 if (rdev->mec_fw->size != mec_req_size) { 825 if (rdev->mec_fw->size != mec_req_size) {
@@ -839,7 +830,7 @@ static int cik_init_microcode(struct radeon_device *rdev)
839 } 830 }
840 831
841 snprintf(fw_name, sizeof(fw_name), "radeon/%s_rlc.bin", chip_name); 832 snprintf(fw_name, sizeof(fw_name), "radeon/%s_rlc.bin", chip_name);
842 err = request_firmware(&rdev->rlc_fw, fw_name, &pdev->dev); 833 err = request_firmware(&rdev->rlc_fw, fw_name, rdev->dev);
843 if (err) 834 if (err)
844 goto out; 835 goto out;
845 if (rdev->rlc_fw->size != rlc_req_size) { 836 if (rdev->rlc_fw->size != rlc_req_size) {
@@ -850,7 +841,7 @@ static int cik_init_microcode(struct radeon_device *rdev)
850 } 841 }
851 842
852 snprintf(fw_name, sizeof(fw_name), "radeon/%s_sdma.bin", chip_name); 843 snprintf(fw_name, sizeof(fw_name), "radeon/%s_sdma.bin", chip_name);
853 err = request_firmware(&rdev->sdma_fw, fw_name, &pdev->dev); 844 err = request_firmware(&rdev->sdma_fw, fw_name, rdev->dev);
854 if (err) 845 if (err)
855 goto out; 846 goto out;
856 if (rdev->sdma_fw->size != sdma_req_size) { 847 if (rdev->sdma_fw->size != sdma_req_size) {
@@ -863,7 +854,7 @@ static int cik_init_microcode(struct radeon_device *rdev)
863 /* No MC ucode on APUs */ 854 /* No MC ucode on APUs */
864 if (!(rdev->flags & RADEON_IS_IGP)) { 855 if (!(rdev->flags & RADEON_IS_IGP)) {
865 snprintf(fw_name, sizeof(fw_name), "radeon/%s_mc.bin", chip_name); 856 snprintf(fw_name, sizeof(fw_name), "radeon/%s_mc.bin", chip_name);
866 err = request_firmware(&rdev->mc_fw, fw_name, &pdev->dev); 857 err = request_firmware(&rdev->mc_fw, fw_name, rdev->dev);
867 if (err) 858 if (err)
868 goto out; 859 goto out;
869 if (rdev->mc_fw->size != mc_req_size) { 860 if (rdev->mc_fw->size != mc_req_size) {
@@ -875,8 +866,6 @@ static int cik_init_microcode(struct radeon_device *rdev)
875 } 866 }
876 867
877out: 868out:
878 platform_device_unregister(pdev);
879
880 if (err) { 869 if (err) {
881 if (err != -EINVAL) 870 if (err != -EINVAL)
882 printk(KERN_ERR 871 printk(KERN_ERR
@@ -2598,9 +2587,11 @@ u32 cik_compute_ring_get_rptr(struct radeon_device *rdev,
2598 if (rdev->wb.enabled) { 2587 if (rdev->wb.enabled) {
2599 rptr = le32_to_cpu(rdev->wb.wb[ring->rptr_offs/4]); 2588 rptr = le32_to_cpu(rdev->wb.wb[ring->rptr_offs/4]);
2600 } else { 2589 } else {
2590 mutex_lock(&rdev->srbm_mutex);
2601 cik_srbm_select(rdev, ring->me, ring->pipe, ring->queue, 0); 2591 cik_srbm_select(rdev, ring->me, ring->pipe, ring->queue, 0);
2602 rptr = RREG32(CP_HQD_PQ_RPTR); 2592 rptr = RREG32(CP_HQD_PQ_RPTR);
2603 cik_srbm_select(rdev, 0, 0, 0, 0); 2593 cik_srbm_select(rdev, 0, 0, 0, 0);
2594 mutex_unlock(&rdev->srbm_mutex);
2604 } 2595 }
2605 rptr = (rptr & ring->ptr_reg_mask) >> ring->ptr_reg_shift; 2596 rptr = (rptr & ring->ptr_reg_mask) >> ring->ptr_reg_shift;
2606 2597
@@ -2615,9 +2606,11 @@ u32 cik_compute_ring_get_wptr(struct radeon_device *rdev,
2615 if (rdev->wb.enabled) { 2606 if (rdev->wb.enabled) {
2616 wptr = le32_to_cpu(rdev->wb.wb[ring->wptr_offs/4]); 2607 wptr = le32_to_cpu(rdev->wb.wb[ring->wptr_offs/4]);
2617 } else { 2608 } else {
2609 mutex_lock(&rdev->srbm_mutex);
2618 cik_srbm_select(rdev, ring->me, ring->pipe, ring->queue, 0); 2610 cik_srbm_select(rdev, ring->me, ring->pipe, ring->queue, 0);
2619 wptr = RREG32(CP_HQD_PQ_WPTR); 2611 wptr = RREG32(CP_HQD_PQ_WPTR);
2620 cik_srbm_select(rdev, 0, 0, 0, 0); 2612 cik_srbm_select(rdev, 0, 0, 0, 0);
2613 mutex_unlock(&rdev->srbm_mutex);
2621 } 2614 }
2622 wptr = (wptr & ring->ptr_reg_mask) >> ring->ptr_reg_shift; 2615 wptr = (wptr & ring->ptr_reg_mask) >> ring->ptr_reg_shift;
2623 2616
@@ -2908,6 +2901,7 @@ static int cik_cp_compute_resume(struct radeon_device *rdev)
2908 WREG32(CP_CPF_DEBUG, tmp); 2901 WREG32(CP_CPF_DEBUG, tmp);
2909 2902
2910 /* init the pipes */ 2903 /* init the pipes */
2904 mutex_lock(&rdev->srbm_mutex);
2911 for (i = 0; i < (rdev->mec.num_pipe * rdev->mec.num_mec); i++) { 2905 for (i = 0; i < (rdev->mec.num_pipe * rdev->mec.num_mec); i++) {
2912 int me = (i < 4) ? 1 : 2; 2906 int me = (i < 4) ? 1 : 2;
2913 int pipe = (i < 4) ? i : (i - 4); 2907 int pipe = (i < 4) ? i : (i - 4);
@@ -2930,6 +2924,7 @@ static int cik_cp_compute_resume(struct radeon_device *rdev)
2930 WREG32(CP_HPD_EOP_CONTROL, tmp); 2924 WREG32(CP_HPD_EOP_CONTROL, tmp);
2931 } 2925 }
2932 cik_srbm_select(rdev, 0, 0, 0, 0); 2926 cik_srbm_select(rdev, 0, 0, 0, 0);
2927 mutex_unlock(&rdev->srbm_mutex);
2933 2928
2934 /* init the queues. Just two for now. */ 2929 /* init the queues. Just two for now. */
2935 for (i = 0; i < 2; i++) { 2930 for (i = 0; i < 2; i++) {
@@ -2983,6 +2978,7 @@ static int cik_cp_compute_resume(struct radeon_device *rdev)
2983 mqd->static_thread_mgmt23[0] = 0xffffffff; 2978 mqd->static_thread_mgmt23[0] = 0xffffffff;
2984 mqd->static_thread_mgmt23[1] = 0xffffffff; 2979 mqd->static_thread_mgmt23[1] = 0xffffffff;
2985 2980
2981 mutex_lock(&rdev->srbm_mutex);
2986 cik_srbm_select(rdev, rdev->ring[idx].me, 2982 cik_srbm_select(rdev, rdev->ring[idx].me,
2987 rdev->ring[idx].pipe, 2983 rdev->ring[idx].pipe,
2988 rdev->ring[idx].queue, 0); 2984 rdev->ring[idx].queue, 0);
@@ -3110,6 +3106,7 @@ static int cik_cp_compute_resume(struct radeon_device *rdev)
3110 WREG32(CP_HQD_ACTIVE, mqd->queue_state.cp_hqd_active); 3106 WREG32(CP_HQD_ACTIVE, mqd->queue_state.cp_hqd_active);
3111 3107
3112 cik_srbm_select(rdev, 0, 0, 0, 0); 3108 cik_srbm_select(rdev, 0, 0, 0, 0);
3109 mutex_unlock(&rdev->srbm_mutex);
3113 3110
3114 radeon_bo_kunmap(rdev->ring[idx].mqd_obj); 3111 radeon_bo_kunmap(rdev->ring[idx].mqd_obj);
3115 radeon_bo_unreserve(rdev->ring[idx].mqd_obj); 3112 radeon_bo_unreserve(rdev->ring[idx].mqd_obj);
@@ -4331,6 +4328,7 @@ static int cik_pcie_gart_enable(struct radeon_device *rdev)
4331 4328
4332 /* XXX SH_MEM regs */ 4329 /* XXX SH_MEM regs */
4333 /* where to put LDS, scratch, GPUVM in FSA64 space */ 4330 /* where to put LDS, scratch, GPUVM in FSA64 space */
4331 mutex_lock(&rdev->srbm_mutex);
4334 for (i = 0; i < 16; i++) { 4332 for (i = 0; i < 16; i++) {
4335 cik_srbm_select(rdev, 0, 0, 0, i); 4333 cik_srbm_select(rdev, 0, 0, 0, i);
4336 /* CP and shaders */ 4334 /* CP and shaders */
@@ -4346,6 +4344,7 @@ static int cik_pcie_gart_enable(struct radeon_device *rdev)
4346 /* XXX SDMA RLC - todo */ 4344 /* XXX SDMA RLC - todo */
4347 } 4345 }
4348 cik_srbm_select(rdev, 0, 0, 0, 0); 4346 cik_srbm_select(rdev, 0, 0, 0, 0);
4347 mutex_unlock(&rdev->srbm_mutex);
4349 4348
4350 cik_pcie_gart_tlb_flush(rdev); 4349 cik_pcie_gart_tlb_flush(rdev);
4351 DRM_INFO("PCIE GART of %uM enabled (table at 0x%016llX).\n", 4350 DRM_INFO("PCIE GART of %uM enabled (table at 0x%016llX).\n",
@@ -4453,6 +4452,29 @@ void cik_vm_fini(struct radeon_device *rdev)
4453} 4452}
4454 4453
4455/** 4454/**
4455 * cik_vm_decode_fault - print human readable fault info
4456 *
4457 * @rdev: radeon_device pointer
4458 * @status: VM_CONTEXT1_PROTECTION_FAULT_STATUS register value
4459 * @addr: VM_CONTEXT1_PROTECTION_FAULT_ADDR register value
4460 *
4461 * Print human readable fault information (CIK).
4462 */
4463static void cik_vm_decode_fault(struct radeon_device *rdev,
4464 u32 status, u32 addr, u32 mc_client)
4465{
4466 u32 mc_id = (status & MEMORY_CLIENT_ID_MASK) >> MEMORY_CLIENT_ID_SHIFT;
4467 u32 vmid = (status & FAULT_VMID_MASK) >> FAULT_VMID_SHIFT;
4468 u32 protections = (status & PROTECTIONS_MASK) >> PROTECTIONS_SHIFT;
4469 char *block = (char *)&mc_client;
4470
4471 printk("VM fault (0x%02x, vmid %d) at page %u, %s from %s (%d)\n",
4472 protections, vmid, addr,
4473 (status & MEMORY_CLIENT_RW_MASK) ? "write" : "read",
4474 block, mc_id);
4475}
4476
4477/**
4456 * cik_vm_flush - cik vm flush using the CP 4478 * cik_vm_flush - cik vm flush using the CP
4457 * 4479 *
4458 * @rdev: radeon_device pointer 4480 * @rdev: radeon_device pointer
@@ -5507,6 +5529,7 @@ int cik_irq_process(struct radeon_device *rdev)
5507 u32 ring_index; 5529 u32 ring_index;
5508 bool queue_hotplug = false; 5530 bool queue_hotplug = false;
5509 bool queue_reset = false; 5531 bool queue_reset = false;
5532 u32 addr, status, mc_client;
5510 5533
5511 if (!rdev->ih.enabled || rdev->shutdown) 5534 if (!rdev->ih.enabled || rdev->shutdown)
5512 return IRQ_NONE; 5535 return IRQ_NONE;
@@ -5742,11 +5765,15 @@ restart_ih:
5742 break; 5765 break;
5743 case 146: 5766 case 146:
5744 case 147: 5767 case 147:
5768 addr = RREG32(VM_CONTEXT1_PROTECTION_FAULT_ADDR);
5769 status = RREG32(VM_CONTEXT1_PROTECTION_FAULT_STATUS);
5770 mc_client = RREG32(VM_CONTEXT1_PROTECTION_FAULT_MCCLIENT);
5745 dev_err(rdev->dev, "GPU fault detected: %d 0x%08x\n", src_id, src_data); 5771 dev_err(rdev->dev, "GPU fault detected: %d 0x%08x\n", src_id, src_data);
5746 dev_err(rdev->dev, " VM_CONTEXT1_PROTECTION_FAULT_ADDR 0x%08X\n", 5772 dev_err(rdev->dev, " VM_CONTEXT1_PROTECTION_FAULT_ADDR 0x%08X\n",
5747 RREG32(VM_CONTEXT1_PROTECTION_FAULT_ADDR)); 5773 addr);
5748 dev_err(rdev->dev, " VM_CONTEXT1_PROTECTION_FAULT_STATUS 0x%08X\n", 5774 dev_err(rdev->dev, " VM_CONTEXT1_PROTECTION_FAULT_STATUS 0x%08X\n",
5749 RREG32(VM_CONTEXT1_PROTECTION_FAULT_STATUS)); 5775 status);
5776 cik_vm_decode_fault(rdev, status, addr, mc_client);
5750 /* reset addr and status */ 5777 /* reset addr and status */
5751 WREG32_P(VM_CONTEXT1_CNTL2, 1, ~1); 5778 WREG32_P(VM_CONTEXT1_CNTL2, 1, ~1);
5752 break; 5779 break;
@@ -5937,6 +5964,8 @@ static int cik_startup(struct radeon_device *rdev)
5937 struct radeon_ring *ring; 5964 struct radeon_ring *ring;
5938 int r; 5965 int r;
5939 5966
5967 cik_mc_program(rdev);
5968
5940 if (rdev->flags & RADEON_IS_IGP) { 5969 if (rdev->flags & RADEON_IS_IGP) {
5941 if (!rdev->me_fw || !rdev->pfp_fw || !rdev->ce_fw || 5970 if (!rdev->me_fw || !rdev->pfp_fw || !rdev->ce_fw ||
5942 !rdev->mec_fw || !rdev->sdma_fw || !rdev->rlc_fw) { 5971 !rdev->mec_fw || !rdev->sdma_fw || !rdev->rlc_fw) {
@@ -5968,7 +5997,6 @@ static int cik_startup(struct radeon_device *rdev)
5968 if (r) 5997 if (r)
5969 return r; 5998 return r;
5970 5999
5971 cik_mc_program(rdev);
5972 r = cik_pcie_gart_enable(rdev); 6000 r = cik_pcie_gart_enable(rdev);
5973 if (r) 6001 if (r)
5974 return r; 6002 return r;
@@ -6177,7 +6205,7 @@ int cik_suspend(struct radeon_device *rdev)
6177 radeon_vm_manager_fini(rdev); 6205 radeon_vm_manager_fini(rdev);
6178 cik_cp_enable(rdev, false); 6206 cik_cp_enable(rdev, false);
6179 cik_sdma_enable(rdev, false); 6207 cik_sdma_enable(rdev, false);
6180 r600_uvd_rbc_stop(rdev); 6208 r600_uvd_stop(rdev);
6181 radeon_uvd_suspend(rdev); 6209 radeon_uvd_suspend(rdev);
6182 cik_irq_suspend(rdev); 6210 cik_irq_suspend(rdev);
6183 radeon_wb_disable(rdev); 6211 radeon_wb_disable(rdev);
@@ -6341,6 +6369,7 @@ void cik_fini(struct radeon_device *rdev)
6341 radeon_vm_manager_fini(rdev); 6369 radeon_vm_manager_fini(rdev);
6342 radeon_ib_pool_fini(rdev); 6370 radeon_ib_pool_fini(rdev);
6343 radeon_irq_kms_fini(rdev); 6371 radeon_irq_kms_fini(rdev);
6372 r600_uvd_stop(rdev);
6344 radeon_uvd_fini(rdev); 6373 radeon_uvd_fini(rdev);
6345 cik_pcie_gart_fini(rdev); 6374 cik_pcie_gart_fini(rdev);
6346 r600_vram_scratch_fini(rdev); 6375 r600_vram_scratch_fini(rdev);
diff --git a/drivers/gpu/drm/radeon/cikd.h b/drivers/gpu/drm/radeon/cikd.h
index 63514b95889a..7e9275eaef80 100644
--- a/drivers/gpu/drm/radeon/cikd.h
+++ b/drivers/gpu/drm/radeon/cikd.h
@@ -136,6 +136,22 @@
136#define VM_INVALIDATE_RESPONSE 0x147c 136#define VM_INVALIDATE_RESPONSE 0x147c
137 137
138#define VM_CONTEXT1_PROTECTION_FAULT_STATUS 0x14DC 138#define VM_CONTEXT1_PROTECTION_FAULT_STATUS 0x14DC
139#define PROTECTIONS_MASK (0xf << 0)
140#define PROTECTIONS_SHIFT 0
141 /* bit 0: range
142 * bit 1: pde0
143 * bit 2: valid
144 * bit 3: read
145 * bit 4: write
146 */
147#define MEMORY_CLIENT_ID_MASK (0xff << 12)
148#define MEMORY_CLIENT_ID_SHIFT 12
149#define MEMORY_CLIENT_RW_MASK (1 << 24)
150#define MEMORY_CLIENT_RW_SHIFT 24
151#define FAULT_VMID_MASK (0xf << 25)
152#define FAULT_VMID_SHIFT 25
153
154#define VM_CONTEXT1_PROTECTION_FAULT_MCCLIENT 0x14E4
139 155
140#define VM_CONTEXT1_PROTECTION_FAULT_ADDR 0x14FC 156#define VM_CONTEXT1_PROTECTION_FAULT_ADDR 0x14FC
141 157
diff --git a/drivers/gpu/drm/radeon/cypress_dpm.c b/drivers/gpu/drm/radeon/cypress_dpm.c
index 9bcdd174780f..7e5d0b570a30 100644
--- a/drivers/gpu/drm/radeon/cypress_dpm.c
+++ b/drivers/gpu/drm/radeon/cypress_dpm.c
@@ -2038,9 +2038,6 @@ int cypress_dpm_init(struct radeon_device *rdev)
2038{ 2038{
2039 struct rv7xx_power_info *pi; 2039 struct rv7xx_power_info *pi;
2040 struct evergreen_power_info *eg_pi; 2040 struct evergreen_power_info *eg_pi;
2041 int index = GetIndexIntoMasterTable(DATA, ASIC_InternalSS_Info);
2042 uint16_t data_offset, size;
2043 uint8_t frev, crev;
2044 struct atom_clock_dividers dividers; 2041 struct atom_clock_dividers dividers;
2045 int ret; 2042 int ret;
2046 2043
@@ -2092,16 +2089,7 @@ int cypress_dpm_init(struct radeon_device *rdev)
2092 eg_pi->vddci_control = 2089 eg_pi->vddci_control =
2093 radeon_atom_is_voltage_gpio(rdev, SET_VOLTAGE_TYPE_ASIC_VDDCI, 0); 2090 radeon_atom_is_voltage_gpio(rdev, SET_VOLTAGE_TYPE_ASIC_VDDCI, 0);
2094 2091
2095 if (atom_parse_data_header(rdev->mode_info.atom_context, index, &size, 2092 rv770_get_engine_memory_ss(rdev);
2096 &frev, &crev, &data_offset)) {
2097 pi->sclk_ss = true;
2098 pi->mclk_ss = true;
2099 pi->dynamic_ss = true;
2100 } else {
2101 pi->sclk_ss = false;
2102 pi->mclk_ss = false;
2103 pi->dynamic_ss = true;
2104 }
2105 2093
2106 pi->asi = RV770_ASI_DFLT; 2094 pi->asi = RV770_ASI_DFLT;
2107 pi->pasi = CYPRESS_HASI_DFLT; 2095 pi->pasi = CYPRESS_HASI_DFLT;
@@ -2122,8 +2110,7 @@ int cypress_dpm_init(struct radeon_device *rdev)
2122 2110
2123 pi->dynamic_pcie_gen2 = true; 2111 pi->dynamic_pcie_gen2 = true;
2124 2112
2125 if (pi->gfx_clock_gating && 2113 if (rdev->pm.int_thermal_type != THERMAL_TYPE_NONE)
2126 (rdev->pm.int_thermal_type != THERMAL_TYPE_NONE))
2127 pi->thermal_protection = true; 2114 pi->thermal_protection = true;
2128 else 2115 else
2129 pi->thermal_protection = false; 2116 pi->thermal_protection = false;
diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c
index e49059dc9b8f..d5b49e33315e 100644
--- a/drivers/gpu/drm/radeon/evergreen.c
+++ b/drivers/gpu/drm/radeon/evergreen.c
@@ -139,6 +139,8 @@ void evergreen_pcie_gen2_enable(struct radeon_device *rdev);
139void evergreen_program_aspm(struct radeon_device *rdev); 139void evergreen_program_aspm(struct radeon_device *rdev);
140extern void cayman_cp_int_cntl_setup(struct radeon_device *rdev, 140extern void cayman_cp_int_cntl_setup(struct radeon_device *rdev,
141 int ring, u32 cp_int_cntl); 141 int ring, u32 cp_int_cntl);
142extern void cayman_vm_decode_fault(struct radeon_device *rdev,
143 u32 status, u32 addr);
142 144
143static const u32 evergreen_golden_registers[] = 145static const u32 evergreen_golden_registers[] =
144{ 146{
@@ -4586,6 +4588,7 @@ int evergreen_irq_process(struct radeon_device *rdev)
4586 bool queue_hotplug = false; 4588 bool queue_hotplug = false;
4587 bool queue_hdmi = false; 4589 bool queue_hdmi = false;
4588 bool queue_thermal = false; 4590 bool queue_thermal = false;
4591 u32 status, addr;
4589 4592
4590 if (!rdev->ih.enabled || rdev->shutdown) 4593 if (!rdev->ih.enabled || rdev->shutdown)
4591 return IRQ_NONE; 4594 return IRQ_NONE;
@@ -4872,11 +4875,14 @@ restart_ih:
4872 break; 4875 break;
4873 case 146: 4876 case 146:
4874 case 147: 4877 case 147:
4878 addr = RREG32(VM_CONTEXT1_PROTECTION_FAULT_ADDR);
4879 status = RREG32(VM_CONTEXT1_PROTECTION_FAULT_STATUS);
4875 dev_err(rdev->dev, "GPU fault detected: %d 0x%08x\n", src_id, src_data); 4880 dev_err(rdev->dev, "GPU fault detected: %d 0x%08x\n", src_id, src_data);
4876 dev_err(rdev->dev, " VM_CONTEXT1_PROTECTION_FAULT_ADDR 0x%08X\n", 4881 dev_err(rdev->dev, " VM_CONTEXT1_PROTECTION_FAULT_ADDR 0x%08X\n",
4877 RREG32(VM_CONTEXT1_PROTECTION_FAULT_ADDR)); 4882 addr);
4878 dev_err(rdev->dev, " VM_CONTEXT1_PROTECTION_FAULT_STATUS 0x%08X\n", 4883 dev_err(rdev->dev, " VM_CONTEXT1_PROTECTION_FAULT_STATUS 0x%08X\n",
4879 RREG32(VM_CONTEXT1_PROTECTION_FAULT_STATUS)); 4884 status);
4885 cayman_vm_decode_fault(rdev, status, addr);
4880 /* reset addr and status */ 4886 /* reset addr and status */
4881 WREG32_P(VM_CONTEXT1_CNTL2, 1, ~1); 4887 WREG32_P(VM_CONTEXT1_CNTL2, 1, ~1);
4882 break; 4888 break;
@@ -5100,6 +5106,8 @@ static int evergreen_startup(struct radeon_device *rdev)
5100 /* enable aspm */ 5106 /* enable aspm */
5101 evergreen_program_aspm(rdev); 5107 evergreen_program_aspm(rdev);
5102 5108
5109 evergreen_mc_program(rdev);
5110
5103 if (ASIC_IS_DCE5(rdev)) { 5111 if (ASIC_IS_DCE5(rdev)) {
5104 if (!rdev->me_fw || !rdev->pfp_fw || !rdev->rlc_fw || !rdev->mc_fw) { 5112 if (!rdev->me_fw || !rdev->pfp_fw || !rdev->rlc_fw || !rdev->mc_fw) {
5105 r = ni_init_microcode(rdev); 5113 r = ni_init_microcode(rdev);
@@ -5127,7 +5135,6 @@ static int evergreen_startup(struct radeon_device *rdev)
5127 if (r) 5135 if (r)
5128 return r; 5136 return r;
5129 5137
5130 evergreen_mc_program(rdev);
5131 if (rdev->flags & RADEON_IS_AGP) { 5138 if (rdev->flags & RADEON_IS_AGP) {
5132 evergreen_agp_enable(rdev); 5139 evergreen_agp_enable(rdev);
5133 } else { 5140 } else {
@@ -5285,10 +5292,10 @@ int evergreen_resume(struct radeon_device *rdev)
5285int evergreen_suspend(struct radeon_device *rdev) 5292int evergreen_suspend(struct radeon_device *rdev)
5286{ 5293{
5287 r600_audio_fini(rdev); 5294 r600_audio_fini(rdev);
5295 r600_uvd_stop(rdev);
5288 radeon_uvd_suspend(rdev); 5296 radeon_uvd_suspend(rdev);
5289 r700_cp_stop(rdev); 5297 r700_cp_stop(rdev);
5290 r600_dma_stop(rdev); 5298 r600_dma_stop(rdev);
5291 r600_uvd_rbc_stop(rdev);
5292 evergreen_irq_suspend(rdev); 5299 evergreen_irq_suspend(rdev);
5293 radeon_wb_disable(rdev); 5300 radeon_wb_disable(rdev);
5294 evergreen_pcie_gart_disable(rdev); 5301 evergreen_pcie_gart_disable(rdev);
@@ -5423,6 +5430,7 @@ void evergreen_fini(struct radeon_device *rdev)
5423 radeon_ib_pool_fini(rdev); 5430 radeon_ib_pool_fini(rdev);
5424 radeon_irq_kms_fini(rdev); 5431 radeon_irq_kms_fini(rdev);
5425 evergreen_pcie_gart_fini(rdev); 5432 evergreen_pcie_gart_fini(rdev);
5433 r600_uvd_stop(rdev);
5426 radeon_uvd_fini(rdev); 5434 radeon_uvd_fini(rdev);
5427 r600_vram_scratch_fini(rdev); 5435 r600_vram_scratch_fini(rdev);
5428 radeon_gem_fini(rdev); 5436 radeon_gem_fini(rdev);
@@ -5509,6 +5517,9 @@ void evergreen_program_aspm(struct radeon_device *rdev)
5509 */ 5517 */
5510 bool fusion_platform = false; 5518 bool fusion_platform = false;
5511 5519
5520 if (radeon_aspm == 0)
5521 return;
5522
5512 if (!(rdev->flags & RADEON_IS_PCIE)) 5523 if (!(rdev->flags & RADEON_IS_PCIE))
5513 return; 5524 return;
5514 5525
diff --git a/drivers/gpu/drm/radeon/evergreen_hdmi.c b/drivers/gpu/drm/radeon/evergreen_hdmi.c
index b9c6f7675e59..b0e280058b9b 100644
--- a/drivers/gpu/drm/radeon/evergreen_hdmi.c
+++ b/drivers/gpu/drm/radeon/evergreen_hdmi.c
@@ -148,18 +148,40 @@ static void evergreen_audio_set_dto(struct drm_encoder *encoder, u32 clock)
148 struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv; 148 struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
149 struct radeon_crtc *radeon_crtc = to_radeon_crtc(encoder->crtc); 149 struct radeon_crtc *radeon_crtc = to_radeon_crtc(encoder->crtc);
150 u32 base_rate = 24000; 150 u32 base_rate = 24000;
151 u32 max_ratio = clock / base_rate;
152 u32 dto_phase;
153 u32 dto_modulo = clock;
154 u32 wallclock_ratio;
155 u32 dto_cntl;
151 156
152 if (!dig || !dig->afmt) 157 if (!dig || !dig->afmt)
153 return; 158 return;
154 159
160 if (max_ratio >= 8) {
161 dto_phase = 192 * 1000;
162 wallclock_ratio = 3;
163 } else if (max_ratio >= 4) {
164 dto_phase = 96 * 1000;
165 wallclock_ratio = 2;
166 } else if (max_ratio >= 2) {
167 dto_phase = 48 * 1000;
168 wallclock_ratio = 1;
169 } else {
170 dto_phase = 24 * 1000;
171 wallclock_ratio = 0;
172 }
173 dto_cntl = RREG32(DCCG_AUDIO_DTO0_CNTL) & ~DCCG_AUDIO_DTO_WALLCLOCK_RATIO_MASK;
174 dto_cntl |= DCCG_AUDIO_DTO_WALLCLOCK_RATIO(wallclock_ratio);
175 WREG32(DCCG_AUDIO_DTO0_CNTL, dto_cntl);
176
155 /* XXX two dtos; generally use dto0 for hdmi */ 177 /* XXX two dtos; generally use dto0 for hdmi */
156 /* Express [24MHz / target pixel clock] as an exact rational 178 /* Express [24MHz / target pixel clock] as an exact rational
157 * number (coefficient of two integer numbers. DCCG_AUDIO_DTOx_PHASE 179 * number (coefficient of two integer numbers. DCCG_AUDIO_DTOx_PHASE
158 * is the numerator, DCCG_AUDIO_DTOx_MODULE is the denominator 180 * is the numerator, DCCG_AUDIO_DTOx_MODULE is the denominator
159 */ 181 */
160 WREG32(DCCG_AUDIO_DTO0_PHASE, base_rate * 100);
161 WREG32(DCCG_AUDIO_DTO0_MODULE, clock * 100);
162 WREG32(DCCG_AUDIO_DTO_SOURCE, DCCG_AUDIO_DTO0_SOURCE_SEL(radeon_crtc->crtc_id)); 182 WREG32(DCCG_AUDIO_DTO_SOURCE, DCCG_AUDIO_DTO0_SOURCE_SEL(radeon_crtc->crtc_id));
183 WREG32(DCCG_AUDIO_DTO0_PHASE, dto_phase);
184 WREG32(DCCG_AUDIO_DTO0_MODULE, dto_modulo);
163} 185}
164 186
165 187
@@ -177,6 +199,9 @@ void evergreen_hdmi_setmode(struct drm_encoder *encoder, struct drm_display_mode
177 uint32_t offset; 199 uint32_t offset;
178 ssize_t err; 200 ssize_t err;
179 201
202 if (!dig || !dig->afmt)
203 return;
204
180 /* Silent, r600_hdmi_enable will raise WARN for us */ 205 /* Silent, r600_hdmi_enable will raise WARN for us */
181 if (!dig->afmt->enabled) 206 if (!dig->afmt->enabled)
182 return; 207 return;
@@ -280,6 +305,9 @@ void evergreen_hdmi_enable(struct drm_encoder *encoder, bool enable)
280 struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); 305 struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
281 struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv; 306 struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
282 307
308 if (!dig || !dig->afmt)
309 return;
310
283 /* Silent, r600_hdmi_enable will raise WARN for us */ 311 /* Silent, r600_hdmi_enable will raise WARN for us */
284 if (enable && dig->afmt->enabled) 312 if (enable && dig->afmt->enabled)
285 return; 313 return;
diff --git a/drivers/gpu/drm/radeon/evergreend.h b/drivers/gpu/drm/radeon/evergreend.h
index a7baf67aef6c..0d582ac1dc31 100644
--- a/drivers/gpu/drm/radeon/evergreend.h
+++ b/drivers/gpu/drm/radeon/evergreend.h
@@ -497,6 +497,9 @@
497#define DCCG_AUDIO_DTO0_MODULE 0x05b4 497#define DCCG_AUDIO_DTO0_MODULE 0x05b4
498#define DCCG_AUDIO_DTO0_LOAD 0x05b8 498#define DCCG_AUDIO_DTO0_LOAD 0x05b8
499#define DCCG_AUDIO_DTO0_CNTL 0x05bc 499#define DCCG_AUDIO_DTO0_CNTL 0x05bc
500# define DCCG_AUDIO_DTO_WALLCLOCK_RATIO(x) (((x) & 7) << 0)
501# define DCCG_AUDIO_DTO_WALLCLOCK_RATIO_MASK 7
502# define DCCG_AUDIO_DTO_WALLCLOCK_RATIO_SHIFT 0
500 503
501#define DCCG_AUDIO_DTO1_PHASE 0x05c0 504#define DCCG_AUDIO_DTO1_PHASE 0x05c0
502#define DCCG_AUDIO_DTO1_MODULE 0x05c4 505#define DCCG_AUDIO_DTO1_MODULE 0x05c4
diff --git a/drivers/gpu/drm/radeon/ni.c b/drivers/gpu/drm/radeon/ni.c
index f30127cb30ef..ccb4f8b54852 100644
--- a/drivers/gpu/drm/radeon/ni.c
+++ b/drivers/gpu/drm/radeon/ni.c
@@ -22,7 +22,6 @@
22 * Authors: Alex Deucher 22 * Authors: Alex Deucher
23 */ 23 */
24#include <linux/firmware.h> 24#include <linux/firmware.h>
25#include <linux/platform_device.h>
26#include <linux/slab.h> 25#include <linux/slab.h>
27#include <linux/module.h> 26#include <linux/module.h>
28#include <drm/drmP.h> 27#include <drm/drmP.h>
@@ -684,7 +683,6 @@ int ni_mc_load_microcode(struct radeon_device *rdev)
684 683
685int ni_init_microcode(struct radeon_device *rdev) 684int ni_init_microcode(struct radeon_device *rdev)
686{ 685{
687 struct platform_device *pdev;
688 const char *chip_name; 686 const char *chip_name;
689 const char *rlc_chip_name; 687 const char *rlc_chip_name;
690 size_t pfp_req_size, me_req_size, rlc_req_size, mc_req_size; 688 size_t pfp_req_size, me_req_size, rlc_req_size, mc_req_size;
@@ -694,13 +692,6 @@ int ni_init_microcode(struct radeon_device *rdev)
694 692
695 DRM_DEBUG("\n"); 693 DRM_DEBUG("\n");
696 694
697 pdev = platform_device_register_simple("radeon_cp", 0, NULL, 0);
698 err = IS_ERR(pdev);
699 if (err) {
700 printk(KERN_ERR "radeon_cp: Failed to register firmware\n");
701 return -EINVAL;
702 }
703
704 switch (rdev->family) { 695 switch (rdev->family) {
705 case CHIP_BARTS: 696 case CHIP_BARTS:
706 chip_name = "BARTS"; 697 chip_name = "BARTS";
@@ -753,7 +744,7 @@ int ni_init_microcode(struct radeon_device *rdev)
753 DRM_INFO("Loading %s Microcode\n", chip_name); 744 DRM_INFO("Loading %s Microcode\n", chip_name);
754 745
755 snprintf(fw_name, sizeof(fw_name), "radeon/%s_pfp.bin", chip_name); 746 snprintf(fw_name, sizeof(fw_name), "radeon/%s_pfp.bin", chip_name);
756 err = request_firmware(&rdev->pfp_fw, fw_name, &pdev->dev); 747 err = request_firmware(&rdev->pfp_fw, fw_name, rdev->dev);
757 if (err) 748 if (err)
758 goto out; 749 goto out;
759 if (rdev->pfp_fw->size != pfp_req_size) { 750 if (rdev->pfp_fw->size != pfp_req_size) {
@@ -765,7 +756,7 @@ int ni_init_microcode(struct radeon_device *rdev)
765 } 756 }
766 757
767 snprintf(fw_name, sizeof(fw_name), "radeon/%s_me.bin", chip_name); 758 snprintf(fw_name, sizeof(fw_name), "radeon/%s_me.bin", chip_name);
768 err = request_firmware(&rdev->me_fw, fw_name, &pdev->dev); 759 err = request_firmware(&rdev->me_fw, fw_name, rdev->dev);
769 if (err) 760 if (err)
770 goto out; 761 goto out;
771 if (rdev->me_fw->size != me_req_size) { 762 if (rdev->me_fw->size != me_req_size) {
@@ -776,7 +767,7 @@ int ni_init_microcode(struct radeon_device *rdev)
776 } 767 }
777 768
778 snprintf(fw_name, sizeof(fw_name), "radeon/%s_rlc.bin", rlc_chip_name); 769 snprintf(fw_name, sizeof(fw_name), "radeon/%s_rlc.bin", rlc_chip_name);
779 err = request_firmware(&rdev->rlc_fw, fw_name, &pdev->dev); 770 err = request_firmware(&rdev->rlc_fw, fw_name, rdev->dev);
780 if (err) 771 if (err)
781 goto out; 772 goto out;
782 if (rdev->rlc_fw->size != rlc_req_size) { 773 if (rdev->rlc_fw->size != rlc_req_size) {
@@ -789,7 +780,7 @@ int ni_init_microcode(struct radeon_device *rdev)
789 /* no MC ucode on TN */ 780 /* no MC ucode on TN */
790 if (!(rdev->flags & RADEON_IS_IGP)) { 781 if (!(rdev->flags & RADEON_IS_IGP)) {
791 snprintf(fw_name, sizeof(fw_name), "radeon/%s_mc.bin", chip_name); 782 snprintf(fw_name, sizeof(fw_name), "radeon/%s_mc.bin", chip_name);
792 err = request_firmware(&rdev->mc_fw, fw_name, &pdev->dev); 783 err = request_firmware(&rdev->mc_fw, fw_name, rdev->dev);
793 if (err) 784 if (err)
794 goto out; 785 goto out;
795 if (rdev->mc_fw->size != mc_req_size) { 786 if (rdev->mc_fw->size != mc_req_size) {
@@ -802,10 +793,14 @@ int ni_init_microcode(struct radeon_device *rdev)
802 793
803 if ((rdev->family >= CHIP_BARTS) && (rdev->family <= CHIP_CAYMAN)) { 794 if ((rdev->family >= CHIP_BARTS) && (rdev->family <= CHIP_CAYMAN)) {
804 snprintf(fw_name, sizeof(fw_name), "radeon/%s_smc.bin", chip_name); 795 snprintf(fw_name, sizeof(fw_name), "radeon/%s_smc.bin", chip_name);
805 err = request_firmware(&rdev->smc_fw, fw_name, &pdev->dev); 796 err = request_firmware(&rdev->smc_fw, fw_name, rdev->dev);
806 if (err) 797 if (err) {
807 goto out; 798 printk(KERN_ERR
808 if (rdev->smc_fw->size != smc_req_size) { 799 "smc: error loading firmware \"%s\"\n",
800 fw_name);
801 release_firmware(rdev->smc_fw);
802 rdev->smc_fw = NULL;
803 } else if (rdev->smc_fw->size != smc_req_size) {
809 printk(KERN_ERR 804 printk(KERN_ERR
810 "ni_mc: Bogus length %zu in firmware \"%s\"\n", 805 "ni_mc: Bogus length %zu in firmware \"%s\"\n",
811 rdev->mc_fw->size, fw_name); 806 rdev->mc_fw->size, fw_name);
@@ -814,8 +809,6 @@ int ni_init_microcode(struct radeon_device *rdev)
814 } 809 }
815 810
816out: 811out:
817 platform_device_unregister(pdev);
818
819 if (err) { 812 if (err) {
820 if (err != -EINVAL) 813 if (err != -EINVAL)
821 printk(KERN_ERR 814 printk(KERN_ERR
@@ -2090,6 +2083,8 @@ static int cayman_startup(struct radeon_device *rdev)
2090 /* enable aspm */ 2083 /* enable aspm */
2091 evergreen_program_aspm(rdev); 2084 evergreen_program_aspm(rdev);
2092 2085
2086 evergreen_mc_program(rdev);
2087
2093 if (rdev->flags & RADEON_IS_IGP) { 2088 if (rdev->flags & RADEON_IS_IGP) {
2094 if (!rdev->me_fw || !rdev->pfp_fw || !rdev->rlc_fw) { 2089 if (!rdev->me_fw || !rdev->pfp_fw || !rdev->rlc_fw) {
2095 r = ni_init_microcode(rdev); 2090 r = ni_init_microcode(rdev);
@@ -2118,7 +2113,6 @@ static int cayman_startup(struct radeon_device *rdev)
2118 if (r) 2113 if (r)
2119 return r; 2114 return r;
2120 2115
2121 evergreen_mc_program(rdev);
2122 r = cayman_pcie_gart_enable(rdev); 2116 r = cayman_pcie_gart_enable(rdev);
2123 if (r) 2117 if (r)
2124 return r; 2118 return r;
@@ -2297,7 +2291,7 @@ int cayman_suspend(struct radeon_device *rdev)
2297 radeon_vm_manager_fini(rdev); 2291 radeon_vm_manager_fini(rdev);
2298 cayman_cp_enable(rdev, false); 2292 cayman_cp_enable(rdev, false);
2299 cayman_dma_stop(rdev); 2293 cayman_dma_stop(rdev);
2300 r600_uvd_rbc_stop(rdev); 2294 r600_uvd_stop(rdev);
2301 radeon_uvd_suspend(rdev); 2295 radeon_uvd_suspend(rdev);
2302 evergreen_irq_suspend(rdev); 2296 evergreen_irq_suspend(rdev);
2303 radeon_wb_disable(rdev); 2297 radeon_wb_disable(rdev);
@@ -2429,6 +2423,7 @@ void cayman_fini(struct radeon_device *rdev)
2429 radeon_vm_manager_fini(rdev); 2423 radeon_vm_manager_fini(rdev);
2430 radeon_ib_pool_fini(rdev); 2424 radeon_ib_pool_fini(rdev);
2431 radeon_irq_kms_fini(rdev); 2425 radeon_irq_kms_fini(rdev);
2426 r600_uvd_stop(rdev);
2432 radeon_uvd_fini(rdev); 2427 radeon_uvd_fini(rdev);
2433 cayman_pcie_gart_fini(rdev); 2428 cayman_pcie_gart_fini(rdev);
2434 r600_vram_scratch_fini(rdev); 2429 r600_vram_scratch_fini(rdev);
@@ -2461,6 +2456,167 @@ void cayman_vm_fini(struct radeon_device *rdev)
2461{ 2456{
2462} 2457}
2463 2458
2459/**
2460 * cayman_vm_decode_fault - print human readable fault info
2461 *
2462 * @rdev: radeon_device pointer
2463 * @status: VM_CONTEXT1_PROTECTION_FAULT_STATUS register value
2464 * @addr: VM_CONTEXT1_PROTECTION_FAULT_ADDR register value
2465 *
2466 * Print human readable fault information (cayman/TN).
2467 */
2468void cayman_vm_decode_fault(struct radeon_device *rdev,
2469 u32 status, u32 addr)
2470{
2471 u32 mc_id = (status & MEMORY_CLIENT_ID_MASK) >> MEMORY_CLIENT_ID_SHIFT;
2472 u32 vmid = (status & FAULT_VMID_MASK) >> FAULT_VMID_SHIFT;
2473 u32 protections = (status & PROTECTIONS_MASK) >> PROTECTIONS_SHIFT;
2474 char *block;
2475
2476 switch (mc_id) {
2477 case 32:
2478 case 16:
2479 case 96:
2480 case 80:
2481 case 160:
2482 case 144:
2483 case 224:
2484 case 208:
2485 block = "CB";
2486 break;
2487 case 33:
2488 case 17:
2489 case 97:
2490 case 81:
2491 case 161:
2492 case 145:
2493 case 225:
2494 case 209:
2495 block = "CB_FMASK";
2496 break;
2497 case 34:
2498 case 18:
2499 case 98:
2500 case 82:
2501 case 162:
2502 case 146:
2503 case 226:
2504 case 210:
2505 block = "CB_CMASK";
2506 break;
2507 case 35:
2508 case 19:
2509 case 99:
2510 case 83:
2511 case 163:
2512 case 147:
2513 case 227:
2514 case 211:
2515 block = "CB_IMMED";
2516 break;
2517 case 36:
2518 case 20:
2519 case 100:
2520 case 84:
2521 case 164:
2522 case 148:
2523 case 228:
2524 case 212:
2525 block = "DB";
2526 break;
2527 case 37:
2528 case 21:
2529 case 101:
2530 case 85:
2531 case 165:
2532 case 149:
2533 case 229:
2534 case 213:
2535 block = "DB_HTILE";
2536 break;
2537 case 38:
2538 case 22:
2539 case 102:
2540 case 86:
2541 case 166:
2542 case 150:
2543 case 230:
2544 case 214:
2545 block = "SX";
2546 break;
2547 case 39:
2548 case 23:
2549 case 103:
2550 case 87:
2551 case 167:
2552 case 151:
2553 case 231:
2554 case 215:
2555 block = "DB_STEN";
2556 break;
2557 case 40:
2558 case 24:
2559 case 104:
2560 case 88:
2561 case 232:
2562 case 216:
2563 case 168:
2564 case 152:
2565 block = "TC_TFETCH";
2566 break;
2567 case 41:
2568 case 25:
2569 case 105:
2570 case 89:
2571 case 233:
2572 case 217:
2573 case 169:
2574 case 153:
2575 block = "TC_VFETCH";
2576 break;
2577 case 42:
2578 case 26:
2579 case 106:
2580 case 90:
2581 case 234:
2582 case 218:
2583 case 170:
2584 case 154:
2585 block = "VC";
2586 break;
2587 case 112:
2588 block = "CP";
2589 break;
2590 case 113:
2591 case 114:
2592 block = "SH";
2593 break;
2594 case 115:
2595 block = "VGT";
2596 break;
2597 case 178:
2598 block = "IH";
2599 break;
2600 case 51:
2601 block = "RLC";
2602 break;
2603 case 55:
2604 block = "DMA";
2605 break;
2606 case 56:
2607 block = "HDP";
2608 break;
2609 default:
2610 block = "unknown";
2611 break;
2612 }
2613
2614 printk("VM fault (0x%02x, vmid %d) at page %u, %s from %s (%d)\n",
2615 protections, vmid, addr,
2616 (status & MEMORY_CLIENT_RW_MASK) ? "write" : "read",
2617 block, mc_id);
2618}
2619
2464#define R600_ENTRY_VALID (1 << 0) 2620#define R600_ENTRY_VALID (1 << 0)
2465#define R600_PTE_SYSTEM (1 << 1) 2621#define R600_PTE_SYSTEM (1 << 1)
2466#define R600_PTE_SNOOPED (1 << 2) 2622#define R600_PTE_SNOOPED (1 << 2)
diff --git a/drivers/gpu/drm/radeon/ni_dpm.c b/drivers/gpu/drm/radeon/ni_dpm.c
index 559cf24d51af..f0f5f748938a 100644
--- a/drivers/gpu/drm/radeon/ni_dpm.c
+++ b/drivers/gpu/drm/radeon/ni_dpm.c
@@ -1054,10 +1054,6 @@ static int ni_restrict_performance_levels_before_switch(struct radeon_device *rd
1054int ni_dpm_force_performance_level(struct radeon_device *rdev, 1054int ni_dpm_force_performance_level(struct radeon_device *rdev,
1055 enum radeon_dpm_forced_level level) 1055 enum radeon_dpm_forced_level level)
1056{ 1056{
1057 struct radeon_ps *rps = rdev->pm.dpm.current_ps;
1058 struct ni_ps *ps = ni_get_ps(rps);
1059 u32 levels;
1060
1061 if (level == RADEON_DPM_FORCED_LEVEL_HIGH) { 1057 if (level == RADEON_DPM_FORCED_LEVEL_HIGH) {
1062 if (ni_send_msg_to_smc_with_parameter(rdev, PPSMC_MSG_SetEnabledLevels, 0) != PPSMC_Result_OK) 1058 if (ni_send_msg_to_smc_with_parameter(rdev, PPSMC_MSG_SetEnabledLevels, 0) != PPSMC_Result_OK)
1063 return -EINVAL; 1059 return -EINVAL;
@@ -1068,8 +1064,7 @@ int ni_dpm_force_performance_level(struct radeon_device *rdev,
1068 if (ni_send_msg_to_smc_with_parameter(rdev, PPSMC_MSG_SetForcedLevels, 0) != PPSMC_Result_OK) 1064 if (ni_send_msg_to_smc_with_parameter(rdev, PPSMC_MSG_SetForcedLevels, 0) != PPSMC_Result_OK)
1069 return -EINVAL; 1065 return -EINVAL;
1070 1066
1071 levels = ps->performance_level_count - 1; 1067 if (ni_send_msg_to_smc_with_parameter(rdev, PPSMC_MSG_SetEnabledLevels, 1) != PPSMC_Result_OK)
1072 if (ni_send_msg_to_smc_with_parameter(rdev, PPSMC_MSG_SetEnabledLevels, levels) != PPSMC_Result_OK)
1073 return -EINVAL; 1068 return -EINVAL;
1074 } else if (level == RADEON_DPM_FORCED_LEVEL_AUTO) { 1069 } else if (level == RADEON_DPM_FORCED_LEVEL_AUTO) {
1075 if (ni_send_msg_to_smc_with_parameter(rdev, PPSMC_MSG_SetForcedLevels, 0) != PPSMC_Result_OK) 1070 if (ni_send_msg_to_smc_with_parameter(rdev, PPSMC_MSG_SetForcedLevels, 0) != PPSMC_Result_OK)
@@ -4072,9 +4067,6 @@ int ni_dpm_init(struct radeon_device *rdev)
4072 struct rv7xx_power_info *pi; 4067 struct rv7xx_power_info *pi;
4073 struct evergreen_power_info *eg_pi; 4068 struct evergreen_power_info *eg_pi;
4074 struct ni_power_info *ni_pi; 4069 struct ni_power_info *ni_pi;
4075 int index = GetIndexIntoMasterTable(DATA, ASIC_InternalSS_Info);
4076 u16 data_offset, size;
4077 u8 frev, crev;
4078 struct atom_clock_dividers dividers; 4070 struct atom_clock_dividers dividers;
4079 int ret; 4071 int ret;
4080 4072
@@ -4167,16 +4159,7 @@ int ni_dpm_init(struct radeon_device *rdev)
4167 eg_pi->vddci_control = 4159 eg_pi->vddci_control =
4168 radeon_atom_is_voltage_gpio(rdev, SET_VOLTAGE_TYPE_ASIC_VDDCI, 0); 4160 radeon_atom_is_voltage_gpio(rdev, SET_VOLTAGE_TYPE_ASIC_VDDCI, 0);
4169 4161
4170 if (atom_parse_data_header(rdev->mode_info.atom_context, index, &size, 4162 rv770_get_engine_memory_ss(rdev);
4171 &frev, &crev, &data_offset)) {
4172 pi->sclk_ss = true;
4173 pi->mclk_ss = true;
4174 pi->dynamic_ss = true;
4175 } else {
4176 pi->sclk_ss = false;
4177 pi->mclk_ss = false;
4178 pi->dynamic_ss = true;
4179 }
4180 4163
4181 pi->asi = RV770_ASI_DFLT; 4164 pi->asi = RV770_ASI_DFLT;
4182 pi->pasi = CYPRESS_HASI_DFLT; 4165 pi->pasi = CYPRESS_HASI_DFLT;
@@ -4193,8 +4176,7 @@ int ni_dpm_init(struct radeon_device *rdev)
4193 4176
4194 pi->dynamic_pcie_gen2 = true; 4177 pi->dynamic_pcie_gen2 = true;
4195 4178
4196 if (pi->gfx_clock_gating && 4179 if (rdev->pm.int_thermal_type != THERMAL_TYPE_NONE)
4197 (rdev->pm.int_thermal_type != THERMAL_TYPE_NONE))
4198 pi->thermal_protection = true; 4180 pi->thermal_protection = true;
4199 else 4181 else
4200 pi->thermal_protection = false; 4182 pi->thermal_protection = false;
diff --git a/drivers/gpu/drm/radeon/nid.h b/drivers/gpu/drm/radeon/nid.h
index fe24a93542ec..22421bc80c0d 100644
--- a/drivers/gpu/drm/radeon/nid.h
+++ b/drivers/gpu/drm/radeon/nid.h
@@ -133,6 +133,22 @@
133#define VM_CONTEXT1_CNTL2 0x1434 133#define VM_CONTEXT1_CNTL2 0x1434
134#define VM_INVALIDATE_REQUEST 0x1478 134#define VM_INVALIDATE_REQUEST 0x1478
135#define VM_INVALIDATE_RESPONSE 0x147c 135#define VM_INVALIDATE_RESPONSE 0x147c
136#define VM_CONTEXT1_PROTECTION_FAULT_ADDR 0x14FC
137#define VM_CONTEXT1_PROTECTION_FAULT_STATUS 0x14DC
138#define PROTECTIONS_MASK (0xf << 0)
139#define PROTECTIONS_SHIFT 0
140 /* bit 0: range
141 * bit 2: pde0
142 * bit 3: valid
143 * bit 4: read
144 * bit 5: write
145 */
146#define MEMORY_CLIENT_ID_MASK (0xff << 12)
147#define MEMORY_CLIENT_ID_SHIFT 12
148#define MEMORY_CLIENT_RW_MASK (1 << 24)
149#define MEMORY_CLIENT_RW_SHIFT 24
150#define FAULT_VMID_MASK (0x7 << 25)
151#define FAULT_VMID_SHIFT 25
136#define VM_CONTEXT0_PROTECTION_FAULT_DEFAULT_ADDR 0x1518 152#define VM_CONTEXT0_PROTECTION_FAULT_DEFAULT_ADDR 0x1518
137#define VM_CONTEXT1_PROTECTION_FAULT_DEFAULT_ADDR 0x151c 153#define VM_CONTEXT1_PROTECTION_FAULT_DEFAULT_ADDR 0x151c
138#define VM_CONTEXT0_PAGE_TABLE_BASE_ADDR 0x153C 154#define VM_CONTEXT0_PAGE_TABLE_BASE_ADDR 0x153C
diff --git a/drivers/gpu/drm/radeon/r100.c b/drivers/gpu/drm/radeon/r100.c
index c9affefd79f6..75349cdaa84b 100644
--- a/drivers/gpu/drm/radeon/r100.c
+++ b/drivers/gpu/drm/radeon/r100.c
@@ -39,7 +39,6 @@
39#include "atom.h" 39#include "atom.h"
40 40
41#include <linux/firmware.h> 41#include <linux/firmware.h>
42#include <linux/platform_device.h>
43#include <linux/module.h> 42#include <linux/module.h>
44 43
45#include "r100_reg_safe.h" 44#include "r100_reg_safe.h"
@@ -989,18 +988,11 @@ void r100_ring_start(struct radeon_device *rdev, struct radeon_ring *ring)
989/* Load the microcode for the CP */ 988/* Load the microcode for the CP */
990static int r100_cp_init_microcode(struct radeon_device *rdev) 989static int r100_cp_init_microcode(struct radeon_device *rdev)
991{ 990{
992 struct platform_device *pdev;
993 const char *fw_name = NULL; 991 const char *fw_name = NULL;
994 int err; 992 int err;
995 993
996 DRM_DEBUG_KMS("\n"); 994 DRM_DEBUG_KMS("\n");
997 995
998 pdev = platform_device_register_simple("radeon_cp", 0, NULL, 0);
999 err = IS_ERR(pdev);
1000 if (err) {
1001 printk(KERN_ERR "radeon_cp: Failed to register firmware\n");
1002 return -EINVAL;
1003 }
1004 if ((rdev->family == CHIP_R100) || (rdev->family == CHIP_RV100) || 996 if ((rdev->family == CHIP_R100) || (rdev->family == CHIP_RV100) ||
1005 (rdev->family == CHIP_RV200) || (rdev->family == CHIP_RS100) || 997 (rdev->family == CHIP_RV200) || (rdev->family == CHIP_RS100) ||
1006 (rdev->family == CHIP_RS200)) { 998 (rdev->family == CHIP_RS200)) {
@@ -1042,8 +1034,7 @@ static int r100_cp_init_microcode(struct radeon_device *rdev)
1042 fw_name = FIRMWARE_R520; 1034 fw_name = FIRMWARE_R520;
1043 } 1035 }
1044 1036
1045 err = request_firmware(&rdev->me_fw, fw_name, &pdev->dev); 1037 err = request_firmware(&rdev->me_fw, fw_name, rdev->dev);
1046 platform_device_unregister(pdev);
1047 if (err) { 1038 if (err) {
1048 printk(KERN_ERR "radeon_cp: Failed to load firmware \"%s\"\n", 1039 printk(KERN_ERR "radeon_cp: Failed to load firmware \"%s\"\n",
1049 fw_name); 1040 fw_name);
diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c
index 2d3655f7f41e..e66e72077350 100644
--- a/drivers/gpu/drm/radeon/r600.c
+++ b/drivers/gpu/drm/radeon/r600.c
@@ -28,7 +28,6 @@
28#include <linux/slab.h> 28#include <linux/slab.h>
29#include <linux/seq_file.h> 29#include <linux/seq_file.h>
30#include <linux/firmware.h> 30#include <linux/firmware.h>
31#include <linux/platform_device.h>
32#include <linux/module.h> 31#include <linux/module.h>
33#include <drm/drmP.h> 32#include <drm/drmP.h>
34#include <drm/radeon_drm.h> 33#include <drm/radeon_drm.h>
@@ -2144,7 +2143,6 @@ void r600_cp_stop(struct radeon_device *rdev)
2144 2143
2145int r600_init_microcode(struct radeon_device *rdev) 2144int r600_init_microcode(struct radeon_device *rdev)
2146{ 2145{
2147 struct platform_device *pdev;
2148 const char *chip_name; 2146 const char *chip_name;
2149 const char *rlc_chip_name; 2147 const char *rlc_chip_name;
2150 const char *smc_chip_name = "RV770"; 2148 const char *smc_chip_name = "RV770";
@@ -2154,13 +2152,6 @@ int r600_init_microcode(struct radeon_device *rdev)
2154 2152
2155 DRM_DEBUG("\n"); 2153 DRM_DEBUG("\n");
2156 2154
2157 pdev = platform_device_register_simple("radeon_cp", 0, NULL, 0);
2158 err = IS_ERR(pdev);
2159 if (err) {
2160 printk(KERN_ERR "radeon_cp: Failed to register firmware\n");
2161 return -EINVAL;
2162 }
2163
2164 switch (rdev->family) { 2155 switch (rdev->family) {
2165 case CHIP_R600: 2156 case CHIP_R600:
2166 chip_name = "R600"; 2157 chip_name = "R600";
@@ -2272,7 +2263,7 @@ int r600_init_microcode(struct radeon_device *rdev)
2272 DRM_INFO("Loading %s Microcode\n", chip_name); 2263 DRM_INFO("Loading %s Microcode\n", chip_name);
2273 2264
2274 snprintf(fw_name, sizeof(fw_name), "radeon/%s_pfp.bin", chip_name); 2265 snprintf(fw_name, sizeof(fw_name), "radeon/%s_pfp.bin", chip_name);
2275 err = request_firmware(&rdev->pfp_fw, fw_name, &pdev->dev); 2266 err = request_firmware(&rdev->pfp_fw, fw_name, rdev->dev);
2276 if (err) 2267 if (err)
2277 goto out; 2268 goto out;
2278 if (rdev->pfp_fw->size != pfp_req_size) { 2269 if (rdev->pfp_fw->size != pfp_req_size) {
@@ -2284,7 +2275,7 @@ int r600_init_microcode(struct radeon_device *rdev)
2284 } 2275 }
2285 2276
2286 snprintf(fw_name, sizeof(fw_name), "radeon/%s_me.bin", chip_name); 2277 snprintf(fw_name, sizeof(fw_name), "radeon/%s_me.bin", chip_name);
2287 err = request_firmware(&rdev->me_fw, fw_name, &pdev->dev); 2278 err = request_firmware(&rdev->me_fw, fw_name, rdev->dev);
2288 if (err) 2279 if (err)
2289 goto out; 2280 goto out;
2290 if (rdev->me_fw->size != me_req_size) { 2281 if (rdev->me_fw->size != me_req_size) {
@@ -2295,7 +2286,7 @@ int r600_init_microcode(struct radeon_device *rdev)
2295 } 2286 }
2296 2287
2297 snprintf(fw_name, sizeof(fw_name), "radeon/%s_rlc.bin", rlc_chip_name); 2288 snprintf(fw_name, sizeof(fw_name), "radeon/%s_rlc.bin", rlc_chip_name);
2298 err = request_firmware(&rdev->rlc_fw, fw_name, &pdev->dev); 2289 err = request_firmware(&rdev->rlc_fw, fw_name, rdev->dev);
2299 if (err) 2290 if (err)
2300 goto out; 2291 goto out;
2301 if (rdev->rlc_fw->size != rlc_req_size) { 2292 if (rdev->rlc_fw->size != rlc_req_size) {
@@ -2307,10 +2298,14 @@ int r600_init_microcode(struct radeon_device *rdev)
2307 2298
2308 if ((rdev->family >= CHIP_RV770) && (rdev->family <= CHIP_HEMLOCK)) { 2299 if ((rdev->family >= CHIP_RV770) && (rdev->family <= CHIP_HEMLOCK)) {
2309 snprintf(fw_name, sizeof(fw_name), "radeon/%s_smc.bin", smc_chip_name); 2300 snprintf(fw_name, sizeof(fw_name), "radeon/%s_smc.bin", smc_chip_name);
2310 err = request_firmware(&rdev->smc_fw, fw_name, &pdev->dev); 2301 err = request_firmware(&rdev->smc_fw, fw_name, rdev->dev);
2311 if (err) 2302 if (err) {
2312 goto out; 2303 printk(KERN_ERR
2313 if (rdev->smc_fw->size != smc_req_size) { 2304 "smc: error loading firmware \"%s\"\n",
2305 fw_name);
2306 release_firmware(rdev->smc_fw);
2307 rdev->smc_fw = NULL;
2308 } else if (rdev->smc_fw->size != smc_req_size) {
2314 printk(KERN_ERR 2309 printk(KERN_ERR
2315 "smc: Bogus length %zu in firmware \"%s\"\n", 2310 "smc: Bogus length %zu in firmware \"%s\"\n",
2316 rdev->smc_fw->size, fw_name); 2311 rdev->smc_fw->size, fw_name);
@@ -2319,8 +2314,6 @@ int r600_init_microcode(struct radeon_device *rdev)
2319 } 2314 }
2320 2315
2321out: 2316out:
2322 platform_device_unregister(pdev);
2323
2324 if (err) { 2317 if (err) {
2325 if (err != -EINVAL) 2318 if (err != -EINVAL)
2326 printk(KERN_ERR 2319 printk(KERN_ERR
@@ -2708,12 +2701,29 @@ int r600_uvd_rbc_start(struct radeon_device *rdev)
2708 return 0; 2701 return 0;
2709} 2702}
2710 2703
2711void r600_uvd_rbc_stop(struct radeon_device *rdev) 2704void r600_uvd_stop(struct radeon_device *rdev)
2712{ 2705{
2713 struct radeon_ring *ring = &rdev->ring[R600_RING_TYPE_UVD_INDEX]; 2706 struct radeon_ring *ring = &rdev->ring[R600_RING_TYPE_UVD_INDEX];
2714 2707
2715 /* force RBC into idle state */ 2708 /* force RBC into idle state */
2716 WREG32(UVD_RBC_RB_CNTL, 0x11010101); 2709 WREG32(UVD_RBC_RB_CNTL, 0x11010101);
2710
2711 /* Stall UMC and register bus before resetting VCPU */
2712 WREG32_P(UVD_LMI_CTRL2, 1 << 8, ~(1 << 8));
2713 WREG32_P(UVD_RB_ARB_CTRL, 1 << 3, ~(1 << 3));
2714 mdelay(1);
2715
2716 /* put VCPU into reset */
2717 WREG32(UVD_SOFT_RESET, VCPU_SOFT_RESET);
2718 mdelay(5);
2719
2720 /* disable VCPU clock */
2721 WREG32(UVD_VCPU_CNTL, 0x0);
2722
2723 /* Unstall UMC and register bus */
2724 WREG32_P(UVD_LMI_CTRL2, 0, ~(1 << 8));
2725 WREG32_P(UVD_RB_ARB_CTRL, 0, ~(1 << 3));
2726
2717 ring->ready = false; 2727 ring->ready = false;
2718} 2728}
2719 2729
@@ -2733,6 +2743,11 @@ int r600_uvd_init(struct radeon_device *rdev)
2733 /* disable interupt */ 2743 /* disable interupt */
2734 WREG32_P(UVD_MASTINT_EN, 0, ~(1 << 1)); 2744 WREG32_P(UVD_MASTINT_EN, 0, ~(1 << 1));
2735 2745
2746 /* Stall UMC and register bus before resetting VCPU */
2747 WREG32_P(UVD_LMI_CTRL2, 1 << 8, ~(1 << 8));
2748 WREG32_P(UVD_RB_ARB_CTRL, 1 << 3, ~(1 << 3));
2749 mdelay(1);
2750
2736 /* put LMI, VCPU, RBC etc... into reset */ 2751 /* put LMI, VCPU, RBC etc... into reset */
2737 WREG32(UVD_SOFT_RESET, LMI_SOFT_RESET | VCPU_SOFT_RESET | 2752 WREG32(UVD_SOFT_RESET, LMI_SOFT_RESET | VCPU_SOFT_RESET |
2738 LBSI_SOFT_RESET | RBC_SOFT_RESET | CSM_SOFT_RESET | 2753 LBSI_SOFT_RESET | RBC_SOFT_RESET | CSM_SOFT_RESET |
@@ -2762,10 +2777,6 @@ int r600_uvd_init(struct radeon_device *rdev)
2762 WREG32(UVD_MPC_SET_ALU, 0); 2777 WREG32(UVD_MPC_SET_ALU, 0);
2763 WREG32(UVD_MPC_SET_MUX, 0x88); 2778 WREG32(UVD_MPC_SET_MUX, 0x88);
2764 2779
2765 /* Stall UMC */
2766 WREG32_P(UVD_LMI_CTRL2, 1 << 8, ~(1 << 8));
2767 WREG32_P(UVD_RB_ARB_CTRL, 1 << 3, ~(1 << 3));
2768
2769 /* take all subblocks out of reset, except VCPU */ 2780 /* take all subblocks out of reset, except VCPU */
2770 WREG32(UVD_SOFT_RESET, VCPU_SOFT_RESET); 2781 WREG32(UVD_SOFT_RESET, VCPU_SOFT_RESET);
2771 mdelay(5); 2782 mdelay(5);
@@ -3019,7 +3030,7 @@ void r600_uvd_fence_emit(struct radeon_device *rdev,
3019 struct radeon_fence *fence) 3030 struct radeon_fence *fence)
3020{ 3031{
3021 struct radeon_ring *ring = &rdev->ring[fence->ring]; 3032 struct radeon_ring *ring = &rdev->ring[fence->ring];
3022 uint32_t addr = rdev->fence_drv[fence->ring].gpu_addr; 3033 uint64_t addr = rdev->fence_drv[fence->ring].gpu_addr;
3023 3034
3024 radeon_ring_write(ring, PACKET0(UVD_CONTEXT_ID, 0)); 3035 radeon_ring_write(ring, PACKET0(UVD_CONTEXT_ID, 0));
3025 radeon_ring_write(ring, fence->seq); 3036 radeon_ring_write(ring, fence->seq);
@@ -3145,6 +3156,90 @@ int r600_copy_blit(struct radeon_device *rdev,
3145} 3156}
3146 3157
3147/** 3158/**
3159 * r600_copy_cpdma - copy pages using the CP DMA engine
3160 *
3161 * @rdev: radeon_device pointer
3162 * @src_offset: src GPU address
3163 * @dst_offset: dst GPU address
3164 * @num_gpu_pages: number of GPU pages to xfer
3165 * @fence: radeon fence object
3166 *
3167 * Copy GPU paging using the CP DMA engine (r6xx+).
3168 * Used by the radeon ttm implementation to move pages if
3169 * registered as the asic copy callback.
3170 */
3171int r600_copy_cpdma(struct radeon_device *rdev,
3172 uint64_t src_offset, uint64_t dst_offset,
3173 unsigned num_gpu_pages,
3174 struct radeon_fence **fence)
3175{
3176 struct radeon_semaphore *sem = NULL;
3177 int ring_index = rdev->asic->copy.blit_ring_index;
3178 struct radeon_ring *ring = &rdev->ring[ring_index];
3179 u32 size_in_bytes, cur_size_in_bytes, tmp;
3180 int i, num_loops;
3181 int r = 0;
3182
3183 r = radeon_semaphore_create(rdev, &sem);
3184 if (r) {
3185 DRM_ERROR("radeon: moving bo (%d).\n", r);
3186 return r;
3187 }
3188
3189 size_in_bytes = (num_gpu_pages << RADEON_GPU_PAGE_SHIFT);
3190 num_loops = DIV_ROUND_UP(size_in_bytes, 0x1fffff);
3191 r = radeon_ring_lock(rdev, ring, num_loops * 6 + 24);
3192 if (r) {
3193 DRM_ERROR("radeon: moving bo (%d).\n", r);
3194 radeon_semaphore_free(rdev, &sem, NULL);
3195 return r;
3196 }
3197
3198 if (radeon_fence_need_sync(*fence, ring->idx)) {
3199 radeon_semaphore_sync_rings(rdev, sem, (*fence)->ring,
3200 ring->idx);
3201 radeon_fence_note_sync(*fence, ring->idx);
3202 } else {
3203 radeon_semaphore_free(rdev, &sem, NULL);
3204 }
3205
3206 radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
3207 radeon_ring_write(ring, (WAIT_UNTIL - PACKET3_SET_CONFIG_REG_OFFSET) >> 2);
3208 radeon_ring_write(ring, WAIT_3D_IDLE_bit);
3209 for (i = 0; i < num_loops; i++) {
3210 cur_size_in_bytes = size_in_bytes;
3211 if (cur_size_in_bytes > 0x1fffff)
3212 cur_size_in_bytes = 0x1fffff;
3213 size_in_bytes -= cur_size_in_bytes;
3214 tmp = upper_32_bits(src_offset) & 0xff;
3215 if (size_in_bytes == 0)
3216 tmp |= PACKET3_CP_DMA_CP_SYNC;
3217 radeon_ring_write(ring, PACKET3(PACKET3_CP_DMA, 4));
3218 radeon_ring_write(ring, src_offset & 0xffffffff);
3219 radeon_ring_write(ring, tmp);
3220 radeon_ring_write(ring, dst_offset & 0xffffffff);
3221 radeon_ring_write(ring, upper_32_bits(dst_offset) & 0xff);
3222 radeon_ring_write(ring, cur_size_in_bytes);
3223 src_offset += cur_size_in_bytes;
3224 dst_offset += cur_size_in_bytes;
3225 }
3226 radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
3227 radeon_ring_write(ring, (WAIT_UNTIL - PACKET3_SET_CONFIG_REG_OFFSET) >> 2);
3228 radeon_ring_write(ring, WAIT_CP_DMA_IDLE_bit);
3229
3230 r = radeon_fence_emit(rdev, fence, ring->idx);
3231 if (r) {
3232 radeon_ring_unlock_undo(rdev, ring);
3233 return r;
3234 }
3235
3236 radeon_ring_unlock_commit(rdev, ring);
3237 radeon_semaphore_free(rdev, &sem, *fence);
3238
3239 return r;
3240}
3241
3242/**
3148 * r600_copy_dma - copy pages using the DMA engine 3243 * r600_copy_dma - copy pages using the DMA engine
3149 * 3244 *
3150 * @rdev: radeon_device pointer 3245 * @rdev: radeon_device pointer
@@ -3239,6 +3334,8 @@ static int r600_startup(struct radeon_device *rdev)
3239 /* enable pcie gen2 link */ 3334 /* enable pcie gen2 link */
3240 r600_pcie_gen2_enable(rdev); 3335 r600_pcie_gen2_enable(rdev);
3241 3336
3337 r600_mc_program(rdev);
3338
3242 if (!rdev->me_fw || !rdev->pfp_fw || !rdev->rlc_fw) { 3339 if (!rdev->me_fw || !rdev->pfp_fw || !rdev->rlc_fw) {
3243 r = r600_init_microcode(rdev); 3340 r = r600_init_microcode(rdev);
3244 if (r) { 3341 if (r) {
@@ -3251,7 +3348,6 @@ static int r600_startup(struct radeon_device *rdev)
3251 if (r) 3348 if (r)
3252 return r; 3349 return r;
3253 3350
3254 r600_mc_program(rdev);
3255 if (rdev->flags & RADEON_IS_AGP) { 3351 if (rdev->flags & RADEON_IS_AGP) {
3256 r600_agp_enable(rdev); 3352 r600_agp_enable(rdev);
3257 } else { 3353 } else {
diff --git a/drivers/gpu/drm/radeon/r600_dpm.c b/drivers/gpu/drm/radeon/r600_dpm.c
index b88f54b134ab..e5c860f4ccbe 100644
--- a/drivers/gpu/drm/radeon/r600_dpm.c
+++ b/drivers/gpu/drm/radeon/r600_dpm.c
@@ -278,9 +278,9 @@ bool r600_dynamicpm_enabled(struct radeon_device *rdev)
278void r600_enable_sclk_control(struct radeon_device *rdev, bool enable) 278void r600_enable_sclk_control(struct radeon_device *rdev, bool enable)
279{ 279{
280 if (enable) 280 if (enable)
281 WREG32_P(GENERAL_PWRMGT, 0, ~SCLK_PWRMGT_OFF); 281 WREG32_P(SCLK_PWRMGT_CNTL, 0, ~SCLK_PWRMGT_OFF);
282 else 282 else
283 WREG32_P(GENERAL_PWRMGT, SCLK_PWRMGT_OFF, ~SCLK_PWRMGT_OFF); 283 WREG32_P(SCLK_PWRMGT_CNTL, SCLK_PWRMGT_OFF, ~SCLK_PWRMGT_OFF);
284} 284}
285 285
286void r600_enable_mclk_control(struct radeon_device *rdev, bool enable) 286void r600_enable_mclk_control(struct radeon_device *rdev, bool enable)
diff --git a/drivers/gpu/drm/radeon/r600_hdmi.c b/drivers/gpu/drm/radeon/r600_hdmi.c
index e73b2a73494a..f264df5470f7 100644
--- a/drivers/gpu/drm/radeon/r600_hdmi.c
+++ b/drivers/gpu/drm/radeon/r600_hdmi.c
@@ -226,10 +226,29 @@ void r600_audio_set_dto(struct drm_encoder *encoder, u32 clock)
226 struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); 226 struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
227 struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv; 227 struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
228 u32 base_rate = 24000; 228 u32 base_rate = 24000;
229 u32 max_ratio = clock / base_rate;
230 u32 dto_phase;
231 u32 dto_modulo = clock;
232 u32 wallclock_ratio;
233 u32 dto_cntl;
229 234
230 if (!dig || !dig->afmt) 235 if (!dig || !dig->afmt)
231 return; 236 return;
232 237
238 if (max_ratio >= 8) {
239 dto_phase = 192 * 1000;
240 wallclock_ratio = 3;
241 } else if (max_ratio >= 4) {
242 dto_phase = 96 * 1000;
243 wallclock_ratio = 2;
244 } else if (max_ratio >= 2) {
245 dto_phase = 48 * 1000;
246 wallclock_ratio = 1;
247 } else {
248 dto_phase = 24 * 1000;
249 wallclock_ratio = 0;
250 }
251
233 /* there are two DTOs selected by DCCG_AUDIO_DTO_SELECT. 252 /* there are two DTOs selected by DCCG_AUDIO_DTO_SELECT.
234 * doesn't matter which one you use. Just use the first one. 253 * doesn't matter which one you use. Just use the first one.
235 */ 254 */
@@ -242,9 +261,21 @@ void r600_audio_set_dto(struct drm_encoder *encoder, u32 clock)
242 /* according to the reg specs, this should DCE3.2 only, but in 261 /* according to the reg specs, this should DCE3.2 only, but in
243 * practice it seems to cover DCE3.0 as well. 262 * practice it seems to cover DCE3.0 as well.
244 */ 263 */
245 WREG32(DCCG_AUDIO_DTO0_PHASE, base_rate * 100); 264 if (dig->dig_encoder == 0) {
246 WREG32(DCCG_AUDIO_DTO0_MODULE, clock * 100); 265 dto_cntl = RREG32(DCCG_AUDIO_DTO0_CNTL) & ~DCCG_AUDIO_DTO_WALLCLOCK_RATIO_MASK;
247 WREG32(DCCG_AUDIO_DTO_SELECT, 0); /* select DTO0 */ 266 dto_cntl |= DCCG_AUDIO_DTO_WALLCLOCK_RATIO(wallclock_ratio);
267 WREG32(DCCG_AUDIO_DTO0_CNTL, dto_cntl);
268 WREG32(DCCG_AUDIO_DTO0_PHASE, dto_phase);
269 WREG32(DCCG_AUDIO_DTO0_MODULE, dto_modulo);
270 WREG32(DCCG_AUDIO_DTO_SELECT, 0); /* select DTO0 */
271 } else {
272 dto_cntl = RREG32(DCCG_AUDIO_DTO1_CNTL) & ~DCCG_AUDIO_DTO_WALLCLOCK_RATIO_MASK;
273 dto_cntl |= DCCG_AUDIO_DTO_WALLCLOCK_RATIO(wallclock_ratio);
274 WREG32(DCCG_AUDIO_DTO1_CNTL, dto_cntl);
275 WREG32(DCCG_AUDIO_DTO1_PHASE, dto_phase);
276 WREG32(DCCG_AUDIO_DTO1_MODULE, dto_modulo);
277 WREG32(DCCG_AUDIO_DTO_SELECT, 1); /* select DTO1 */
278 }
248 } else { 279 } else {
249 /* according to the reg specs, this should be DCE2.0 and DCE3.0 */ 280 /* according to the reg specs, this should be DCE2.0 and DCE3.0 */
250 WREG32(AUDIO_DTO, AUDIO_DTO_PHASE(base_rate / 10) | 281 WREG32(AUDIO_DTO, AUDIO_DTO_PHASE(base_rate / 10) |
@@ -266,6 +297,9 @@ void r600_hdmi_setmode(struct drm_encoder *encoder, struct drm_display_mode *mod
266 uint32_t offset; 297 uint32_t offset;
267 ssize_t err; 298 ssize_t err;
268 299
300 if (!dig || !dig->afmt)
301 return;
302
269 /* Silent, r600_hdmi_enable will raise WARN for us */ 303 /* Silent, r600_hdmi_enable will raise WARN for us */
270 if (!dig->afmt->enabled) 304 if (!dig->afmt->enabled)
271 return; 305 return;
@@ -448,6 +482,9 @@ void r600_hdmi_enable(struct drm_encoder *encoder, bool enable)
448 struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv; 482 struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
449 u32 hdmi = HDMI0_ERROR_ACK; 483 u32 hdmi = HDMI0_ERROR_ACK;
450 484
485 if (!dig || !dig->afmt)
486 return;
487
451 /* Silent, r600_hdmi_enable will raise WARN for us */ 488 /* Silent, r600_hdmi_enable will raise WARN for us */
452 if (enable && dig->afmt->enabled) 489 if (enable && dig->afmt->enabled)
453 return; 490 return;
diff --git a/drivers/gpu/drm/radeon/r600d.h b/drivers/gpu/drm/radeon/r600d.h
index f1b3084d8f51..7c780839a7f4 100644
--- a/drivers/gpu/drm/radeon/r600d.h
+++ b/drivers/gpu/drm/radeon/r600d.h
@@ -602,6 +602,7 @@
602#define L2_BUSY (1 << 0) 602#define L2_BUSY (1 << 0)
603 603
604#define WAIT_UNTIL 0x8040 604#define WAIT_UNTIL 0x8040
605#define WAIT_CP_DMA_IDLE_bit (1 << 8)
605#define WAIT_2D_IDLE_bit (1 << 14) 606#define WAIT_2D_IDLE_bit (1 << 14)
606#define WAIT_3D_IDLE_bit (1 << 15) 607#define WAIT_3D_IDLE_bit (1 << 15)
607#define WAIT_2D_IDLECLEAN_bit (1 << 16) 608#define WAIT_2D_IDLECLEAN_bit (1 << 16)
@@ -932,6 +933,9 @@
932#define DCCG_AUDIO_DTO0_LOAD 0x051c 933#define DCCG_AUDIO_DTO0_LOAD 0x051c
933# define DTO_LOAD (1 << 31) 934# define DTO_LOAD (1 << 31)
934#define DCCG_AUDIO_DTO0_CNTL 0x0520 935#define DCCG_AUDIO_DTO0_CNTL 0x0520
936# define DCCG_AUDIO_DTO_WALLCLOCK_RATIO(x) (((x) & 7) << 0)
937# define DCCG_AUDIO_DTO_WALLCLOCK_RATIO_MASK 7
938# define DCCG_AUDIO_DTO_WALLCLOCK_RATIO_SHIFT 0
935 939
936#define DCCG_AUDIO_DTO1_PHASE 0x0524 940#define DCCG_AUDIO_DTO1_PHASE 0x0524
937#define DCCG_AUDIO_DTO1_MODULE 0x0528 941#define DCCG_AUDIO_DTO1_MODULE 0x0528
diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h
index 9b7025d02cd0..274b8e1b889f 100644
--- a/drivers/gpu/drm/radeon/radeon.h
+++ b/drivers/gpu/drm/radeon/radeon.h
@@ -97,6 +97,7 @@ extern int radeon_msi;
97extern int radeon_lockup_timeout; 97extern int radeon_lockup_timeout;
98extern int radeon_fastfb; 98extern int radeon_fastfb;
99extern int radeon_dpm; 99extern int radeon_dpm;
100extern int radeon_aspm;
100 101
101/* 102/*
102 * Copy from radeon_drv.h so we don't have to include both and have conflicting 103 * Copy from radeon_drv.h so we don't have to include both and have conflicting
@@ -455,6 +456,7 @@ struct radeon_sa_manager {
455 uint64_t gpu_addr; 456 uint64_t gpu_addr;
456 void *cpu_ptr; 457 void *cpu_ptr;
457 uint32_t domain; 458 uint32_t domain;
459 uint32_t align;
458}; 460};
459 461
460struct radeon_sa_bo; 462struct radeon_sa_bo;
@@ -783,6 +785,11 @@ struct radeon_mec {
783/* number of entries in page table */ 785/* number of entries in page table */
784#define RADEON_VM_PTE_COUNT (1 << RADEON_VM_BLOCK_SIZE) 786#define RADEON_VM_PTE_COUNT (1 << RADEON_VM_BLOCK_SIZE)
785 787
788/* PTBs (Page Table Blocks) need to be aligned to 32K */
789#define RADEON_VM_PTB_ALIGN_SIZE 32768
790#define RADEON_VM_PTB_ALIGN_MASK (RADEON_VM_PTB_ALIGN_SIZE - 1)
791#define RADEON_VM_PTB_ALIGN(a) (((a) + RADEON_VM_PTB_ALIGN_MASK) & ~RADEON_VM_PTB_ALIGN_MASK)
792
786struct radeon_vm { 793struct radeon_vm {
787 struct list_head list; 794 struct list_head list;
788 struct list_head va; 795 struct list_head va;
@@ -1460,6 +1467,7 @@ struct radeon_uvd {
1460 struct radeon_bo *vcpu_bo; 1467 struct radeon_bo *vcpu_bo;
1461 void *cpu_addr; 1468 void *cpu_addr;
1462 uint64_t gpu_addr; 1469 uint64_t gpu_addr;
1470 void *saved_bo;
1463 atomic_t handles[RADEON_MAX_UVD_HANDLES]; 1471 atomic_t handles[RADEON_MAX_UVD_HANDLES];
1464 struct drm_file *filp[RADEON_MAX_UVD_HANDLES]; 1472 struct drm_file *filp[RADEON_MAX_UVD_HANDLES];
1465 struct delayed_work idle_work; 1473 struct delayed_work idle_work;
@@ -2054,10 +2062,10 @@ struct radeon_device {
2054 const struct firmware *rlc_fw; /* r6/700 RLC firmware */ 2062 const struct firmware *rlc_fw; /* r6/700 RLC firmware */
2055 const struct firmware *mc_fw; /* NI MC firmware */ 2063 const struct firmware *mc_fw; /* NI MC firmware */
2056 const struct firmware *ce_fw; /* SI CE firmware */ 2064 const struct firmware *ce_fw; /* SI CE firmware */
2057 const struct firmware *uvd_fw; /* UVD firmware */
2058 const struct firmware *mec_fw; /* CIK MEC firmware */ 2065 const struct firmware *mec_fw; /* CIK MEC firmware */
2059 const struct firmware *sdma_fw; /* CIK SDMA firmware */ 2066 const struct firmware *sdma_fw; /* CIK SDMA firmware */
2060 const struct firmware *smc_fw; /* SMC firmware */ 2067 const struct firmware *smc_fw; /* SMC firmware */
2068 const struct firmware *uvd_fw; /* UVD firmware */
2061 struct r600_blit r600_blit; 2069 struct r600_blit r600_blit;
2062 struct r600_vram_scratch vram_scratch; 2070 struct r600_vram_scratch vram_scratch;
2063 int msi_enabled; /* msi enabled */ 2071 int msi_enabled; /* msi enabled */
@@ -2087,6 +2095,8 @@ struct radeon_device {
2087 /* ACPI interface */ 2095 /* ACPI interface */
2088 struct radeon_atif atif; 2096 struct radeon_atif atif;
2089 struct radeon_atcs atcs; 2097 struct radeon_atcs atcs;
2098 /* srbm instance registers */
2099 struct mutex srbm_mutex;
2090}; 2100};
2091 2101
2092int radeon_device_init(struct radeon_device *rdev, 2102int radeon_device_init(struct radeon_device *rdev,
diff --git a/drivers/gpu/drm/radeon/radeon_asic.c b/drivers/gpu/drm/radeon/radeon_asic.c
index 097077499cc6..f8f8b3113ddd 100644
--- a/drivers/gpu/drm/radeon/radeon_asic.c
+++ b/drivers/gpu/drm/radeon/radeon_asic.c
@@ -1026,8 +1026,8 @@ static struct radeon_asic r600_asic = {
1026 .blit_ring_index = RADEON_RING_TYPE_GFX_INDEX, 1026 .blit_ring_index = RADEON_RING_TYPE_GFX_INDEX,
1027 .dma = &r600_copy_dma, 1027 .dma = &r600_copy_dma,
1028 .dma_ring_index = R600_RING_TYPE_DMA_INDEX, 1028 .dma_ring_index = R600_RING_TYPE_DMA_INDEX,
1029 .copy = &r600_copy_dma, 1029 .copy = &r600_copy_cpdma,
1030 .copy_ring_index = R600_RING_TYPE_DMA_INDEX, 1030 .copy_ring_index = RADEON_RING_TYPE_GFX_INDEX,
1031 }, 1031 },
1032 .surface = { 1032 .surface = {
1033 .set_reg = r600_set_surface_reg, 1033 .set_reg = r600_set_surface_reg,
@@ -1119,8 +1119,8 @@ static struct radeon_asic rv6xx_asic = {
1119 .blit_ring_index = RADEON_RING_TYPE_GFX_INDEX, 1119 .blit_ring_index = RADEON_RING_TYPE_GFX_INDEX,
1120 .dma = &r600_copy_dma, 1120 .dma = &r600_copy_dma,
1121 .dma_ring_index = R600_RING_TYPE_DMA_INDEX, 1121 .dma_ring_index = R600_RING_TYPE_DMA_INDEX,
1122 .copy = &r600_copy_dma, 1122 .copy = &r600_copy_cpdma,
1123 .copy_ring_index = R600_RING_TYPE_DMA_INDEX, 1123 .copy_ring_index = RADEON_RING_TYPE_GFX_INDEX,
1124 }, 1124 },
1125 .surface = { 1125 .surface = {
1126 .set_reg = r600_set_surface_reg, 1126 .set_reg = r600_set_surface_reg,
@@ -1161,6 +1161,7 @@ static struct radeon_asic rv6xx_asic = {
1161 .get_mclk = &rv6xx_dpm_get_mclk, 1161 .get_mclk = &rv6xx_dpm_get_mclk,
1162 .print_power_state = &rv6xx_dpm_print_power_state, 1162 .print_power_state = &rv6xx_dpm_print_power_state,
1163 .debugfs_print_current_performance_level = &rv6xx_dpm_debugfs_print_current_performance_level, 1163 .debugfs_print_current_performance_level = &rv6xx_dpm_debugfs_print_current_performance_level,
1164 .force_performance_level = &rv6xx_dpm_force_performance_level,
1164 }, 1165 },
1165 .pflip = { 1166 .pflip = {
1166 .pre_page_flip = &rs600_pre_page_flip, 1167 .pre_page_flip = &rs600_pre_page_flip,
@@ -1229,8 +1230,8 @@ static struct radeon_asic rs780_asic = {
1229 .blit_ring_index = RADEON_RING_TYPE_GFX_INDEX, 1230 .blit_ring_index = RADEON_RING_TYPE_GFX_INDEX,
1230 .dma = &r600_copy_dma, 1231 .dma = &r600_copy_dma,
1231 .dma_ring_index = R600_RING_TYPE_DMA_INDEX, 1232 .dma_ring_index = R600_RING_TYPE_DMA_INDEX,
1232 .copy = &r600_copy_dma, 1233 .copy = &r600_copy_cpdma,
1233 .copy_ring_index = R600_RING_TYPE_DMA_INDEX, 1234 .copy_ring_index = RADEON_RING_TYPE_GFX_INDEX,
1234 }, 1235 },
1235 .surface = { 1236 .surface = {
1236 .set_reg = r600_set_surface_reg, 1237 .set_reg = r600_set_surface_reg,
@@ -1270,6 +1271,7 @@ static struct radeon_asic rs780_asic = {
1270 .get_sclk = &rs780_dpm_get_sclk, 1271 .get_sclk = &rs780_dpm_get_sclk,
1271 .get_mclk = &rs780_dpm_get_mclk, 1272 .get_mclk = &rs780_dpm_get_mclk,
1272 .print_power_state = &rs780_dpm_print_power_state, 1273 .print_power_state = &rs780_dpm_print_power_state,
1274 .debugfs_print_current_performance_level = &rs780_dpm_debugfs_print_current_performance_level,
1273 }, 1275 },
1274 .pflip = { 1276 .pflip = {
1275 .pre_page_flip = &rs600_pre_page_flip, 1277 .pre_page_flip = &rs600_pre_page_flip,
diff --git a/drivers/gpu/drm/radeon/radeon_asic.h b/drivers/gpu/drm/radeon/radeon_asic.h
index 45d0693cddd5..3d61d5aac18f 100644
--- a/drivers/gpu/drm/radeon/radeon_asic.h
+++ b/drivers/gpu/drm/radeon/radeon_asic.h
@@ -340,6 +340,9 @@ int r600_uvd_ring_test(struct radeon_device *rdev, struct radeon_ring *ring);
340int r600_copy_blit(struct radeon_device *rdev, 340int r600_copy_blit(struct radeon_device *rdev,
341 uint64_t src_offset, uint64_t dst_offset, 341 uint64_t src_offset, uint64_t dst_offset,
342 unsigned num_gpu_pages, struct radeon_fence **fence); 342 unsigned num_gpu_pages, struct radeon_fence **fence);
343int r600_copy_cpdma(struct radeon_device *rdev,
344 uint64_t src_offset, uint64_t dst_offset,
345 unsigned num_gpu_pages, struct radeon_fence **fence);
343int r600_copy_dma(struct radeon_device *rdev, 346int r600_copy_dma(struct radeon_device *rdev,
344 uint64_t src_offset, uint64_t dst_offset, 347 uint64_t src_offset, uint64_t dst_offset,
345 unsigned num_gpu_pages, struct radeon_fence **fence); 348 unsigned num_gpu_pages, struct radeon_fence **fence);
@@ -418,6 +421,8 @@ void rv6xx_dpm_print_power_state(struct radeon_device *rdev,
418 struct radeon_ps *ps); 421 struct radeon_ps *ps);
419void rv6xx_dpm_debugfs_print_current_performance_level(struct radeon_device *rdev, 422void rv6xx_dpm_debugfs_print_current_performance_level(struct radeon_device *rdev,
420 struct seq_file *m); 423 struct seq_file *m);
424int rv6xx_dpm_force_performance_level(struct radeon_device *rdev,
425 enum radeon_dpm_forced_level level);
421/* rs780 dpm */ 426/* rs780 dpm */
422int rs780_dpm_init(struct radeon_device *rdev); 427int rs780_dpm_init(struct radeon_device *rdev);
423int rs780_dpm_enable(struct radeon_device *rdev); 428int rs780_dpm_enable(struct radeon_device *rdev);
@@ -430,11 +435,13 @@ u32 rs780_dpm_get_sclk(struct radeon_device *rdev, bool low);
430u32 rs780_dpm_get_mclk(struct radeon_device *rdev, bool low); 435u32 rs780_dpm_get_mclk(struct radeon_device *rdev, bool low);
431void rs780_dpm_print_power_state(struct radeon_device *rdev, 436void rs780_dpm_print_power_state(struct radeon_device *rdev,
432 struct radeon_ps *ps); 437 struct radeon_ps *ps);
438void rs780_dpm_debugfs_print_current_performance_level(struct radeon_device *rdev,
439 struct seq_file *m);
433 440
434/* uvd */ 441/* uvd */
435int r600_uvd_init(struct radeon_device *rdev); 442int r600_uvd_init(struct radeon_device *rdev);
436int r600_uvd_rbc_start(struct radeon_device *rdev); 443int r600_uvd_rbc_start(struct radeon_device *rdev);
437void r600_uvd_rbc_stop(struct radeon_device *rdev); 444void r600_uvd_stop(struct radeon_device *rdev);
438int r600_uvd_ib_test(struct radeon_device *rdev, struct radeon_ring *ring); 445int r600_uvd_ib_test(struct radeon_device *rdev, struct radeon_ring *ring);
439void r600_uvd_fence_emit(struct radeon_device *rdev, 446void r600_uvd_fence_emit(struct radeon_device *rdev,
440 struct radeon_fence *fence); 447 struct radeon_fence *fence);
diff --git a/drivers/gpu/drm/radeon/radeon_atombios.c b/drivers/gpu/drm/radeon/radeon_atombios.c
index fbdaff55556b..4ccd61f60eb6 100644
--- a/drivers/gpu/drm/radeon/radeon_atombios.c
+++ b/drivers/gpu/drm/radeon/radeon_atombios.c
@@ -2782,7 +2782,7 @@ int radeon_atom_get_clock_dividers(struct radeon_device *rdev,
2782 ATOM_PLL_CNTL_FLAG_PLL_POST_DIV_EN) ? true : false; 2782 ATOM_PLL_CNTL_FLAG_PLL_POST_DIV_EN) ? true : false;
2783 dividers->enable_dithen = (args.v3.ucCntlFlag & 2783 dividers->enable_dithen = (args.v3.ucCntlFlag &
2784 ATOM_PLL_CNTL_FLAG_FRACTION_DISABLE) ? false : true; 2784 ATOM_PLL_CNTL_FLAG_FRACTION_DISABLE) ? false : true;
2785 dividers->fb_div = le16_to_cpu(args.v3.ulFbDiv.usFbDiv); 2785 dividers->whole_fb_div = le16_to_cpu(args.v3.ulFbDiv.usFbDiv);
2786 dividers->frac_fb_div = le16_to_cpu(args.v3.ulFbDiv.usFbDivFrac); 2786 dividers->frac_fb_div = le16_to_cpu(args.v3.ulFbDiv.usFbDivFrac);
2787 dividers->ref_div = args.v3.ucRefDiv; 2787 dividers->ref_div = args.v3.ucRefDiv;
2788 dividers->vco_mode = (args.v3.ucCntlFlag & 2788 dividers->vco_mode = (args.v3.ucCntlFlag &
@@ -3513,7 +3513,6 @@ int radeon_atom_get_memory_info(struct radeon_device *rdev,
3513 u8 frev, crev, i; 3513 u8 frev, crev, i;
3514 u16 data_offset, size; 3514 u16 data_offset, size;
3515 union vram_info *vram_info; 3515 union vram_info *vram_info;
3516 u8 *p;
3517 3516
3518 memset(mem_info, 0, sizeof(struct atom_memory_info)); 3517 memset(mem_info, 0, sizeof(struct atom_memory_info));
3519 3518
@@ -3529,13 +3528,12 @@ int radeon_atom_get_memory_info(struct radeon_device *rdev,
3529 if (module_index < vram_info->v1_3.ucNumOfVRAMModule) { 3528 if (module_index < vram_info->v1_3.ucNumOfVRAMModule) {
3530 ATOM_VRAM_MODULE_V3 *vram_module = 3529 ATOM_VRAM_MODULE_V3 *vram_module =
3531 (ATOM_VRAM_MODULE_V3 *)vram_info->v1_3.aVramInfo; 3530 (ATOM_VRAM_MODULE_V3 *)vram_info->v1_3.aVramInfo;
3532 p = (u8 *)vram_info->v1_3.aVramInfo;
3533 3531
3534 for (i = 0; i < module_index; i++) { 3532 for (i = 0; i < module_index; i++) {
3535 vram_module = (ATOM_VRAM_MODULE_V3 *)p;
3536 if (le16_to_cpu(vram_module->usSize) == 0) 3533 if (le16_to_cpu(vram_module->usSize) == 0)
3537 return -EINVAL; 3534 return -EINVAL;
3538 p += le16_to_cpu(vram_module->usSize); 3535 vram_module = (ATOM_VRAM_MODULE_V3 *)
3536 ((u8 *)vram_module + le16_to_cpu(vram_module->usSize));
3539 } 3537 }
3540 mem_info->mem_vendor = vram_module->asMemory.ucMemoryVenderID & 0xf; 3538 mem_info->mem_vendor = vram_module->asMemory.ucMemoryVenderID & 0xf;
3541 mem_info->mem_type = vram_module->asMemory.ucMemoryType & 0xf0; 3539 mem_info->mem_type = vram_module->asMemory.ucMemoryType & 0xf0;
@@ -3547,13 +3545,12 @@ int radeon_atom_get_memory_info(struct radeon_device *rdev,
3547 if (module_index < vram_info->v1_4.ucNumOfVRAMModule) { 3545 if (module_index < vram_info->v1_4.ucNumOfVRAMModule) {
3548 ATOM_VRAM_MODULE_V4 *vram_module = 3546 ATOM_VRAM_MODULE_V4 *vram_module =
3549 (ATOM_VRAM_MODULE_V4 *)vram_info->v1_4.aVramInfo; 3547 (ATOM_VRAM_MODULE_V4 *)vram_info->v1_4.aVramInfo;
3550 p = (u8 *)vram_info->v1_4.aVramInfo;
3551 3548
3552 for (i = 0; i < module_index; i++) { 3549 for (i = 0; i < module_index; i++) {
3553 vram_module = (ATOM_VRAM_MODULE_V4 *)p;
3554 if (le16_to_cpu(vram_module->usModuleSize) == 0) 3550 if (le16_to_cpu(vram_module->usModuleSize) == 0)
3555 return -EINVAL; 3551 return -EINVAL;
3556 p += le16_to_cpu(vram_module->usModuleSize); 3552 vram_module = (ATOM_VRAM_MODULE_V4 *)
3553 ((u8 *)vram_module + le16_to_cpu(vram_module->usModuleSize));
3557 } 3554 }
3558 mem_info->mem_vendor = vram_module->ucMemoryVenderID & 0xf; 3555 mem_info->mem_vendor = vram_module->ucMemoryVenderID & 0xf;
3559 mem_info->mem_type = vram_module->ucMemoryType & 0xf0; 3556 mem_info->mem_type = vram_module->ucMemoryType & 0xf0;
@@ -3572,13 +3569,12 @@ int radeon_atom_get_memory_info(struct radeon_device *rdev,
3572 if (module_index < vram_info->v2_1.ucNumOfVRAMModule) { 3569 if (module_index < vram_info->v2_1.ucNumOfVRAMModule) {
3573 ATOM_VRAM_MODULE_V7 *vram_module = 3570 ATOM_VRAM_MODULE_V7 *vram_module =
3574 (ATOM_VRAM_MODULE_V7 *)vram_info->v2_1.aVramInfo; 3571 (ATOM_VRAM_MODULE_V7 *)vram_info->v2_1.aVramInfo;
3575 p = (u8 *)vram_info->v2_1.aVramInfo;
3576 3572
3577 for (i = 0; i < module_index; i++) { 3573 for (i = 0; i < module_index; i++) {
3578 vram_module = (ATOM_VRAM_MODULE_V7 *)p;
3579 if (le16_to_cpu(vram_module->usModuleSize) == 0) 3574 if (le16_to_cpu(vram_module->usModuleSize) == 0)
3580 return -EINVAL; 3575 return -EINVAL;
3581 p += le16_to_cpu(vram_module->usModuleSize); 3576 vram_module = (ATOM_VRAM_MODULE_V7 *)
3577 ((u8 *)vram_module + le16_to_cpu(vram_module->usModuleSize));
3582 } 3578 }
3583 mem_info->mem_vendor = vram_module->ucMemoryVenderID & 0xf; 3579 mem_info->mem_vendor = vram_module->ucMemoryVenderID & 0xf;
3584 mem_info->mem_type = vram_module->ucMemoryType & 0xf0; 3580 mem_info->mem_type = vram_module->ucMemoryType & 0xf0;
@@ -3628,21 +3624,19 @@ int radeon_atom_get_mclk_range_table(struct radeon_device *rdev,
3628 if (module_index < vram_info->v1_4.ucNumOfVRAMModule) { 3624 if (module_index < vram_info->v1_4.ucNumOfVRAMModule) {
3629 ATOM_VRAM_MODULE_V4 *vram_module = 3625 ATOM_VRAM_MODULE_V4 *vram_module =
3630 (ATOM_VRAM_MODULE_V4 *)vram_info->v1_4.aVramInfo; 3626 (ATOM_VRAM_MODULE_V4 *)vram_info->v1_4.aVramInfo;
3631 ATOM_MEMORY_TIMING_FORMAT *format;
3632 p = (u8 *)vram_info->v1_4.aVramInfo;
3633 3627
3634 for (i = 0; i < module_index; i++) { 3628 for (i = 0; i < module_index; i++) {
3635 vram_module = (ATOM_VRAM_MODULE_V4 *)p;
3636 if (le16_to_cpu(vram_module->usModuleSize) == 0) 3629 if (le16_to_cpu(vram_module->usModuleSize) == 0)
3637 return -EINVAL; 3630 return -EINVAL;
3638 p += le16_to_cpu(vram_module->usModuleSize); 3631 vram_module = (ATOM_VRAM_MODULE_V4 *)
3632 ((u8 *)vram_module + le16_to_cpu(vram_module->usModuleSize));
3639 } 3633 }
3640 mclk_range_table->num_entries = (u8) 3634 mclk_range_table->num_entries = (u8)
3641 ((vram_module->usModuleSize - offsetof(ATOM_VRAM_MODULE_V4, asMemTiming)) / 3635 ((le16_to_cpu(vram_module->usModuleSize) - offsetof(ATOM_VRAM_MODULE_V4, asMemTiming)) /
3642 mem_timing_size); 3636 mem_timing_size);
3643 p = (u8 *)vram_module->asMemTiming; 3637 p = (u8 *)&vram_module->asMemTiming[0];
3644 for (i = 0; i < mclk_range_table->num_entries; i++) { 3638 for (i = 0; i < mclk_range_table->num_entries; i++) {
3645 format = (ATOM_MEMORY_TIMING_FORMAT *)p; 3639 ATOM_MEMORY_TIMING_FORMAT *format = (ATOM_MEMORY_TIMING_FORMAT *)p;
3646 mclk_range_table->mclk[i] = le32_to_cpu(format->ulClkRange); 3640 mclk_range_table->mclk[i] = le32_to_cpu(format->ulClkRange);
3647 p += mem_timing_size; 3641 p += mem_timing_size;
3648 } 3642 }
@@ -3705,17 +3699,21 @@ int radeon_atom_init_mc_reg_table(struct radeon_device *rdev,
3705 (ATOM_MEMORY_SETTING_DATA_BLOCK *) 3699 (ATOM_MEMORY_SETTING_DATA_BLOCK *)
3706 ((u8 *)reg_block + (2 * sizeof(u16)) + 3700 ((u8 *)reg_block + (2 * sizeof(u16)) +
3707 le16_to_cpu(reg_block->usRegIndexTblSize)); 3701 le16_to_cpu(reg_block->usRegIndexTblSize));
3702 ATOM_INIT_REG_INDEX_FORMAT *format = &reg_block->asRegIndexBuf[0];
3708 num_entries = (u8)((le16_to_cpu(reg_block->usRegIndexTblSize)) / 3703 num_entries = (u8)((le16_to_cpu(reg_block->usRegIndexTblSize)) /
3709 sizeof(ATOM_INIT_REG_INDEX_FORMAT)) - 1; 3704 sizeof(ATOM_INIT_REG_INDEX_FORMAT)) - 1;
3710 if (num_entries > VBIOS_MC_REGISTER_ARRAY_SIZE) 3705 if (num_entries > VBIOS_MC_REGISTER_ARRAY_SIZE)
3711 return -EINVAL; 3706 return -EINVAL;
3712 while (!(reg_block->asRegIndexBuf[i].ucPreRegDataLength & ACCESS_PLACEHOLDER) && 3707 while (i < num_entries) {
3713 (i < num_entries)) { 3708 if (format->ucPreRegDataLength & ACCESS_PLACEHOLDER)
3709 break;
3714 reg_table->mc_reg_address[i].s1 = 3710 reg_table->mc_reg_address[i].s1 =
3715 (u16)(le16_to_cpu(reg_block->asRegIndexBuf[i].usRegIndex)); 3711 (u16)(le16_to_cpu(format->usRegIndex));
3716 reg_table->mc_reg_address[i].pre_reg_data = 3712 reg_table->mc_reg_address[i].pre_reg_data =
3717 (u8)(reg_block->asRegIndexBuf[i].ucPreRegDataLength); 3713 (u8)(format->ucPreRegDataLength);
3718 i++; 3714 i++;
3715 format = (ATOM_INIT_REG_INDEX_FORMAT *)
3716 ((u8 *)format + sizeof(ATOM_INIT_REG_INDEX_FORMAT));
3719 } 3717 }
3720 reg_table->last = i; 3718 reg_table->last = i;
3721 while ((*(u32 *)reg_data != END_OF_REG_DATA_BLOCK) && 3719 while ((*(u32 *)reg_data != END_OF_REG_DATA_BLOCK) &&
diff --git a/drivers/gpu/drm/radeon/radeon_combios.c b/drivers/gpu/drm/radeon/radeon_combios.c
index 78edadc9e86b..68ce36056019 100644
--- a/drivers/gpu/drm/radeon/radeon_combios.c
+++ b/drivers/gpu/drm/radeon/radeon_combios.c
@@ -147,7 +147,7 @@ static uint16_t combios_get_table_offset(struct drm_device *dev,
147 enum radeon_combios_table_offset table) 147 enum radeon_combios_table_offset table)
148{ 148{
149 struct radeon_device *rdev = dev->dev_private; 149 struct radeon_device *rdev = dev->dev_private;
150 int rev; 150 int rev, size;
151 uint16_t offset = 0, check_offset; 151 uint16_t offset = 0, check_offset;
152 152
153 if (!rdev->bios) 153 if (!rdev->bios)
@@ -156,174 +156,106 @@ static uint16_t combios_get_table_offset(struct drm_device *dev,
156 switch (table) { 156 switch (table) {
157 /* absolute offset tables */ 157 /* absolute offset tables */
158 case COMBIOS_ASIC_INIT_1_TABLE: 158 case COMBIOS_ASIC_INIT_1_TABLE:
159 check_offset = RBIOS16(rdev->bios_header_start + 0xc); 159 check_offset = 0xc;
160 if (check_offset)
161 offset = check_offset;
162 break; 160 break;
163 case COMBIOS_BIOS_SUPPORT_TABLE: 161 case COMBIOS_BIOS_SUPPORT_TABLE:
164 check_offset = RBIOS16(rdev->bios_header_start + 0x14); 162 check_offset = 0x14;
165 if (check_offset)
166 offset = check_offset;
167 break; 163 break;
168 case COMBIOS_DAC_PROGRAMMING_TABLE: 164 case COMBIOS_DAC_PROGRAMMING_TABLE:
169 check_offset = RBIOS16(rdev->bios_header_start + 0x2a); 165 check_offset = 0x2a;
170 if (check_offset)
171 offset = check_offset;
172 break; 166 break;
173 case COMBIOS_MAX_COLOR_DEPTH_TABLE: 167 case COMBIOS_MAX_COLOR_DEPTH_TABLE:
174 check_offset = RBIOS16(rdev->bios_header_start + 0x2c); 168 check_offset = 0x2c;
175 if (check_offset)
176 offset = check_offset;
177 break; 169 break;
178 case COMBIOS_CRTC_INFO_TABLE: 170 case COMBIOS_CRTC_INFO_TABLE:
179 check_offset = RBIOS16(rdev->bios_header_start + 0x2e); 171 check_offset = 0x2e;
180 if (check_offset)
181 offset = check_offset;
182 break; 172 break;
183 case COMBIOS_PLL_INFO_TABLE: 173 case COMBIOS_PLL_INFO_TABLE:
184 check_offset = RBIOS16(rdev->bios_header_start + 0x30); 174 check_offset = 0x30;
185 if (check_offset)
186 offset = check_offset;
187 break; 175 break;
188 case COMBIOS_TV_INFO_TABLE: 176 case COMBIOS_TV_INFO_TABLE:
189 check_offset = RBIOS16(rdev->bios_header_start + 0x32); 177 check_offset = 0x32;
190 if (check_offset)
191 offset = check_offset;
192 break; 178 break;
193 case COMBIOS_DFP_INFO_TABLE: 179 case COMBIOS_DFP_INFO_TABLE:
194 check_offset = RBIOS16(rdev->bios_header_start + 0x34); 180 check_offset = 0x34;
195 if (check_offset)
196 offset = check_offset;
197 break; 181 break;
198 case COMBIOS_HW_CONFIG_INFO_TABLE: 182 case COMBIOS_HW_CONFIG_INFO_TABLE:
199 check_offset = RBIOS16(rdev->bios_header_start + 0x36); 183 check_offset = 0x36;
200 if (check_offset)
201 offset = check_offset;
202 break; 184 break;
203 case COMBIOS_MULTIMEDIA_INFO_TABLE: 185 case COMBIOS_MULTIMEDIA_INFO_TABLE:
204 check_offset = RBIOS16(rdev->bios_header_start + 0x38); 186 check_offset = 0x38;
205 if (check_offset)
206 offset = check_offset;
207 break; 187 break;
208 case COMBIOS_TV_STD_PATCH_TABLE: 188 case COMBIOS_TV_STD_PATCH_TABLE:
209 check_offset = RBIOS16(rdev->bios_header_start + 0x3e); 189 check_offset = 0x3e;
210 if (check_offset)
211 offset = check_offset;
212 break; 190 break;
213 case COMBIOS_LCD_INFO_TABLE: 191 case COMBIOS_LCD_INFO_TABLE:
214 check_offset = RBIOS16(rdev->bios_header_start + 0x40); 192 check_offset = 0x40;
215 if (check_offset)
216 offset = check_offset;
217 break; 193 break;
218 case COMBIOS_MOBILE_INFO_TABLE: 194 case COMBIOS_MOBILE_INFO_TABLE:
219 check_offset = RBIOS16(rdev->bios_header_start + 0x42); 195 check_offset = 0x42;
220 if (check_offset)
221 offset = check_offset;
222 break; 196 break;
223 case COMBIOS_PLL_INIT_TABLE: 197 case COMBIOS_PLL_INIT_TABLE:
224 check_offset = RBIOS16(rdev->bios_header_start + 0x46); 198 check_offset = 0x46;
225 if (check_offset)
226 offset = check_offset;
227 break; 199 break;
228 case COMBIOS_MEM_CONFIG_TABLE: 200 case COMBIOS_MEM_CONFIG_TABLE:
229 check_offset = RBIOS16(rdev->bios_header_start + 0x48); 201 check_offset = 0x48;
230 if (check_offset)
231 offset = check_offset;
232 break; 202 break;
233 case COMBIOS_SAVE_MASK_TABLE: 203 case COMBIOS_SAVE_MASK_TABLE:
234 check_offset = RBIOS16(rdev->bios_header_start + 0x4a); 204 check_offset = 0x4a;
235 if (check_offset)
236 offset = check_offset;
237 break; 205 break;
238 case COMBIOS_HARDCODED_EDID_TABLE: 206 case COMBIOS_HARDCODED_EDID_TABLE:
239 check_offset = RBIOS16(rdev->bios_header_start + 0x4c); 207 check_offset = 0x4c;
240 if (check_offset)
241 offset = check_offset;
242 break; 208 break;
243 case COMBIOS_ASIC_INIT_2_TABLE: 209 case COMBIOS_ASIC_INIT_2_TABLE:
244 check_offset = RBIOS16(rdev->bios_header_start + 0x4e); 210 check_offset = 0x4e;
245 if (check_offset)
246 offset = check_offset;
247 break; 211 break;
248 case COMBIOS_CONNECTOR_INFO_TABLE: 212 case COMBIOS_CONNECTOR_INFO_TABLE:
249 check_offset = RBIOS16(rdev->bios_header_start + 0x50); 213 check_offset = 0x50;
250 if (check_offset)
251 offset = check_offset;
252 break; 214 break;
253 case COMBIOS_DYN_CLK_1_TABLE: 215 case COMBIOS_DYN_CLK_1_TABLE:
254 check_offset = RBIOS16(rdev->bios_header_start + 0x52); 216 check_offset = 0x52;
255 if (check_offset)
256 offset = check_offset;
257 break; 217 break;
258 case COMBIOS_RESERVED_MEM_TABLE: 218 case COMBIOS_RESERVED_MEM_TABLE:
259 check_offset = RBIOS16(rdev->bios_header_start + 0x54); 219 check_offset = 0x54;
260 if (check_offset)
261 offset = check_offset;
262 break; 220 break;
263 case COMBIOS_EXT_TMDS_INFO_TABLE: 221 case COMBIOS_EXT_TMDS_INFO_TABLE:
264 check_offset = RBIOS16(rdev->bios_header_start + 0x58); 222 check_offset = 0x58;
265 if (check_offset)
266 offset = check_offset;
267 break; 223 break;
268 case COMBIOS_MEM_CLK_INFO_TABLE: 224 case COMBIOS_MEM_CLK_INFO_TABLE:
269 check_offset = RBIOS16(rdev->bios_header_start + 0x5a); 225 check_offset = 0x5a;
270 if (check_offset)
271 offset = check_offset;
272 break; 226 break;
273 case COMBIOS_EXT_DAC_INFO_TABLE: 227 case COMBIOS_EXT_DAC_INFO_TABLE:
274 check_offset = RBIOS16(rdev->bios_header_start + 0x5c); 228 check_offset = 0x5c;
275 if (check_offset)
276 offset = check_offset;
277 break; 229 break;
278 case COMBIOS_MISC_INFO_TABLE: 230 case COMBIOS_MISC_INFO_TABLE:
279 check_offset = RBIOS16(rdev->bios_header_start + 0x5e); 231 check_offset = 0x5e;
280 if (check_offset)
281 offset = check_offset;
282 break; 232 break;
283 case COMBIOS_CRT_INFO_TABLE: 233 case COMBIOS_CRT_INFO_TABLE:
284 check_offset = RBIOS16(rdev->bios_header_start + 0x60); 234 check_offset = 0x60;
285 if (check_offset)
286 offset = check_offset;
287 break; 235 break;
288 case COMBIOS_INTEGRATED_SYSTEM_INFO_TABLE: 236 case COMBIOS_INTEGRATED_SYSTEM_INFO_TABLE:
289 check_offset = RBIOS16(rdev->bios_header_start + 0x62); 237 check_offset = 0x62;
290 if (check_offset)
291 offset = check_offset;
292 break; 238 break;
293 case COMBIOS_COMPONENT_VIDEO_INFO_TABLE: 239 case COMBIOS_COMPONENT_VIDEO_INFO_TABLE:
294 check_offset = RBIOS16(rdev->bios_header_start + 0x64); 240 check_offset = 0x64;
295 if (check_offset)
296 offset = check_offset;
297 break; 241 break;
298 case COMBIOS_FAN_SPEED_INFO_TABLE: 242 case COMBIOS_FAN_SPEED_INFO_TABLE:
299 check_offset = RBIOS16(rdev->bios_header_start + 0x66); 243 check_offset = 0x66;
300 if (check_offset)
301 offset = check_offset;
302 break; 244 break;
303 case COMBIOS_OVERDRIVE_INFO_TABLE: 245 case COMBIOS_OVERDRIVE_INFO_TABLE:
304 check_offset = RBIOS16(rdev->bios_header_start + 0x68); 246 check_offset = 0x68;
305 if (check_offset)
306 offset = check_offset;
307 break; 247 break;
308 case COMBIOS_OEM_INFO_TABLE: 248 case COMBIOS_OEM_INFO_TABLE:
309 check_offset = RBIOS16(rdev->bios_header_start + 0x6a); 249 check_offset = 0x6a;
310 if (check_offset)
311 offset = check_offset;
312 break; 250 break;
313 case COMBIOS_DYN_CLK_2_TABLE: 251 case COMBIOS_DYN_CLK_2_TABLE:
314 check_offset = RBIOS16(rdev->bios_header_start + 0x6c); 252 check_offset = 0x6c;
315 if (check_offset)
316 offset = check_offset;
317 break; 253 break;
318 case COMBIOS_POWER_CONNECTOR_INFO_TABLE: 254 case COMBIOS_POWER_CONNECTOR_INFO_TABLE:
319 check_offset = RBIOS16(rdev->bios_header_start + 0x6e); 255 check_offset = 0x6e;
320 if (check_offset)
321 offset = check_offset;
322 break; 256 break;
323 case COMBIOS_I2C_INFO_TABLE: 257 case COMBIOS_I2C_INFO_TABLE:
324 check_offset = RBIOS16(rdev->bios_header_start + 0x70); 258 check_offset = 0x70;
325 if (check_offset)
326 offset = check_offset;
327 break; 259 break;
328 /* relative offset tables */ 260 /* relative offset tables */
329 case COMBIOS_ASIC_INIT_3_TABLE: /* offset from misc info */ 261 case COMBIOS_ASIC_INIT_3_TABLE: /* offset from misc info */
@@ -439,11 +371,16 @@ static uint16_t combios_get_table_offset(struct drm_device *dev,
439 } 371 }
440 break; 372 break;
441 default: 373 default:
374 check_offset = 0;
442 break; 375 break;
443 } 376 }
444 377
445 return offset; 378 size = RBIOS8(rdev->bios_header_start + 0x6);
379 /* check absolute offset tables */
380 if (table < COMBIOS_ASIC_INIT_3_TABLE && check_offset && check_offset < size)
381 offset = RBIOS16(rdev->bios_header_start + check_offset);
446 382
383 return offset;
447} 384}
448 385
449bool radeon_combios_check_hardcoded_edid(struct radeon_device *rdev) 386bool radeon_combios_check_hardcoded_edid(struct radeon_device *rdev)
@@ -965,16 +902,22 @@ struct radeon_encoder_primary_dac *radeon_combios_get_primary_dac_info(struct
965 dac = RBIOS8(dac_info + 0x3) & 0xf; 902 dac = RBIOS8(dac_info + 0x3) & 0xf;
966 p_dac->ps2_pdac_adj = (bg << 8) | (dac); 903 p_dac->ps2_pdac_adj = (bg << 8) | (dac);
967 } 904 }
968 /* if the values are all zeros, use the table */ 905 /* if the values are zeros, use the table */
969 if (p_dac->ps2_pdac_adj) 906 if ((dac == 0) || (bg == 0))
907 found = 0;
908 else
970 found = 1; 909 found = 1;
971 } 910 }
972 911
973 /* quirks */ 912 /* quirks */
913 /* Radeon 7000 (RV100) */
914 if (((dev->pdev->device == 0x5159) &&
915 (dev->pdev->subsystem_vendor == 0x174B) &&
916 (dev->pdev->subsystem_device == 0x7c28)) ||
974 /* Radeon 9100 (R200) */ 917 /* Radeon 9100 (R200) */
975 if ((dev->pdev->device == 0x514D) && 918 ((dev->pdev->device == 0x514D) &&
976 (dev->pdev->subsystem_vendor == 0x174B) && 919 (dev->pdev->subsystem_vendor == 0x174B) &&
977 (dev->pdev->subsystem_device == 0x7149)) { 920 (dev->pdev->subsystem_device == 0x7149))) {
978 /* vbios value is bad, use the default */ 921 /* vbios value is bad, use the default */
979 found = 0; 922 found = 0;
980 } 923 }
diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
index 82335e38ec4f..63398ae1dbf5 100644
--- a/drivers/gpu/drm/radeon/radeon_device.c
+++ b/drivers/gpu/drm/radeon/radeon_device.c
@@ -1163,6 +1163,7 @@ int radeon_device_init(struct radeon_device *rdev,
1163 mutex_init(&rdev->gem.mutex); 1163 mutex_init(&rdev->gem.mutex);
1164 mutex_init(&rdev->pm.mutex); 1164 mutex_init(&rdev->pm.mutex);
1165 mutex_init(&rdev->gpu_clock_mutex); 1165 mutex_init(&rdev->gpu_clock_mutex);
1166 mutex_init(&rdev->srbm_mutex);
1166 init_rwsem(&rdev->pm.mclk_lock); 1167 init_rwsem(&rdev->pm.mclk_lock);
1167 init_rwsem(&rdev->exclusive_lock); 1168 init_rwsem(&rdev->exclusive_lock);
1168 init_waitqueue_head(&rdev->irq.vblank_queue); 1169 init_waitqueue_head(&rdev->irq.vblank_queue);
@@ -1519,6 +1520,7 @@ int radeon_gpu_reset(struct radeon_device *rdev)
1519 radeon_save_bios_scratch_regs(rdev); 1520 radeon_save_bios_scratch_regs(rdev);
1520 /* block TTM */ 1521 /* block TTM */
1521 resched = ttm_bo_lock_delayed_workqueue(&rdev->mman.bdev); 1522 resched = ttm_bo_lock_delayed_workqueue(&rdev->mman.bdev);
1523 radeon_pm_suspend(rdev);
1522 radeon_suspend(rdev); 1524 radeon_suspend(rdev);
1523 1525
1524 for (i = 0; i < RADEON_NUM_RINGS; ++i) { 1526 for (i = 0; i < RADEON_NUM_RINGS; ++i) {
@@ -1564,6 +1566,7 @@ retry:
1564 } 1566 }
1565 } 1567 }
1566 1568
1569 radeon_pm_resume(rdev);
1567 drm_helper_resume_force_mode(rdev->ddev); 1570 drm_helper_resume_force_mode(rdev->ddev);
1568 1571
1569 ttm_bo_unlock_delayed_workqueue(&rdev->mman.bdev, resched); 1572 ttm_bo_unlock_delayed_workqueue(&rdev->mman.bdev, resched);
diff --git a/drivers/gpu/drm/radeon/radeon_drv.c b/drivers/gpu/drm/radeon/radeon_drv.c
index e5419b350170..29876b1be8ec 100644
--- a/drivers/gpu/drm/radeon/radeon_drv.c
+++ b/drivers/gpu/drm/radeon/radeon_drv.c
@@ -167,6 +167,7 @@ int radeon_msi = -1;
167int radeon_lockup_timeout = 10000; 167int radeon_lockup_timeout = 10000;
168int radeon_fastfb = 0; 168int radeon_fastfb = 0;
169int radeon_dpm = -1; 169int radeon_dpm = -1;
170int radeon_aspm = -1;
170 171
171MODULE_PARM_DESC(no_wb, "Disable AGP writeback for scratch registers"); 172MODULE_PARM_DESC(no_wb, "Disable AGP writeback for scratch registers");
172module_param_named(no_wb, radeon_no_wb, int, 0444); 173module_param_named(no_wb, radeon_no_wb, int, 0444);
@@ -225,6 +226,9 @@ module_param_named(fastfb, radeon_fastfb, int, 0444);
225MODULE_PARM_DESC(dpm, "DPM support (1 = enable, 0 = disable, -1 = auto)"); 226MODULE_PARM_DESC(dpm, "DPM support (1 = enable, 0 = disable, -1 = auto)");
226module_param_named(dpm, radeon_dpm, int, 0444); 227module_param_named(dpm, radeon_dpm, int, 0444);
227 228
229MODULE_PARM_DESC(aspm, "ASPM support (1 = enable, 0 = disable, -1 = auto)");
230module_param_named(aspm, radeon_aspm, int, 0444);
231
228static struct pci_device_id pciidlist[] = { 232static struct pci_device_id pciidlist[] = {
229 radeon_PCI_IDS 233 radeon_PCI_IDS
230}; 234};
diff --git a/drivers/gpu/drm/radeon/radeon_gart.c b/drivers/gpu/drm/radeon/radeon_gart.c
index 43ec4a401f07..b990b1a2bd50 100644
--- a/drivers/gpu/drm/radeon/radeon_gart.c
+++ b/drivers/gpu/drm/radeon/radeon_gart.c
@@ -207,7 +207,6 @@ void radeon_gart_table_vram_free(struct radeon_device *rdev)
207 if (rdev->gart.robj == NULL) { 207 if (rdev->gart.robj == NULL) {
208 return; 208 return;
209 } 209 }
210 radeon_gart_table_vram_unpin(rdev);
211 radeon_bo_unref(&rdev->gart.robj); 210 radeon_bo_unref(&rdev->gart.robj);
212} 211}
213 212
@@ -467,6 +466,7 @@ int radeon_vm_manager_init(struct radeon_device *rdev)
467 size *= 2; 466 size *= 2;
468 r = radeon_sa_bo_manager_init(rdev, &rdev->vm_manager.sa_manager, 467 r = radeon_sa_bo_manager_init(rdev, &rdev->vm_manager.sa_manager,
469 RADEON_GPU_PAGE_ALIGN(size), 468 RADEON_GPU_PAGE_ALIGN(size),
469 RADEON_VM_PTB_ALIGN_SIZE,
470 RADEON_GEM_DOMAIN_VRAM); 470 RADEON_GEM_DOMAIN_VRAM);
471 if (r) { 471 if (r) {
472 dev_err(rdev->dev, "failed to allocate vm bo (%dKB)\n", 472 dev_err(rdev->dev, "failed to allocate vm bo (%dKB)\n",
@@ -620,10 +620,10 @@ int radeon_vm_alloc_pt(struct radeon_device *rdev, struct radeon_vm *vm)
620 } 620 }
621 621
622retry: 622retry:
623 pd_size = RADEON_GPU_PAGE_ALIGN(radeon_vm_directory_size(rdev)); 623 pd_size = radeon_vm_directory_size(rdev);
624 r = radeon_sa_bo_new(rdev, &rdev->vm_manager.sa_manager, 624 r = radeon_sa_bo_new(rdev, &rdev->vm_manager.sa_manager,
625 &vm->page_directory, pd_size, 625 &vm->page_directory, pd_size,
626 RADEON_GPU_PAGE_SIZE, false); 626 RADEON_VM_PTB_ALIGN_SIZE, false);
627 if (r == -ENOMEM) { 627 if (r == -ENOMEM) {
628 r = radeon_vm_evict(rdev, vm); 628 r = radeon_vm_evict(rdev, vm);
629 if (r) 629 if (r)
diff --git a/drivers/gpu/drm/radeon/radeon_irq_kms.c b/drivers/gpu/drm/radeon/radeon_irq_kms.c
index bcdefd1dcd43..081886b0642d 100644
--- a/drivers/gpu/drm/radeon/radeon_irq_kms.c
+++ b/drivers/gpu/drm/radeon/radeon_irq_kms.c
@@ -260,10 +260,6 @@ int radeon_irq_kms_init(struct radeon_device *rdev)
260{ 260{
261 int r = 0; 261 int r = 0;
262 262
263 INIT_WORK(&rdev->hotplug_work, radeon_hotplug_work_func);
264 INIT_WORK(&rdev->audio_work, r600_audio_update_hdmi);
265 INIT_WORK(&rdev->reset_work, radeon_irq_reset_work_func);
266
267 spin_lock_init(&rdev->irq.lock); 263 spin_lock_init(&rdev->irq.lock);
268 r = drm_vblank_init(rdev->ddev, rdev->num_crtc); 264 r = drm_vblank_init(rdev->ddev, rdev->num_crtc);
269 if (r) { 265 if (r) {
@@ -285,6 +281,11 @@ int radeon_irq_kms_init(struct radeon_device *rdev)
285 rdev->irq.installed = false; 281 rdev->irq.installed = false;
286 return r; 282 return r;
287 } 283 }
284
285 INIT_WORK(&rdev->hotplug_work, radeon_hotplug_work_func);
286 INIT_WORK(&rdev->audio_work, r600_audio_update_hdmi);
287 INIT_WORK(&rdev->reset_work, radeon_irq_reset_work_func);
288
288 DRM_INFO("radeon: irq initialized.\n"); 289 DRM_INFO("radeon: irq initialized.\n");
289 return 0; 290 return 0;
290} 291}
@@ -304,8 +305,8 @@ void radeon_irq_kms_fini(struct radeon_device *rdev)
304 rdev->irq.installed = false; 305 rdev->irq.installed = false;
305 if (rdev->msi_enabled) 306 if (rdev->msi_enabled)
306 pci_disable_msi(rdev->pdev); 307 pci_disable_msi(rdev->pdev);
308 flush_work(&rdev->hotplug_work);
307 } 309 }
308 flush_work(&rdev->hotplug_work);
309} 310}
310 311
311/** 312/**
diff --git a/drivers/gpu/drm/radeon/radeon_object.c b/drivers/gpu/drm/radeon/radeon_object.c
index 0219d263e2df..2020bf4a3830 100644
--- a/drivers/gpu/drm/radeon/radeon_object.c
+++ b/drivers/gpu/drm/radeon/radeon_object.c
@@ -377,6 +377,7 @@ int radeon_bo_list_validate(struct ww_acquire_ctx *ticket,
377 domain = lobj->alt_domain; 377 domain = lobj->alt_domain;
378 goto retry; 378 goto retry;
379 } 379 }
380 ttm_eu_backoff_reservation(ticket, head);
380 return r; 381 return r;
381 } 382 }
382 } 383 }
diff --git a/drivers/gpu/drm/radeon/radeon_object.h b/drivers/gpu/drm/radeon/radeon_object.h
index 91519a5622b4..49c82c480013 100644
--- a/drivers/gpu/drm/radeon/radeon_object.h
+++ b/drivers/gpu/drm/radeon/radeon_object.h
@@ -174,7 +174,7 @@ static inline void * radeon_sa_bo_cpu_addr(struct radeon_sa_bo *sa_bo)
174 174
175extern int radeon_sa_bo_manager_init(struct radeon_device *rdev, 175extern int radeon_sa_bo_manager_init(struct radeon_device *rdev,
176 struct radeon_sa_manager *sa_manager, 176 struct radeon_sa_manager *sa_manager,
177 unsigned size, u32 domain); 177 unsigned size, u32 align, u32 domain);
178extern void radeon_sa_bo_manager_fini(struct radeon_device *rdev, 178extern void radeon_sa_bo_manager_fini(struct radeon_device *rdev,
179 struct radeon_sa_manager *sa_manager); 179 struct radeon_sa_manager *sa_manager);
180extern int radeon_sa_bo_manager_start(struct radeon_device *rdev, 180extern int radeon_sa_bo_manager_start(struct radeon_device *rdev,
diff --git a/drivers/gpu/drm/radeon/radeon_pm.c b/drivers/gpu/drm/radeon/radeon_pm.c
index f374c467aaca..c557850cd345 100644
--- a/drivers/gpu/drm/radeon/radeon_pm.c
+++ b/drivers/gpu/drm/radeon/radeon_pm.c
@@ -1176,7 +1176,14 @@ int radeon_pm_init(struct radeon_device *rdev)
1176 case CHIP_VERDE: 1176 case CHIP_VERDE:
1177 case CHIP_OLAND: 1177 case CHIP_OLAND:
1178 case CHIP_HAINAN: 1178 case CHIP_HAINAN:
1179 if (radeon_dpm == 1) 1179 /* DPM requires the RLC, RV770+ dGPU requires SMC */
1180 if (!rdev->rlc_fw)
1181 rdev->pm.pm_method = PM_METHOD_PROFILE;
1182 else if ((rdev->family >= CHIP_RV770) &&
1183 (!(rdev->flags & RADEON_IS_IGP)) &&
1184 (!rdev->smc_fw))
1185 rdev->pm.pm_method = PM_METHOD_PROFILE;
1186 else if (radeon_dpm == 1)
1180 rdev->pm.pm_method = PM_METHOD_DPM; 1187 rdev->pm.pm_method = PM_METHOD_DPM;
1181 else 1188 else
1182 rdev->pm.pm_method = PM_METHOD_PROFILE; 1189 rdev->pm.pm_method = PM_METHOD_PROFILE;
diff --git a/drivers/gpu/drm/radeon/radeon_ring.c b/drivers/gpu/drm/radeon/radeon_ring.c
index 5f1c51a776ed..fb5ea6208970 100644
--- a/drivers/gpu/drm/radeon/radeon_ring.c
+++ b/drivers/gpu/drm/radeon/radeon_ring.c
@@ -224,6 +224,7 @@ int radeon_ib_pool_init(struct radeon_device *rdev)
224 } 224 }
225 r = radeon_sa_bo_manager_init(rdev, &rdev->ring_tmp_bo, 225 r = radeon_sa_bo_manager_init(rdev, &rdev->ring_tmp_bo,
226 RADEON_IB_POOL_SIZE*64*1024, 226 RADEON_IB_POOL_SIZE*64*1024,
227 RADEON_GPU_PAGE_SIZE,
227 RADEON_GEM_DOMAIN_GTT); 228 RADEON_GEM_DOMAIN_GTT);
228 if (r) { 229 if (r) {
229 return r; 230 return r;
diff --git a/drivers/gpu/drm/radeon/radeon_sa.c b/drivers/gpu/drm/radeon/radeon_sa.c
index 0abe5a9431bb..f0bac68254b7 100644
--- a/drivers/gpu/drm/radeon/radeon_sa.c
+++ b/drivers/gpu/drm/radeon/radeon_sa.c
@@ -49,7 +49,7 @@ static void radeon_sa_bo_try_free(struct radeon_sa_manager *sa_manager);
49 49
50int radeon_sa_bo_manager_init(struct radeon_device *rdev, 50int radeon_sa_bo_manager_init(struct radeon_device *rdev,
51 struct radeon_sa_manager *sa_manager, 51 struct radeon_sa_manager *sa_manager,
52 unsigned size, u32 domain) 52 unsigned size, u32 align, u32 domain)
53{ 53{
54 int i, r; 54 int i, r;
55 55
@@ -57,13 +57,14 @@ int radeon_sa_bo_manager_init(struct radeon_device *rdev,
57 sa_manager->bo = NULL; 57 sa_manager->bo = NULL;
58 sa_manager->size = size; 58 sa_manager->size = size;
59 sa_manager->domain = domain; 59 sa_manager->domain = domain;
60 sa_manager->align = align;
60 sa_manager->hole = &sa_manager->olist; 61 sa_manager->hole = &sa_manager->olist;
61 INIT_LIST_HEAD(&sa_manager->olist); 62 INIT_LIST_HEAD(&sa_manager->olist);
62 for (i = 0; i < RADEON_NUM_RINGS; ++i) { 63 for (i = 0; i < RADEON_NUM_RINGS; ++i) {
63 INIT_LIST_HEAD(&sa_manager->flist[i]); 64 INIT_LIST_HEAD(&sa_manager->flist[i]);
64 } 65 }
65 66
66 r = radeon_bo_create(rdev, size, RADEON_GPU_PAGE_SIZE, true, 67 r = radeon_bo_create(rdev, size, align, true,
67 domain, NULL, &sa_manager->bo); 68 domain, NULL, &sa_manager->bo);
68 if (r) { 69 if (r) {
69 dev_err(rdev->dev, "(%d) failed to allocate bo for manager\n", r); 70 dev_err(rdev->dev, "(%d) failed to allocate bo for manager\n", r);
@@ -317,7 +318,7 @@ int radeon_sa_bo_new(struct radeon_device *rdev,
317 unsigned tries[RADEON_NUM_RINGS]; 318 unsigned tries[RADEON_NUM_RINGS];
318 int i, r; 319 int i, r;
319 320
320 BUG_ON(align > RADEON_GPU_PAGE_SIZE); 321 BUG_ON(align > sa_manager->align);
321 BUG_ON(size > sa_manager->size); 322 BUG_ON(size > sa_manager->size);
322 323
323 *sa_bo = kmalloc(sizeof(struct radeon_sa_bo), GFP_KERNEL); 324 *sa_bo = kmalloc(sizeof(struct radeon_sa_bo), GFP_KERNEL);
diff --git a/drivers/gpu/drm/radeon/radeon_uvd.c b/drivers/gpu/drm/radeon/radeon_uvd.c
index 41efcec28cd8..f1c15754e73c 100644
--- a/drivers/gpu/drm/radeon/radeon_uvd.c
+++ b/drivers/gpu/drm/radeon/radeon_uvd.c
@@ -56,20 +56,12 @@ static void radeon_uvd_idle_work_handler(struct work_struct *work);
56 56
57int radeon_uvd_init(struct radeon_device *rdev) 57int radeon_uvd_init(struct radeon_device *rdev)
58{ 58{
59 struct platform_device *pdev;
60 unsigned long bo_size; 59 unsigned long bo_size;
61 const char *fw_name; 60 const char *fw_name;
62 int i, r; 61 int i, r;
63 62
64 INIT_DELAYED_WORK(&rdev->uvd.idle_work, radeon_uvd_idle_work_handler); 63 INIT_DELAYED_WORK(&rdev->uvd.idle_work, radeon_uvd_idle_work_handler);
65 64
66 pdev = platform_device_register_simple("radeon_uvd", 0, NULL, 0);
67 r = IS_ERR(pdev);
68 if (r) {
69 dev_err(rdev->dev, "radeon_uvd: Failed to register firmware\n");
70 return -EINVAL;
71 }
72
73 switch (rdev->family) { 65 switch (rdev->family) {
74 case CHIP_RV710: 66 case CHIP_RV710:
75 case CHIP_RV730: 67 case CHIP_RV730:
@@ -112,16 +104,13 @@ int radeon_uvd_init(struct radeon_device *rdev)
112 return -EINVAL; 104 return -EINVAL;
113 } 105 }
114 106
115 r = request_firmware(&rdev->uvd_fw, fw_name, &pdev->dev); 107 r = request_firmware(&rdev->uvd_fw, fw_name, rdev->dev);
116 if (r) { 108 if (r) {
117 dev_err(rdev->dev, "radeon_uvd: Can't load firmware \"%s\"\n", 109 dev_err(rdev->dev, "radeon_uvd: Can't load firmware \"%s\"\n",
118 fw_name); 110 fw_name);
119 platform_device_unregister(pdev);
120 return r; 111 return r;
121 } 112 }
122 113
123 platform_device_unregister(pdev);
124
125 bo_size = RADEON_GPU_PAGE_ALIGN(rdev->uvd_fw->size + 8) + 114 bo_size = RADEON_GPU_PAGE_ALIGN(rdev->uvd_fw->size + 8) +
126 RADEON_UVD_STACK_SIZE + RADEON_UVD_HEAP_SIZE; 115 RADEON_UVD_STACK_SIZE + RADEON_UVD_HEAP_SIZE;
127 r = radeon_bo_create(rdev, bo_size, PAGE_SIZE, true, 116 r = radeon_bo_create(rdev, bo_size, PAGE_SIZE, true,
@@ -131,16 +120,29 @@ int radeon_uvd_init(struct radeon_device *rdev)
131 return r; 120 return r;
132 } 121 }
133 122
134 r = radeon_uvd_resume(rdev); 123 r = radeon_bo_reserve(rdev->uvd.vcpu_bo, false);
135 if (r) 124 if (r) {
125 radeon_bo_unref(&rdev->uvd.vcpu_bo);
126 dev_err(rdev->dev, "(%d) failed to reserve UVD bo\n", r);
136 return r; 127 return r;
128 }
137 129
138 memset(rdev->uvd.cpu_addr, 0, bo_size); 130 r = radeon_bo_pin(rdev->uvd.vcpu_bo, RADEON_GEM_DOMAIN_VRAM,
139 memcpy(rdev->uvd.cpu_addr, rdev->uvd_fw->data, rdev->uvd_fw->size); 131 &rdev->uvd.gpu_addr);
132 if (r) {
133 radeon_bo_unreserve(rdev->uvd.vcpu_bo);
134 radeon_bo_unref(&rdev->uvd.vcpu_bo);
135 dev_err(rdev->dev, "(%d) UVD bo pin failed\n", r);
136 return r;
137 }
140 138
141 r = radeon_uvd_suspend(rdev); 139 r = radeon_bo_kmap(rdev->uvd.vcpu_bo, &rdev->uvd.cpu_addr);
142 if (r) 140 if (r) {
141 dev_err(rdev->dev, "(%d) UVD map failed\n", r);
143 return r; 142 return r;
143 }
144
145 radeon_bo_unreserve(rdev->uvd.vcpu_bo);
144 146
145 for (i = 0; i < RADEON_MAX_UVD_HANDLES; ++i) { 147 for (i = 0; i < RADEON_MAX_UVD_HANDLES; ++i) {
146 atomic_set(&rdev->uvd.handles[i], 0); 148 atomic_set(&rdev->uvd.handles[i], 0);
@@ -152,70 +154,73 @@ int radeon_uvd_init(struct radeon_device *rdev)
152 154
153void radeon_uvd_fini(struct radeon_device *rdev) 155void radeon_uvd_fini(struct radeon_device *rdev)
154{ 156{
155 radeon_uvd_suspend(rdev);
156 radeon_bo_unref(&rdev->uvd.vcpu_bo);
157}
158
159int radeon_uvd_suspend(struct radeon_device *rdev)
160{
161 int r; 157 int r;
162 158
163 if (rdev->uvd.vcpu_bo == NULL) 159 if (rdev->uvd.vcpu_bo == NULL)
164 return 0; 160 return;
165 161
166 r = radeon_bo_reserve(rdev->uvd.vcpu_bo, false); 162 r = radeon_bo_reserve(rdev->uvd.vcpu_bo, false);
167 if (!r) { 163 if (!r) {
168 radeon_bo_kunmap(rdev->uvd.vcpu_bo); 164 radeon_bo_kunmap(rdev->uvd.vcpu_bo);
169 radeon_bo_unpin(rdev->uvd.vcpu_bo); 165 radeon_bo_unpin(rdev->uvd.vcpu_bo);
170 rdev->uvd.cpu_addr = NULL;
171 if (!radeon_bo_pin(rdev->uvd.vcpu_bo, RADEON_GEM_DOMAIN_CPU, NULL)) {
172 radeon_bo_kmap(rdev->uvd.vcpu_bo, &rdev->uvd.cpu_addr);
173 }
174 radeon_bo_unreserve(rdev->uvd.vcpu_bo); 166 radeon_bo_unreserve(rdev->uvd.vcpu_bo);
175
176 if (rdev->uvd.cpu_addr) {
177 radeon_fence_driver_start_ring(rdev, R600_RING_TYPE_UVD_INDEX);
178 } else {
179 rdev->fence_drv[R600_RING_TYPE_UVD_INDEX].cpu_addr = NULL;
180 }
181 } 167 }
182 return r; 168
169 radeon_bo_unref(&rdev->uvd.vcpu_bo);
170
171 release_firmware(rdev->uvd_fw);
172}
173
174int radeon_uvd_suspend(struct radeon_device *rdev)
175{
176 unsigned size;
177 void *ptr;
178 int i;
179
180 if (rdev->uvd.vcpu_bo == NULL)
181 return 0;
182
183 for (i = 0; i < RADEON_MAX_UVD_HANDLES; ++i)
184 if (atomic_read(&rdev->uvd.handles[i]))
185 break;
186
187 if (i == RADEON_MAX_UVD_HANDLES)
188 return 0;
189
190 size = radeon_bo_size(rdev->uvd.vcpu_bo);
191 size -= rdev->uvd_fw->size;
192
193 ptr = rdev->uvd.cpu_addr;
194 ptr += rdev->uvd_fw->size;
195
196 rdev->uvd.saved_bo = kmalloc(size, GFP_KERNEL);
197 memcpy(rdev->uvd.saved_bo, ptr, size);
198
199 return 0;
183} 200}
184 201
185int radeon_uvd_resume(struct radeon_device *rdev) 202int radeon_uvd_resume(struct radeon_device *rdev)
186{ 203{
187 int r; 204 unsigned size;
205 void *ptr;
188 206
189 if (rdev->uvd.vcpu_bo == NULL) 207 if (rdev->uvd.vcpu_bo == NULL)
190 return -EINVAL; 208 return -EINVAL;
191 209
192 r = radeon_bo_reserve(rdev->uvd.vcpu_bo, false); 210 memcpy(rdev->uvd.cpu_addr, rdev->uvd_fw->data, rdev->uvd_fw->size);
193 if (r) {
194 radeon_bo_unref(&rdev->uvd.vcpu_bo);
195 dev_err(rdev->dev, "(%d) failed to reserve UVD bo\n", r);
196 return r;
197 }
198 211
199 /* Have been pin in cpu unmap unpin */ 212 size = radeon_bo_size(rdev->uvd.vcpu_bo);
200 radeon_bo_kunmap(rdev->uvd.vcpu_bo); 213 size -= rdev->uvd_fw->size;
201 radeon_bo_unpin(rdev->uvd.vcpu_bo);
202 214
203 r = radeon_bo_pin(rdev->uvd.vcpu_bo, RADEON_GEM_DOMAIN_VRAM, 215 ptr = rdev->uvd.cpu_addr;
204 &rdev->uvd.gpu_addr); 216 ptr += rdev->uvd_fw->size;
205 if (r) {
206 radeon_bo_unreserve(rdev->uvd.vcpu_bo);
207 radeon_bo_unref(&rdev->uvd.vcpu_bo);
208 dev_err(rdev->dev, "(%d) UVD bo pin failed\n", r);
209 return r;
210 }
211 217
212 r = radeon_bo_kmap(rdev->uvd.vcpu_bo, &rdev->uvd.cpu_addr); 218 if (rdev->uvd.saved_bo != NULL) {
213 if (r) { 219 memcpy(ptr, rdev->uvd.saved_bo, size);
214 dev_err(rdev->dev, "(%d) UVD map failed\n", r); 220 kfree(rdev->uvd.saved_bo);
215 return r; 221 rdev->uvd.saved_bo = NULL;
216 } 222 } else
217 223 memset(ptr, 0, size);
218 radeon_bo_unreserve(rdev->uvd.vcpu_bo);
219 224
220 return 0; 225 return 0;
221} 226}
@@ -230,8 +235,8 @@ void radeon_uvd_free_handles(struct radeon_device *rdev, struct drm_file *filp)
230{ 235{
231 int i, r; 236 int i, r;
232 for (i = 0; i < RADEON_MAX_UVD_HANDLES; ++i) { 237 for (i = 0; i < RADEON_MAX_UVD_HANDLES; ++i) {
233 if (rdev->uvd.filp[i] == filp) { 238 uint32_t handle = atomic_read(&rdev->uvd.handles[i]);
234 uint32_t handle = atomic_read(&rdev->uvd.handles[i]); 239 if (handle != 0 && rdev->uvd.filp[i] == filp) {
235 struct radeon_fence *fence; 240 struct radeon_fence *fence;
236 241
237 r = radeon_uvd_get_destroy_msg(rdev, 242 r = radeon_uvd_get_destroy_msg(rdev,
@@ -352,8 +357,10 @@ static int radeon_uvd_cs_msg(struct radeon_cs_parser *p, struct radeon_bo *bo,
352 } 357 }
353 358
354 r = radeon_bo_kmap(bo, &ptr); 359 r = radeon_bo_kmap(bo, &ptr);
355 if (r) 360 if (r) {
361 DRM_ERROR("Failed mapping the UVD message (%d)!\n", r);
356 return r; 362 return r;
363 }
357 364
358 msg = ptr + offset; 365 msg = ptr + offset;
359 366
@@ -379,8 +386,14 @@ static int radeon_uvd_cs_msg(struct radeon_cs_parser *p, struct radeon_bo *bo,
379 radeon_bo_kunmap(bo); 386 radeon_bo_kunmap(bo);
380 return 0; 387 return 0;
381 } else { 388 } else {
382 /* it's a create msg, no special handling needed */
383 radeon_bo_kunmap(bo); 389 radeon_bo_kunmap(bo);
390
391 if (msg_type != 0) {
392 DRM_ERROR("Illegal UVD message type (%d)!\n", msg_type);
393 return -EINVAL;
394 }
395
396 /* it's a create msg, no special handling needed */
384 } 397 }
385 398
386 /* create or decode, validate the handle */ 399 /* create or decode, validate the handle */
@@ -403,7 +416,7 @@ static int radeon_uvd_cs_msg(struct radeon_cs_parser *p, struct radeon_bo *bo,
403 416
404static int radeon_uvd_cs_reloc(struct radeon_cs_parser *p, 417static int radeon_uvd_cs_reloc(struct radeon_cs_parser *p,
405 int data0, int data1, 418 int data0, int data1,
406 unsigned buf_sizes[]) 419 unsigned buf_sizes[], bool *has_msg_cmd)
407{ 420{
408 struct radeon_cs_chunk *relocs_chunk; 421 struct radeon_cs_chunk *relocs_chunk;
409 struct radeon_cs_reloc *reloc; 422 struct radeon_cs_reloc *reloc;
@@ -432,7 +445,7 @@ static int radeon_uvd_cs_reloc(struct radeon_cs_parser *p,
432 445
433 if (cmd < 0x4) { 446 if (cmd < 0x4) {
434 if ((end - start) < buf_sizes[cmd]) { 447 if ((end - start) < buf_sizes[cmd]) {
435 DRM_ERROR("buffer to small (%d / %d)!\n", 448 DRM_ERROR("buffer (%d) to small (%d / %d)!\n", cmd,
436 (unsigned)(end - start), buf_sizes[cmd]); 449 (unsigned)(end - start), buf_sizes[cmd]);
437 return -EINVAL; 450 return -EINVAL;
438 } 451 }
@@ -457,9 +470,17 @@ static int radeon_uvd_cs_reloc(struct radeon_cs_parser *p,
457 } 470 }
458 471
459 if (cmd == 0) { 472 if (cmd == 0) {
473 if (*has_msg_cmd) {
474 DRM_ERROR("More than one message in a UVD-IB!\n");
475 return -EINVAL;
476 }
477 *has_msg_cmd = true;
460 r = radeon_uvd_cs_msg(p, reloc->robj, offset, buf_sizes); 478 r = radeon_uvd_cs_msg(p, reloc->robj, offset, buf_sizes);
461 if (r) 479 if (r)
462 return r; 480 return r;
481 } else if (!*has_msg_cmd) {
482 DRM_ERROR("Message needed before other commands are send!\n");
483 return -EINVAL;
463 } 484 }
464 485
465 return 0; 486 return 0;
@@ -468,7 +489,8 @@ static int radeon_uvd_cs_reloc(struct radeon_cs_parser *p,
468static int radeon_uvd_cs_reg(struct radeon_cs_parser *p, 489static int radeon_uvd_cs_reg(struct radeon_cs_parser *p,
469 struct radeon_cs_packet *pkt, 490 struct radeon_cs_packet *pkt,
470 int *data0, int *data1, 491 int *data0, int *data1,
471 unsigned buf_sizes[]) 492 unsigned buf_sizes[],
493 bool *has_msg_cmd)
472{ 494{
473 int i, r; 495 int i, r;
474 496
@@ -482,7 +504,8 @@ static int radeon_uvd_cs_reg(struct radeon_cs_parser *p,
482 *data1 = p->idx; 504 *data1 = p->idx;
483 break; 505 break;
484 case UVD_GPCOM_VCPU_CMD: 506 case UVD_GPCOM_VCPU_CMD:
485 r = radeon_uvd_cs_reloc(p, *data0, *data1, buf_sizes); 507 r = radeon_uvd_cs_reloc(p, *data0, *data1,
508 buf_sizes, has_msg_cmd);
486 if (r) 509 if (r)
487 return r; 510 return r;
488 break; 511 break;
@@ -503,6 +526,9 @@ int radeon_uvd_cs_parse(struct radeon_cs_parser *p)
503 struct radeon_cs_packet pkt; 526 struct radeon_cs_packet pkt;
504 int r, data0 = 0, data1 = 0; 527 int r, data0 = 0, data1 = 0;
505 528
529 /* does the IB has a msg command */
530 bool has_msg_cmd = false;
531
506 /* minimum buffer sizes */ 532 /* minimum buffer sizes */
507 unsigned buf_sizes[] = { 533 unsigned buf_sizes[] = {
508 [0x00000000] = 2048, 534 [0x00000000] = 2048,
@@ -529,8 +555,8 @@ int radeon_uvd_cs_parse(struct radeon_cs_parser *p)
529 return r; 555 return r;
530 switch (pkt.type) { 556 switch (pkt.type) {
531 case RADEON_PACKET_TYPE0: 557 case RADEON_PACKET_TYPE0:
532 r = radeon_uvd_cs_reg(p, &pkt, &data0, 558 r = radeon_uvd_cs_reg(p, &pkt, &data0, &data1,
533 &data1, buf_sizes); 559 buf_sizes, &has_msg_cmd);
534 if (r) 560 if (r)
535 return r; 561 return r;
536 break; 562 break;
@@ -542,6 +568,12 @@ int radeon_uvd_cs_parse(struct radeon_cs_parser *p)
542 return -EINVAL; 568 return -EINVAL;
543 } 569 }
544 } while (p->idx < p->chunks[p->chunk_ib_idx].length_dw); 570 } while (p->idx < p->chunks[p->chunk_ib_idx].length_dw);
571
572 if (!has_msg_cmd) {
573 DRM_ERROR("UVD-IBs need a msg command!\n");
574 return -EINVAL;
575 }
576
545 return 0; 577 return 0;
546} 578}
547 579
diff --git a/drivers/gpu/drm/radeon/rs780_dpm.c b/drivers/gpu/drm/radeon/rs780_dpm.c
index bef832a62fee..d1a1ce73bd45 100644
--- a/drivers/gpu/drm/radeon/rs780_dpm.c
+++ b/drivers/gpu/drm/radeon/rs780_dpm.c
@@ -28,6 +28,7 @@
28#include "r600_dpm.h" 28#include "r600_dpm.h"
29#include "rs780_dpm.h" 29#include "rs780_dpm.h"
30#include "atom.h" 30#include "atom.h"
31#include <linux/seq_file.h>
31 32
32static struct igp_ps *rs780_get_ps(struct radeon_ps *rps) 33static struct igp_ps *rs780_get_ps(struct radeon_ps *rps)
33{ 34{
@@ -961,3 +962,27 @@ u32 rs780_dpm_get_mclk(struct radeon_device *rdev, bool low)
961 962
962 return pi->bootup_uma_clk; 963 return pi->bootup_uma_clk;
963} 964}
965
966void rs780_dpm_debugfs_print_current_performance_level(struct radeon_device *rdev,
967 struct seq_file *m)
968{
969 struct radeon_ps *rps = rdev->pm.dpm.current_ps;
970 struct igp_ps *ps = rs780_get_ps(rps);
971 u32 current_fb_div = RREG32(FVTHROT_STATUS_REG0) & CURRENT_FEEDBACK_DIV_MASK;
972 u32 func_cntl = RREG32(CG_SPLL_FUNC_CNTL);
973 u32 ref_div = ((func_cntl & SPLL_REF_DIV_MASK) >> SPLL_REF_DIV_SHIFT) + 1;
974 u32 post_div = ((func_cntl & SPLL_SW_HILEN_MASK) >> SPLL_SW_HILEN_SHIFT) + 1 +
975 ((func_cntl & SPLL_SW_LOLEN_MASK) >> SPLL_SW_LOLEN_SHIFT) + 1;
976 u32 sclk = (rdev->clock.spll.reference_freq * current_fb_div) /
977 (post_div * ref_div);
978
979 seq_printf(m, "uvd vclk: %d dclk: %d\n", rps->vclk, rps->dclk);
980
981 /* guess based on the current sclk */
982 if (sclk < (ps->sclk_low + 500))
983 seq_printf(m, "power level 0 sclk: %u vddc_index: %d\n",
984 ps->sclk_low, ps->min_voltage);
985 else
986 seq_printf(m, "power level 1 sclk: %u vddc_index: %d\n",
987 ps->sclk_high, ps->max_voltage);
988}
diff --git a/drivers/gpu/drm/radeon/rs780d.h b/drivers/gpu/drm/radeon/rs780d.h
index b1142ed1c628..cfbe9a43d97b 100644
--- a/drivers/gpu/drm/radeon/rs780d.h
+++ b/drivers/gpu/drm/radeon/rs780d.h
@@ -28,6 +28,7 @@
28# define SPLL_SLEEP (1 << 1) 28# define SPLL_SLEEP (1 << 1)
29# define SPLL_REF_DIV(x) ((x) << 2) 29# define SPLL_REF_DIV(x) ((x) << 2)
30# define SPLL_REF_DIV_MASK (7 << 2) 30# define SPLL_REF_DIV_MASK (7 << 2)
31# define SPLL_REF_DIV_SHIFT 2
31# define SPLL_FB_DIV(x) ((x) << 5) 32# define SPLL_FB_DIV(x) ((x) << 5)
32# define SPLL_FB_DIV_MASK (0xff << 2) 33# define SPLL_FB_DIV_MASK (0xff << 2)
33# define SPLL_FB_DIV_SHIFT 2 34# define SPLL_FB_DIV_SHIFT 2
@@ -36,8 +37,10 @@
36# define SPLL_PULSENUM_MASK (3 << 14) 37# define SPLL_PULSENUM_MASK (3 << 14)
37# define SPLL_SW_HILEN(x) ((x) << 16) 38# define SPLL_SW_HILEN(x) ((x) << 16)
38# define SPLL_SW_HILEN_MASK (0xf << 16) 39# define SPLL_SW_HILEN_MASK (0xf << 16)
40# define SPLL_SW_HILEN_SHIFT 16
39# define SPLL_SW_LOLEN(x) ((x) << 20) 41# define SPLL_SW_LOLEN(x) ((x) << 20)
40# define SPLL_SW_LOLEN_MASK (0xf << 20) 42# define SPLL_SW_LOLEN_MASK (0xf << 20)
43# define SPLL_SW_LOLEN_SHIFT 20
41# define SPLL_DIVEN (1 << 24) 44# define SPLL_DIVEN (1 << 24)
42# define SPLL_BYPASS_EN (1 << 25) 45# define SPLL_BYPASS_EN (1 << 25)
43# define SPLL_CHG_STATUS (1 << 29) 46# define SPLL_CHG_STATUS (1 << 29)
diff --git a/drivers/gpu/drm/radeon/rv6xx_dpm.c b/drivers/gpu/drm/radeon/rv6xx_dpm.c
index 8303de267ee5..bdd888b4db2b 100644
--- a/drivers/gpu/drm/radeon/rv6xx_dpm.c
+++ b/drivers/gpu/drm/radeon/rv6xx_dpm.c
@@ -819,7 +819,7 @@ static void rv6xx_program_memory_timing_parameters(struct radeon_device *rdev)
819 POWERMODE1(calculate_memory_refresh_rate(rdev, 819 POWERMODE1(calculate_memory_refresh_rate(rdev,
820 pi->hw.sclks[R600_POWER_LEVEL_MEDIUM])) | 820 pi->hw.sclks[R600_POWER_LEVEL_MEDIUM])) |
821 POWERMODE2(calculate_memory_refresh_rate(rdev, 821 POWERMODE2(calculate_memory_refresh_rate(rdev,
822 pi->hw.sclks[R600_POWER_LEVEL_MEDIUM])) | 822 pi->hw.sclks[R600_POWER_LEVEL_HIGH])) |
823 POWERMODE3(calculate_memory_refresh_rate(rdev, 823 POWERMODE3(calculate_memory_refresh_rate(rdev,
824 pi->hw.sclks[R600_POWER_LEVEL_HIGH]))); 824 pi->hw.sclks[R600_POWER_LEVEL_HIGH])));
825 WREG32(ARB_RFSH_RATE, arb_refresh_rate); 825 WREG32(ARB_RFSH_RATE, arb_refresh_rate);
@@ -1182,10 +1182,10 @@ static void rv6xx_program_display_gap(struct radeon_device *rdev)
1182 u32 tmp = RREG32(CG_DISPLAY_GAP_CNTL); 1182 u32 tmp = RREG32(CG_DISPLAY_GAP_CNTL);
1183 1183
1184 tmp &= ~(DISP1_GAP_MCHG_MASK | DISP2_GAP_MCHG_MASK); 1184 tmp &= ~(DISP1_GAP_MCHG_MASK | DISP2_GAP_MCHG_MASK);
1185 if (RREG32(AVIVO_D1CRTC_CONTROL) & AVIVO_CRTC_EN) { 1185 if (rdev->pm.dpm.new_active_crtcs & 1) {
1186 tmp |= DISP1_GAP_MCHG(R600_PM_DISPLAY_GAP_VBLANK); 1186 tmp |= DISP1_GAP_MCHG(R600_PM_DISPLAY_GAP_VBLANK);
1187 tmp |= DISP2_GAP_MCHG(R600_PM_DISPLAY_GAP_IGNORE); 1187 tmp |= DISP2_GAP_MCHG(R600_PM_DISPLAY_GAP_IGNORE);
1188 } else if (RREG32(AVIVO_D2CRTC_CONTROL) & AVIVO_CRTC_EN) { 1188 } else if (rdev->pm.dpm.new_active_crtcs & 2) {
1189 tmp |= DISP1_GAP_MCHG(R600_PM_DISPLAY_GAP_IGNORE); 1189 tmp |= DISP1_GAP_MCHG(R600_PM_DISPLAY_GAP_IGNORE);
1190 tmp |= DISP2_GAP_MCHG(R600_PM_DISPLAY_GAP_VBLANK); 1190 tmp |= DISP2_GAP_MCHG(R600_PM_DISPLAY_GAP_VBLANK);
1191 } else { 1191 } else {
@@ -1670,6 +1670,8 @@ int rv6xx_dpm_set_power_state(struct radeon_device *rdev)
1670 struct radeon_ps *old_ps = rdev->pm.dpm.current_ps; 1670 struct radeon_ps *old_ps = rdev->pm.dpm.current_ps;
1671 int ret; 1671 int ret;
1672 1672
1673 pi->restricted_levels = 0;
1674
1673 rv6xx_set_uvd_clock_before_set_eng_clock(rdev, new_ps, old_ps); 1675 rv6xx_set_uvd_clock_before_set_eng_clock(rdev, new_ps, old_ps);
1674 1676
1675 rv6xx_clear_vc(rdev); 1677 rv6xx_clear_vc(rdev);
@@ -1756,6 +1758,8 @@ int rv6xx_dpm_set_power_state(struct radeon_device *rdev)
1756 1758
1757 rv6xx_set_uvd_clock_after_set_eng_clock(rdev, new_ps, old_ps); 1759 rv6xx_set_uvd_clock_after_set_eng_clock(rdev, new_ps, old_ps);
1758 1760
1761 rdev->pm.dpm.forced_level = RADEON_DPM_FORCED_LEVEL_AUTO;
1762
1759 return 0; 1763 return 0;
1760} 1764}
1761 1765
@@ -1763,12 +1767,14 @@ void rv6xx_setup_asic(struct radeon_device *rdev)
1763{ 1767{
1764 r600_enable_acpi_pm(rdev); 1768 r600_enable_acpi_pm(rdev);
1765 1769
1766 if (rdev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_ASPM_L0s) 1770 if (radeon_aspm != 0) {
1767 rv6xx_enable_l0s(rdev); 1771 if (rdev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_ASPM_L0s)
1768 if (rdev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_ASPM_L1) 1772 rv6xx_enable_l0s(rdev);
1769 rv6xx_enable_l1(rdev); 1773 if (rdev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_ASPM_L1)
1770 if (rdev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_TURNOFFPLL_ASPML1) 1774 rv6xx_enable_l1(rdev);
1771 rv6xx_enable_pll_sleep_in_l1(rdev); 1775 if (rdev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_TURNOFFPLL_ASPML1)
1776 rv6xx_enable_pll_sleep_in_l1(rdev);
1777 }
1772} 1778}
1773 1779
1774void rv6xx_dpm_display_configuration_changed(struct radeon_device *rdev) 1780void rv6xx_dpm_display_configuration_changed(struct radeon_device *rdev)
@@ -1938,9 +1944,7 @@ static int rv6xx_parse_power_table(struct radeon_device *rdev)
1938 1944
1939int rv6xx_dpm_init(struct radeon_device *rdev) 1945int rv6xx_dpm_init(struct radeon_device *rdev)
1940{ 1946{
1941 int index = GetIndexIntoMasterTable(DATA, ASIC_InternalSS_Info); 1947 struct radeon_atom_ss ss;
1942 uint16_t data_offset, size;
1943 uint8_t frev, crev;
1944 struct atom_clock_dividers dividers; 1948 struct atom_clock_dividers dividers;
1945 struct rv6xx_power_info *pi; 1949 struct rv6xx_power_info *pi;
1946 int ret; 1950 int ret;
@@ -1983,16 +1987,18 @@ int rv6xx_dpm_init(struct radeon_device *rdev)
1983 1987
1984 pi->gfx_clock_gating = true; 1988 pi->gfx_clock_gating = true;
1985 1989
1986 if (atom_parse_data_header(rdev->mode_info.atom_context, index, &size, 1990 pi->sclk_ss = radeon_atombios_get_asic_ss_info(rdev, &ss,
1987 &frev, &crev, &data_offset)) { 1991 ASIC_INTERNAL_ENGINE_SS, 0);
1988 pi->sclk_ss = true; 1992 pi->mclk_ss = radeon_atombios_get_asic_ss_info(rdev, &ss,
1989 pi->mclk_ss = true; 1993 ASIC_INTERNAL_MEMORY_SS, 0);
1994
1995 /* Disable sclk ss, causes hangs on a lot of systems */
1996 pi->sclk_ss = false;
1997
1998 if (pi->sclk_ss || pi->mclk_ss)
1990 pi->dynamic_ss = true; 1999 pi->dynamic_ss = true;
1991 } else { 2000 else
1992 pi->sclk_ss = false;
1993 pi->mclk_ss = false;
1994 pi->dynamic_ss = false; 2001 pi->dynamic_ss = false;
1995 }
1996 2002
1997 pi->dynamic_pcie_gen2 = true; 2003 pi->dynamic_pcie_gen2 = true;
1998 2004
@@ -2083,3 +2089,34 @@ u32 rv6xx_dpm_get_mclk(struct radeon_device *rdev, bool low)
2083 else 2089 else
2084 return requested_state->high.mclk; 2090 return requested_state->high.mclk;
2085} 2091}
2092
2093int rv6xx_dpm_force_performance_level(struct radeon_device *rdev,
2094 enum radeon_dpm_forced_level level)
2095{
2096 struct rv6xx_power_info *pi = rv6xx_get_pi(rdev);
2097
2098 if (level == RADEON_DPM_FORCED_LEVEL_HIGH) {
2099 pi->restricted_levels = 3;
2100 } else if (level == RADEON_DPM_FORCED_LEVEL_LOW) {
2101 pi->restricted_levels = 2;
2102 } else {
2103 pi->restricted_levels = 0;
2104 }
2105
2106 rv6xx_clear_vc(rdev);
2107 r600_power_level_enable(rdev, R600_POWER_LEVEL_LOW, true);
2108 r600_set_at(rdev, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF);
2109 r600_wait_for_power_level(rdev, R600_POWER_LEVEL_LOW);
2110 r600_power_level_enable(rdev, R600_POWER_LEVEL_HIGH, false);
2111 r600_power_level_enable(rdev, R600_POWER_LEVEL_MEDIUM, false);
2112 rv6xx_enable_medium(rdev);
2113 rv6xx_enable_high(rdev);
2114 if (pi->restricted_levels == 3)
2115 r600_power_level_enable(rdev, R600_POWER_LEVEL_LOW, false);
2116 rv6xx_program_vc(rdev);
2117 rv6xx_program_at(rdev);
2118
2119 rdev->pm.dpm.forced_level = level;
2120
2121 return 0;
2122}
diff --git a/drivers/gpu/drm/radeon/rv770.c b/drivers/gpu/drm/radeon/rv770.c
index 4a62ad2e5399..bcc68ec204ad 100644
--- a/drivers/gpu/drm/radeon/rv770.c
+++ b/drivers/gpu/drm/radeon/rv770.c
@@ -1829,6 +1829,8 @@ static int rv770_startup(struct radeon_device *rdev)
1829 /* enable pcie gen2 link */ 1829 /* enable pcie gen2 link */
1830 rv770_pcie_gen2_enable(rdev); 1830 rv770_pcie_gen2_enable(rdev);
1831 1831
1832 rv770_mc_program(rdev);
1833
1832 if (!rdev->me_fw || !rdev->pfp_fw || !rdev->rlc_fw) { 1834 if (!rdev->me_fw || !rdev->pfp_fw || !rdev->rlc_fw) {
1833 r = r600_init_microcode(rdev); 1835 r = r600_init_microcode(rdev);
1834 if (r) { 1836 if (r) {
@@ -1841,7 +1843,6 @@ static int rv770_startup(struct radeon_device *rdev)
1841 if (r) 1843 if (r)
1842 return r; 1844 return r;
1843 1845
1844 rv770_mc_program(rdev);
1845 if (rdev->flags & RADEON_IS_AGP) { 1846 if (rdev->flags & RADEON_IS_AGP) {
1846 rv770_agp_enable(rdev); 1847 rv770_agp_enable(rdev);
1847 } else { 1848 } else {
@@ -1983,6 +1984,7 @@ int rv770_resume(struct radeon_device *rdev)
1983int rv770_suspend(struct radeon_device *rdev) 1984int rv770_suspend(struct radeon_device *rdev)
1984{ 1985{
1985 r600_audio_fini(rdev); 1986 r600_audio_fini(rdev);
1987 r600_uvd_stop(rdev);
1986 radeon_uvd_suspend(rdev); 1988 radeon_uvd_suspend(rdev);
1987 r700_cp_stop(rdev); 1989 r700_cp_stop(rdev);
1988 r600_dma_stop(rdev); 1990 r600_dma_stop(rdev);
@@ -2098,6 +2100,7 @@ void rv770_fini(struct radeon_device *rdev)
2098 radeon_ib_pool_fini(rdev); 2100 radeon_ib_pool_fini(rdev);
2099 radeon_irq_kms_fini(rdev); 2101 radeon_irq_kms_fini(rdev);
2100 rv770_pcie_gart_fini(rdev); 2102 rv770_pcie_gart_fini(rdev);
2103 r600_uvd_stop(rdev);
2101 radeon_uvd_fini(rdev); 2104 radeon_uvd_fini(rdev);
2102 r600_vram_scratch_fini(rdev); 2105 r600_vram_scratch_fini(rdev);
2103 radeon_gem_fini(rdev); 2106 radeon_gem_fini(rdev);
diff --git a/drivers/gpu/drm/radeon/rv770_dpm.c b/drivers/gpu/drm/radeon/rv770_dpm.c
index d914e04ea39a..094c67a29d0d 100644
--- a/drivers/gpu/drm/radeon/rv770_dpm.c
+++ b/drivers/gpu/drm/radeon/rv770_dpm.c
@@ -2099,12 +2099,14 @@ void rv770_dpm_setup_asic(struct radeon_device *rdev)
2099 2099
2100 rv770_enable_acpi_pm(rdev); 2100 rv770_enable_acpi_pm(rdev);
2101 2101
2102 if (rdev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_ASPM_L0s) 2102 if (radeon_aspm != 0) {
2103 rv770_enable_l0s(rdev); 2103 if (rdev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_ASPM_L0s)
2104 if (rdev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_ASPM_L1) 2104 rv770_enable_l0s(rdev);
2105 rv770_enable_l1(rdev); 2105 if (rdev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_ASPM_L1)
2106 if (rdev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_TURNOFFPLL_ASPML1) 2106 rv770_enable_l1(rdev);
2107 rv770_enable_pll_sleep_in_l1(rdev); 2107 if (rdev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_TURNOFFPLL_ASPML1)
2108 rv770_enable_pll_sleep_in_l1(rdev);
2109 }
2108} 2110}
2109 2111
2110void rv770_dpm_display_configuration_changed(struct radeon_device *rdev) 2112void rv770_dpm_display_configuration_changed(struct radeon_device *rdev)
@@ -2317,12 +2319,25 @@ int rv7xx_parse_power_table(struct radeon_device *rdev)
2317 return 0; 2319 return 0;
2318} 2320}
2319 2321
2322void rv770_get_engine_memory_ss(struct radeon_device *rdev)
2323{
2324 struct rv7xx_power_info *pi = rv770_get_pi(rdev);
2325 struct radeon_atom_ss ss;
2326
2327 pi->sclk_ss = radeon_atombios_get_asic_ss_info(rdev, &ss,
2328 ASIC_INTERNAL_ENGINE_SS, 0);
2329 pi->mclk_ss = radeon_atombios_get_asic_ss_info(rdev, &ss,
2330 ASIC_INTERNAL_MEMORY_SS, 0);
2331
2332 if (pi->sclk_ss || pi->mclk_ss)
2333 pi->dynamic_ss = true;
2334 else
2335 pi->dynamic_ss = false;
2336}
2337
2320int rv770_dpm_init(struct radeon_device *rdev) 2338int rv770_dpm_init(struct radeon_device *rdev)
2321{ 2339{
2322 struct rv7xx_power_info *pi; 2340 struct rv7xx_power_info *pi;
2323 int index = GetIndexIntoMasterTable(DATA, ASIC_InternalSS_Info);
2324 uint16_t data_offset, size;
2325 uint8_t frev, crev;
2326 struct atom_clock_dividers dividers; 2341 struct atom_clock_dividers dividers;
2327 int ret; 2342 int ret;
2328 2343
@@ -2367,16 +2382,7 @@ int rv770_dpm_init(struct radeon_device *rdev)
2367 pi->mvdd_control = 2382 pi->mvdd_control =
2368 radeon_atom_is_voltage_gpio(rdev, SET_VOLTAGE_TYPE_ASIC_MVDDC, 0); 2383 radeon_atom_is_voltage_gpio(rdev, SET_VOLTAGE_TYPE_ASIC_MVDDC, 0);
2369 2384
2370 if (atom_parse_data_header(rdev->mode_info.atom_context, index, &size, 2385 rv770_get_engine_memory_ss(rdev);
2371 &frev, &crev, &data_offset)) {
2372 pi->sclk_ss = true;
2373 pi->mclk_ss = true;
2374 pi->dynamic_ss = true;
2375 } else {
2376 pi->sclk_ss = false;
2377 pi->mclk_ss = false;
2378 pi->dynamic_ss = false;
2379 }
2380 2386
2381 pi->asi = RV770_ASI_DFLT; 2387 pi->asi = RV770_ASI_DFLT;
2382 pi->pasi = RV770_HASI_DFLT; 2388 pi->pasi = RV770_HASI_DFLT;
@@ -2391,8 +2397,7 @@ int rv770_dpm_init(struct radeon_device *rdev)
2391 2397
2392 pi->dynamic_pcie_gen2 = true; 2398 pi->dynamic_pcie_gen2 = true;
2393 2399
2394 if (pi->gfx_clock_gating && 2400 if (rdev->pm.int_thermal_type != THERMAL_TYPE_NONE)
2395 (rdev->pm.int_thermal_type != THERMAL_TYPE_NONE))
2396 pi->thermal_protection = true; 2401 pi->thermal_protection = true;
2397 else 2402 else
2398 pi->thermal_protection = false; 2403 pi->thermal_protection = false;
diff --git a/drivers/gpu/drm/radeon/rv770_dpm.h b/drivers/gpu/drm/radeon/rv770_dpm.h
index 96b1b2a62a8a..9244effc6b59 100644
--- a/drivers/gpu/drm/radeon/rv770_dpm.h
+++ b/drivers/gpu/drm/radeon/rv770_dpm.h
@@ -275,6 +275,7 @@ void rv770_set_uvd_clock_before_set_eng_clock(struct radeon_device *rdev,
275void rv770_set_uvd_clock_after_set_eng_clock(struct radeon_device *rdev, 275void rv770_set_uvd_clock_after_set_eng_clock(struct radeon_device *rdev,
276 struct radeon_ps *new_ps, 276 struct radeon_ps *new_ps,
277 struct radeon_ps *old_ps); 277 struct radeon_ps *old_ps);
278void rv770_get_engine_memory_ss(struct radeon_device *rdev);
278 279
279/* smc */ 280/* smc */
280int rv770_read_smc_soft_register(struct radeon_device *rdev, 281int rv770_read_smc_soft_register(struct radeon_device *rdev,
diff --git a/drivers/gpu/drm/radeon/si.c b/drivers/gpu/drm/radeon/si.c
index 234906709067..daa8d2df8ec5 100644
--- a/drivers/gpu/drm/radeon/si.c
+++ b/drivers/gpu/drm/radeon/si.c
@@ -22,7 +22,6 @@
22 * Authors: Alex Deucher 22 * Authors: Alex Deucher
23 */ 23 */
24#include <linux/firmware.h> 24#include <linux/firmware.h>
25#include <linux/platform_device.h>
26#include <linux/slab.h> 25#include <linux/slab.h>
27#include <linux/module.h> 26#include <linux/module.h>
28#include <drm/drmP.h> 27#include <drm/drmP.h>
@@ -1541,7 +1540,6 @@ static int si_mc_load_microcode(struct radeon_device *rdev)
1541 1540
1542static int si_init_microcode(struct radeon_device *rdev) 1541static int si_init_microcode(struct radeon_device *rdev)
1543{ 1542{
1544 struct platform_device *pdev;
1545 const char *chip_name; 1543 const char *chip_name;
1546 const char *rlc_chip_name; 1544 const char *rlc_chip_name;
1547 size_t pfp_req_size, me_req_size, ce_req_size, rlc_req_size, mc_req_size; 1545 size_t pfp_req_size, me_req_size, ce_req_size, rlc_req_size, mc_req_size;
@@ -1551,13 +1549,6 @@ static int si_init_microcode(struct radeon_device *rdev)
1551 1549
1552 DRM_DEBUG("\n"); 1550 DRM_DEBUG("\n");
1553 1551
1554 pdev = platform_device_register_simple("radeon_cp", 0, NULL, 0);
1555 err = IS_ERR(pdev);
1556 if (err) {
1557 printk(KERN_ERR "radeon_cp: Failed to register firmware\n");
1558 return -EINVAL;
1559 }
1560
1561 switch (rdev->family) { 1552 switch (rdev->family) {
1562 case CHIP_TAHITI: 1553 case CHIP_TAHITI:
1563 chip_name = "TAHITI"; 1554 chip_name = "TAHITI";
@@ -1615,7 +1606,7 @@ static int si_init_microcode(struct radeon_device *rdev)
1615 DRM_INFO("Loading %s Microcode\n", chip_name); 1606 DRM_INFO("Loading %s Microcode\n", chip_name);
1616 1607
1617 snprintf(fw_name, sizeof(fw_name), "radeon/%s_pfp.bin", chip_name); 1608 snprintf(fw_name, sizeof(fw_name), "radeon/%s_pfp.bin", chip_name);
1618 err = request_firmware(&rdev->pfp_fw, fw_name, &pdev->dev); 1609 err = request_firmware(&rdev->pfp_fw, fw_name, rdev->dev);
1619 if (err) 1610 if (err)
1620 goto out; 1611 goto out;
1621 if (rdev->pfp_fw->size != pfp_req_size) { 1612 if (rdev->pfp_fw->size != pfp_req_size) {
@@ -1627,7 +1618,7 @@ static int si_init_microcode(struct radeon_device *rdev)
1627 } 1618 }
1628 1619
1629 snprintf(fw_name, sizeof(fw_name), "radeon/%s_me.bin", chip_name); 1620 snprintf(fw_name, sizeof(fw_name), "radeon/%s_me.bin", chip_name);
1630 err = request_firmware(&rdev->me_fw, fw_name, &pdev->dev); 1621 err = request_firmware(&rdev->me_fw, fw_name, rdev->dev);
1631 if (err) 1622 if (err)
1632 goto out; 1623 goto out;
1633 if (rdev->me_fw->size != me_req_size) { 1624 if (rdev->me_fw->size != me_req_size) {
@@ -1638,7 +1629,7 @@ static int si_init_microcode(struct radeon_device *rdev)
1638 } 1629 }
1639 1630
1640 snprintf(fw_name, sizeof(fw_name), "radeon/%s_ce.bin", chip_name); 1631 snprintf(fw_name, sizeof(fw_name), "radeon/%s_ce.bin", chip_name);
1641 err = request_firmware(&rdev->ce_fw, fw_name, &pdev->dev); 1632 err = request_firmware(&rdev->ce_fw, fw_name, rdev->dev);
1642 if (err) 1633 if (err)
1643 goto out; 1634 goto out;
1644 if (rdev->ce_fw->size != ce_req_size) { 1635 if (rdev->ce_fw->size != ce_req_size) {
@@ -1649,7 +1640,7 @@ static int si_init_microcode(struct radeon_device *rdev)
1649 } 1640 }
1650 1641
1651 snprintf(fw_name, sizeof(fw_name), "radeon/%s_rlc.bin", rlc_chip_name); 1642 snprintf(fw_name, sizeof(fw_name), "radeon/%s_rlc.bin", rlc_chip_name);
1652 err = request_firmware(&rdev->rlc_fw, fw_name, &pdev->dev); 1643 err = request_firmware(&rdev->rlc_fw, fw_name, rdev->dev);
1653 if (err) 1644 if (err)
1654 goto out; 1645 goto out;
1655 if (rdev->rlc_fw->size != rlc_req_size) { 1646 if (rdev->rlc_fw->size != rlc_req_size) {
@@ -1660,7 +1651,7 @@ static int si_init_microcode(struct radeon_device *rdev)
1660 } 1651 }
1661 1652
1662 snprintf(fw_name, sizeof(fw_name), "radeon/%s_mc.bin", chip_name); 1653 snprintf(fw_name, sizeof(fw_name), "radeon/%s_mc.bin", chip_name);
1663 err = request_firmware(&rdev->mc_fw, fw_name, &pdev->dev); 1654 err = request_firmware(&rdev->mc_fw, fw_name, rdev->dev);
1664 if (err) 1655 if (err)
1665 goto out; 1656 goto out;
1666 if (rdev->mc_fw->size != mc_req_size) { 1657 if (rdev->mc_fw->size != mc_req_size) {
@@ -1671,10 +1662,14 @@ static int si_init_microcode(struct radeon_device *rdev)
1671 } 1662 }
1672 1663
1673 snprintf(fw_name, sizeof(fw_name), "radeon/%s_smc.bin", chip_name); 1664 snprintf(fw_name, sizeof(fw_name), "radeon/%s_smc.bin", chip_name);
1674 err = request_firmware(&rdev->smc_fw, fw_name, &pdev->dev); 1665 err = request_firmware(&rdev->smc_fw, fw_name, rdev->dev);
1675 if (err) 1666 if (err) {
1676 goto out; 1667 printk(KERN_ERR
1677 if (rdev->smc_fw->size != smc_req_size) { 1668 "smc: error loading firmware \"%s\"\n",
1669 fw_name);
1670 release_firmware(rdev->smc_fw);
1671 rdev->smc_fw = NULL;
1672 } else if (rdev->smc_fw->size != smc_req_size) {
1678 printk(KERN_ERR 1673 printk(KERN_ERR
1679 "si_smc: Bogus length %zu in firmware \"%s\"\n", 1674 "si_smc: Bogus length %zu in firmware \"%s\"\n",
1680 rdev->smc_fw->size, fw_name); 1675 rdev->smc_fw->size, fw_name);
@@ -1682,8 +1677,6 @@ static int si_init_microcode(struct radeon_device *rdev)
1682 } 1677 }
1683 1678
1684out: 1679out:
1685 platform_device_unregister(pdev);
1686
1687 if (err) { 1680 if (err) {
1688 if (err != -EINVAL) 1681 if (err != -EINVAL)
1689 printk(KERN_ERR 1682 printk(KERN_ERR
@@ -4401,6 +4394,270 @@ void si_vm_fini(struct radeon_device *rdev)
4401} 4394}
4402 4395
4403/** 4396/**
4397 * si_vm_decode_fault - print human readable fault info
4398 *
4399 * @rdev: radeon_device pointer
4400 * @status: VM_CONTEXT1_PROTECTION_FAULT_STATUS register value
4401 * @addr: VM_CONTEXT1_PROTECTION_FAULT_ADDR register value
4402 *
4403 * Print human readable fault information (SI).
4404 */
4405static void si_vm_decode_fault(struct radeon_device *rdev,
4406 u32 status, u32 addr)
4407{
4408 u32 mc_id = (status & MEMORY_CLIENT_ID_MASK) >> MEMORY_CLIENT_ID_SHIFT;
4409 u32 vmid = (status & FAULT_VMID_MASK) >> FAULT_VMID_SHIFT;
4410 u32 protections = (status & PROTECTIONS_MASK) >> PROTECTIONS_SHIFT;
4411 char *block;
4412
4413 if (rdev->family == CHIP_TAHITI) {
4414 switch (mc_id) {
4415 case 160:
4416 case 144:
4417 case 96:
4418 case 80:
4419 case 224:
4420 case 208:
4421 case 32:
4422 case 16:
4423 block = "CB";
4424 break;
4425 case 161:
4426 case 145:
4427 case 97:
4428 case 81:
4429 case 225:
4430 case 209:
4431 case 33:
4432 case 17:
4433 block = "CB_FMASK";
4434 break;
4435 case 162:
4436 case 146:
4437 case 98:
4438 case 82:
4439 case 226:
4440 case 210:
4441 case 34:
4442 case 18:
4443 block = "CB_CMASK";
4444 break;
4445 case 163:
4446 case 147:
4447 case 99:
4448 case 83:
4449 case 227:
4450 case 211:
4451 case 35:
4452 case 19:
4453 block = "CB_IMMED";
4454 break;
4455 case 164:
4456 case 148:
4457 case 100:
4458 case 84:
4459 case 228:
4460 case 212:
4461 case 36:
4462 case 20:
4463 block = "DB";
4464 break;
4465 case 165:
4466 case 149:
4467 case 101:
4468 case 85:
4469 case 229:
4470 case 213:
4471 case 37:
4472 case 21:
4473 block = "DB_HTILE";
4474 break;
4475 case 167:
4476 case 151:
4477 case 103:
4478 case 87:
4479 case 231:
4480 case 215:
4481 case 39:
4482 case 23:
4483 block = "DB_STEN";
4484 break;
4485 case 72:
4486 case 68:
4487 case 64:
4488 case 8:
4489 case 4:
4490 case 0:
4491 case 136:
4492 case 132:
4493 case 128:
4494 case 200:
4495 case 196:
4496 case 192:
4497 block = "TC";
4498 break;
4499 case 112:
4500 case 48:
4501 block = "CP";
4502 break;
4503 case 49:
4504 case 177:
4505 case 50:
4506 case 178:
4507 block = "SH";
4508 break;
4509 case 53:
4510 case 190:
4511 block = "VGT";
4512 break;
4513 case 117:
4514 block = "IH";
4515 break;
4516 case 51:
4517 case 115:
4518 block = "RLC";
4519 break;
4520 case 119:
4521 case 183:
4522 block = "DMA0";
4523 break;
4524 case 61:
4525 block = "DMA1";
4526 break;
4527 case 248:
4528 case 120:
4529 block = "HDP";
4530 break;
4531 default:
4532 block = "unknown";
4533 break;
4534 }
4535 } else {
4536 switch (mc_id) {
4537 case 32:
4538 case 16:
4539 case 96:
4540 case 80:
4541 case 160:
4542 case 144:
4543 case 224:
4544 case 208:
4545 block = "CB";
4546 break;
4547 case 33:
4548 case 17:
4549 case 97:
4550 case 81:
4551 case 161:
4552 case 145:
4553 case 225:
4554 case 209:
4555 block = "CB_FMASK";
4556 break;
4557 case 34:
4558 case 18:
4559 case 98:
4560 case 82:
4561 case 162:
4562 case 146:
4563 case 226:
4564 case 210:
4565 block = "CB_CMASK";
4566 break;
4567 case 35:
4568 case 19:
4569 case 99:
4570 case 83:
4571 case 163:
4572 case 147:
4573 case 227:
4574 case 211:
4575 block = "CB_IMMED";
4576 break;
4577 case 36:
4578 case 20:
4579 case 100:
4580 case 84:
4581 case 164:
4582 case 148:
4583 case 228:
4584 case 212:
4585 block = "DB";
4586 break;
4587 case 37:
4588 case 21:
4589 case 101:
4590 case 85:
4591 case 165:
4592 case 149:
4593 case 229:
4594 case 213:
4595 block = "DB_HTILE";
4596 break;
4597 case 39:
4598 case 23:
4599 case 103:
4600 case 87:
4601 case 167:
4602 case 151:
4603 case 231:
4604 case 215:
4605 block = "DB_STEN";
4606 break;
4607 case 72:
4608 case 68:
4609 case 8:
4610 case 4:
4611 case 136:
4612 case 132:
4613 case 200:
4614 case 196:
4615 block = "TC";
4616 break;
4617 case 112:
4618 case 48:
4619 block = "CP";
4620 break;
4621 case 49:
4622 case 177:
4623 case 50:
4624 case 178:
4625 block = "SH";
4626 break;
4627 case 53:
4628 block = "VGT";
4629 break;
4630 case 117:
4631 block = "IH";
4632 break;
4633 case 51:
4634 case 115:
4635 block = "RLC";
4636 break;
4637 case 119:
4638 case 183:
4639 block = "DMA0";
4640 break;
4641 case 61:
4642 block = "DMA1";
4643 break;
4644 case 248:
4645 case 120:
4646 block = "HDP";
4647 break;
4648 default:
4649 block = "unknown";
4650 break;
4651 }
4652 }
4653
4654 printk("VM fault (0x%02x, vmid %d) at page %u, %s from %s (%d)\n",
4655 protections, vmid, addr,
4656 (status & MEMORY_CLIENT_RW_MASK) ? "write" : "read",
4657 block, mc_id);
4658}
4659
4660/**
4404 * si_vm_set_page - update the page tables using the CP 4661 * si_vm_set_page - update the page tables using the CP
4405 * 4662 *
4406 * @rdev: radeon_device pointer 4663 * @rdev: radeon_device pointer
@@ -4962,14 +5219,12 @@ static void si_enable_mc_ls(struct radeon_device *rdev,
4962 5219
4963static void si_init_cg(struct radeon_device *rdev) 5220static void si_init_cg(struct radeon_device *rdev)
4964{ 5221{
4965 bool has_uvd = true;
4966
4967 si_enable_mgcg(rdev, true); 5222 si_enable_mgcg(rdev, true);
4968 si_enable_cgcg(rdev, true); 5223 si_enable_cgcg(rdev, false);
4969 /* disable MC LS on Tahiti */ 5224 /* disable MC LS on Tahiti */
4970 if (rdev->family == CHIP_TAHITI) 5225 if (rdev->family == CHIP_TAHITI)
4971 si_enable_mc_ls(rdev, false); 5226 si_enable_mc_ls(rdev, false);
4972 if (has_uvd) { 5227 if (rdev->has_uvd) {
4973 si_enable_uvd_mgcg(rdev, true); 5228 si_enable_uvd_mgcg(rdev, true);
4974 si_init_uvd_internal_cg(rdev); 5229 si_init_uvd_internal_cg(rdev);
4975 } 5230 }
@@ -4977,9 +5232,7 @@ static void si_init_cg(struct radeon_device *rdev)
4977 5232
4978static void si_fini_cg(struct radeon_device *rdev) 5233static void si_fini_cg(struct radeon_device *rdev)
4979{ 5234{
4980 bool has_uvd = true; 5235 if (rdev->has_uvd)
4981
4982 if (has_uvd)
4983 si_enable_uvd_mgcg(rdev, false); 5236 si_enable_uvd_mgcg(rdev, false);
4984 si_enable_cgcg(rdev, false); 5237 si_enable_cgcg(rdev, false);
4985 si_enable_mgcg(rdev, false); 5238 si_enable_mgcg(rdev, false);
@@ -4988,11 +5241,11 @@ static void si_fini_cg(struct radeon_device *rdev)
4988static void si_init_pg(struct radeon_device *rdev) 5241static void si_init_pg(struct radeon_device *rdev)
4989{ 5242{
4990 bool has_pg = false; 5243 bool has_pg = false;
4991 5244#if 0
4992 /* only cape verde supports PG */ 5245 /* only cape verde supports PG */
4993 if (rdev->family == CHIP_VERDE) 5246 if (rdev->family == CHIP_VERDE)
4994 has_pg = true; 5247 has_pg = true;
4995 5248#endif
4996 if (has_pg) { 5249 if (has_pg) {
4997 si_init_ao_cu_mask(rdev); 5250 si_init_ao_cu_mask(rdev);
4998 si_init_dma_pg(rdev); 5251 si_init_dma_pg(rdev);
@@ -5766,6 +6019,7 @@ int si_irq_process(struct radeon_device *rdev)
5766 u32 ring_index; 6019 u32 ring_index;
5767 bool queue_hotplug = false; 6020 bool queue_hotplug = false;
5768 bool queue_thermal = false; 6021 bool queue_thermal = false;
6022 u32 status, addr;
5769 6023
5770 if (!rdev->ih.enabled || rdev->shutdown) 6024 if (!rdev->ih.enabled || rdev->shutdown)
5771 return IRQ_NONE; 6025 return IRQ_NONE;
@@ -6001,11 +6255,14 @@ restart_ih:
6001 break; 6255 break;
6002 case 146: 6256 case 146:
6003 case 147: 6257 case 147:
6258 addr = RREG32(VM_CONTEXT1_PROTECTION_FAULT_ADDR);
6259 status = RREG32(VM_CONTEXT1_PROTECTION_FAULT_STATUS);
6004 dev_err(rdev->dev, "GPU fault detected: %d 0x%08x\n", src_id, src_data); 6260 dev_err(rdev->dev, "GPU fault detected: %d 0x%08x\n", src_id, src_data);
6005 dev_err(rdev->dev, " VM_CONTEXT1_PROTECTION_FAULT_ADDR 0x%08X\n", 6261 dev_err(rdev->dev, " VM_CONTEXT1_PROTECTION_FAULT_ADDR 0x%08X\n",
6006 RREG32(VM_CONTEXT1_PROTECTION_FAULT_ADDR)); 6262 addr);
6007 dev_err(rdev->dev, " VM_CONTEXT1_PROTECTION_FAULT_STATUS 0x%08X\n", 6263 dev_err(rdev->dev, " VM_CONTEXT1_PROTECTION_FAULT_STATUS 0x%08X\n",
6008 RREG32(VM_CONTEXT1_PROTECTION_FAULT_STATUS)); 6264 status);
6265 si_vm_decode_fault(rdev, status, addr);
6009 /* reset addr and status */ 6266 /* reset addr and status */
6010 WREG32_P(VM_CONTEXT1_CNTL2, 1, ~1); 6267 WREG32_P(VM_CONTEXT1_CNTL2, 1, ~1);
6011 break; 6268 break;
@@ -6165,6 +6422,8 @@ static int si_startup(struct radeon_device *rdev)
6165 /* enable aspm */ 6422 /* enable aspm */
6166 si_program_aspm(rdev); 6423 si_program_aspm(rdev);
6167 6424
6425 si_mc_program(rdev);
6426
6168 if (!rdev->me_fw || !rdev->pfp_fw || !rdev->ce_fw || 6427 if (!rdev->me_fw || !rdev->pfp_fw || !rdev->ce_fw ||
6169 !rdev->rlc_fw || !rdev->mc_fw) { 6428 !rdev->rlc_fw || !rdev->mc_fw) {
6170 r = si_init_microcode(rdev); 6429 r = si_init_microcode(rdev);
@@ -6184,7 +6443,6 @@ static int si_startup(struct radeon_device *rdev)
6184 if (r) 6443 if (r)
6185 return r; 6444 return r;
6186 6445
6187 si_mc_program(rdev);
6188 r = si_pcie_gart_enable(rdev); 6446 r = si_pcie_gart_enable(rdev);
6189 if (r) 6447 if (r)
6190 return r; 6448 return r;
@@ -6368,7 +6626,7 @@ int si_suspend(struct radeon_device *rdev)
6368 si_cp_enable(rdev, false); 6626 si_cp_enable(rdev, false);
6369 cayman_dma_stop(rdev); 6627 cayman_dma_stop(rdev);
6370 if (rdev->has_uvd) { 6628 if (rdev->has_uvd) {
6371 r600_uvd_rbc_stop(rdev); 6629 r600_uvd_stop(rdev);
6372 radeon_uvd_suspend(rdev); 6630 radeon_uvd_suspend(rdev);
6373 } 6631 }
6374 si_irq_suspend(rdev); 6632 si_irq_suspend(rdev);
@@ -6510,8 +6768,10 @@ void si_fini(struct radeon_device *rdev)
6510 radeon_vm_manager_fini(rdev); 6768 radeon_vm_manager_fini(rdev);
6511 radeon_ib_pool_fini(rdev); 6769 radeon_ib_pool_fini(rdev);
6512 radeon_irq_kms_fini(rdev); 6770 radeon_irq_kms_fini(rdev);
6513 if (rdev->has_uvd) 6771 if (rdev->has_uvd) {
6772 r600_uvd_stop(rdev);
6514 radeon_uvd_fini(rdev); 6773 radeon_uvd_fini(rdev);
6774 }
6515 si_pcie_gart_fini(rdev); 6775 si_pcie_gart_fini(rdev);
6516 r600_vram_scratch_fini(rdev); 6776 r600_vram_scratch_fini(rdev);
6517 radeon_gem_fini(rdev); 6777 radeon_gem_fini(rdev);
@@ -6796,6 +7056,9 @@ static void si_program_aspm(struct radeon_device *rdev)
6796 bool disable_l0s = false, disable_l1 = false, disable_plloff_in_l1 = false; 7056 bool disable_l0s = false, disable_l1 = false, disable_plloff_in_l1 = false;
6797 bool disable_clkreq = false; 7057 bool disable_clkreq = false;
6798 7058
7059 if (radeon_aspm == 0)
7060 return;
7061
6799 if (!(rdev->flags & RADEON_IS_PCIE)) 7062 if (!(rdev->flags & RADEON_IS_PCIE))
6800 return; 7063 return;
6801 7064
diff --git a/drivers/gpu/drm/radeon/si_dpm.c b/drivers/gpu/drm/radeon/si_dpm.c
index 73aaa2e4c312..88699e3cd868 100644
--- a/drivers/gpu/drm/radeon/si_dpm.c
+++ b/drivers/gpu/drm/radeon/si_dpm.c
@@ -37,8 +37,6 @@
37 37
38#define SMC_RAM_END 0x20000 38#define SMC_RAM_END 0x20000
39 39
40#define DDR3_DRAM_ROWS 0x2000
41
42#define SCLK_MIN_DEEPSLEEP_FREQ 1350 40#define SCLK_MIN_DEEPSLEEP_FREQ 1350
43 41
44static const struct si_cac_config_reg cac_weights_tahiti[] = 42static const struct si_cac_config_reg cac_weights_tahiti[] =
@@ -1767,8 +1765,9 @@ static void si_calculate_leakage_for_v_and_t_formula(const struct ni_leakage_coe
1767{ 1765{
1768 s64 kt, kv, leakage_w, i_leakage, vddc; 1766 s64 kt, kv, leakage_w, i_leakage, vddc;
1769 s64 temperature, t_slope, t_intercept, av, bv, t_ref; 1767 s64 temperature, t_slope, t_intercept, av, bv, t_ref;
1768 s64 tmp;
1770 1769
1771 i_leakage = drm_int2fixp(ileakage / 100); 1770 i_leakage = div64_s64(drm_int2fixp(ileakage), 100);
1772 vddc = div64_s64(drm_int2fixp(v), 1000); 1771 vddc = div64_s64(drm_int2fixp(v), 1000);
1773 temperature = div64_s64(drm_int2fixp(t), 1000); 1772 temperature = div64_s64(drm_int2fixp(t), 1000);
1774 1773
@@ -1778,8 +1777,9 @@ static void si_calculate_leakage_for_v_and_t_formula(const struct ni_leakage_coe
1778 bv = div64_s64(drm_int2fixp(coeff->bv), 100000000); 1777 bv = div64_s64(drm_int2fixp(coeff->bv), 100000000);
1779 t_ref = drm_int2fixp(coeff->t_ref); 1778 t_ref = drm_int2fixp(coeff->t_ref);
1780 1779
1781 kt = drm_fixp_div(drm_fixp_exp(drm_fixp_mul(drm_fixp_mul(t_slope, vddc) + t_intercept, temperature)), 1780 tmp = drm_fixp_mul(t_slope, vddc) + t_intercept;
1782 drm_fixp_exp(drm_fixp_mul(drm_fixp_mul(t_slope, vddc) + t_intercept, t_ref))); 1781 kt = drm_fixp_exp(drm_fixp_mul(tmp, temperature));
1782 kt = drm_fixp_div(kt, drm_fixp_exp(drm_fixp_mul(tmp, t_ref)));
1783 kv = drm_fixp_mul(av, drm_fixp_exp(drm_fixp_mul(bv, vddc))); 1783 kv = drm_fixp_mul(av, drm_fixp_exp(drm_fixp_mul(bv, vddc)));
1784 1784
1785 leakage_w = drm_fixp_mul(drm_fixp_mul(drm_fixp_mul(i_leakage, kt), kv), vddc); 1785 leakage_w = drm_fixp_mul(drm_fixp_mul(drm_fixp_mul(i_leakage, kt), kv), vddc);
@@ -1931,6 +1931,7 @@ static void si_initialize_powertune_defaults(struct radeon_device *rdev)
1931 si_pi->cac_override = cac_override_pitcairn; 1931 si_pi->cac_override = cac_override_pitcairn;
1932 si_pi->powertune_data = &powertune_data_pitcairn; 1932 si_pi->powertune_data = &powertune_data_pitcairn;
1933 si_pi->dte_data = dte_data_pitcairn; 1933 si_pi->dte_data = dte_data_pitcairn;
1934 break;
1934 } 1935 }
1935 } else if (rdev->family == CHIP_VERDE) { 1936 } else if (rdev->family == CHIP_VERDE) {
1936 si_pi->lcac_config = lcac_cape_verde; 1937 si_pi->lcac_config = lcac_cape_verde;
@@ -1941,6 +1942,7 @@ static void si_initialize_powertune_defaults(struct radeon_device *rdev)
1941 case 0x683B: 1942 case 0x683B:
1942 case 0x683F: 1943 case 0x683F:
1943 case 0x6829: 1944 case 0x6829:
1945 case 0x6835:
1944 si_pi->cac_weights = cac_weights_cape_verde_pro; 1946 si_pi->cac_weights = cac_weights_cape_verde_pro;
1945 si_pi->dte_data = dte_data_cape_verde; 1947 si_pi->dte_data = dte_data_cape_verde;
1946 break; 1948 break;
@@ -2901,7 +2903,8 @@ static void si_apply_state_adjust_rules(struct radeon_device *rdev,
2901{ 2903{
2902 struct ni_ps *ps = ni_get_ps(rps); 2904 struct ni_ps *ps = ni_get_ps(rps);
2903 struct radeon_clock_and_voltage_limits *max_limits; 2905 struct radeon_clock_and_voltage_limits *max_limits;
2904 bool disable_mclk_switching; 2906 bool disable_mclk_switching = false;
2907 bool disable_sclk_switching = false;
2905 u32 mclk, sclk; 2908 u32 mclk, sclk;
2906 u16 vddc, vddci; 2909 u16 vddc, vddci;
2907 int i; 2910 int i;
@@ -2909,8 +2912,11 @@ static void si_apply_state_adjust_rules(struct radeon_device *rdev,
2909 if ((rdev->pm.dpm.new_active_crtc_count > 1) || 2912 if ((rdev->pm.dpm.new_active_crtc_count > 1) ||
2910 ni_dpm_vblank_too_short(rdev)) 2913 ni_dpm_vblank_too_short(rdev))
2911 disable_mclk_switching = true; 2914 disable_mclk_switching = true;
2912 else 2915
2913 disable_mclk_switching = false; 2916 if (rps->vclk || rps->dclk) {
2917 disable_mclk_switching = true;
2918 disable_sclk_switching = true;
2919 }
2914 2920
2915 if (rdev->pm.dpm.ac_power) 2921 if (rdev->pm.dpm.ac_power)
2916 max_limits = &rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac; 2922 max_limits = &rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac;
@@ -2938,27 +2944,43 @@ static void si_apply_state_adjust_rules(struct radeon_device *rdev,
2938 2944
2939 if (disable_mclk_switching) { 2945 if (disable_mclk_switching) {
2940 mclk = ps->performance_levels[ps->performance_level_count - 1].mclk; 2946 mclk = ps->performance_levels[ps->performance_level_count - 1].mclk;
2941 sclk = ps->performance_levels[0].sclk;
2942 vddc = ps->performance_levels[0].vddc;
2943 vddci = ps->performance_levels[ps->performance_level_count - 1].vddci; 2947 vddci = ps->performance_levels[ps->performance_level_count - 1].vddci;
2944 } else { 2948 } else {
2945 sclk = ps->performance_levels[0].sclk;
2946 mclk = ps->performance_levels[0].mclk; 2949 mclk = ps->performance_levels[0].mclk;
2947 vddc = ps->performance_levels[0].vddc;
2948 vddci = ps->performance_levels[0].vddci; 2950 vddci = ps->performance_levels[0].vddci;
2949 } 2951 }
2950 2952
2953 if (disable_sclk_switching) {
2954 sclk = ps->performance_levels[ps->performance_level_count - 1].sclk;
2955 vddc = ps->performance_levels[ps->performance_level_count - 1].vddc;
2956 } else {
2957 sclk = ps->performance_levels[0].sclk;
2958 vddc = ps->performance_levels[0].vddc;
2959 }
2960
2951 /* adjusted low state */ 2961 /* adjusted low state */
2952 ps->performance_levels[0].sclk = sclk; 2962 ps->performance_levels[0].sclk = sclk;
2953 ps->performance_levels[0].mclk = mclk; 2963 ps->performance_levels[0].mclk = mclk;
2954 ps->performance_levels[0].vddc = vddc; 2964 ps->performance_levels[0].vddc = vddc;
2955 ps->performance_levels[0].vddci = vddci; 2965 ps->performance_levels[0].vddci = vddci;
2956 2966
2957 for (i = 1; i < ps->performance_level_count; i++) { 2967 if (disable_sclk_switching) {
2958 if (ps->performance_levels[i].sclk < ps->performance_levels[i - 1].sclk) 2968 sclk = ps->performance_levels[0].sclk;
2959 ps->performance_levels[i].sclk = ps->performance_levels[i - 1].sclk; 2969 for (i = 1; i < ps->performance_level_count; i++) {
2960 if (ps->performance_levels[i].vddc < ps->performance_levels[i - 1].vddc) 2970 if (sclk < ps->performance_levels[i].sclk)
2961 ps->performance_levels[i].vddc = ps->performance_levels[i - 1].vddc; 2971 sclk = ps->performance_levels[i].sclk;
2972 }
2973 for (i = 0; i < ps->performance_level_count; i++) {
2974 ps->performance_levels[i].sclk = sclk;
2975 ps->performance_levels[i].vddc = vddc;
2976 }
2977 } else {
2978 for (i = 1; i < ps->performance_level_count; i++) {
2979 if (ps->performance_levels[i].sclk < ps->performance_levels[i - 1].sclk)
2980 ps->performance_levels[i].sclk = ps->performance_levels[i - 1].sclk;
2981 if (ps->performance_levels[i].vddc < ps->performance_levels[i - 1].vddc)
2982 ps->performance_levels[i].vddc = ps->performance_levels[i - 1].vddc;
2983 }
2962 } 2984 }
2963 2985
2964 if (disable_mclk_switching) { 2986 if (disable_mclk_switching) {
@@ -3237,10 +3259,10 @@ int si_dpm_force_performance_level(struct radeon_device *rdev,
3237{ 3259{
3238 struct radeon_ps *rps = rdev->pm.dpm.current_ps; 3260 struct radeon_ps *rps = rdev->pm.dpm.current_ps;
3239 struct ni_ps *ps = ni_get_ps(rps); 3261 struct ni_ps *ps = ni_get_ps(rps);
3240 u32 levels; 3262 u32 levels = ps->performance_level_count;
3241 3263
3242 if (level == RADEON_DPM_FORCED_LEVEL_HIGH) { 3264 if (level == RADEON_DPM_FORCED_LEVEL_HIGH) {
3243 if (si_send_msg_to_smc_with_parameter(rdev, PPSMC_MSG_SetEnabledLevels, 0) != PPSMC_Result_OK) 3265 if (si_send_msg_to_smc_with_parameter(rdev, PPSMC_MSG_SetEnabledLevels, levels) != PPSMC_Result_OK)
3244 return -EINVAL; 3266 return -EINVAL;
3245 3267
3246 if (si_send_msg_to_smc_with_parameter(rdev, PPSMC_MSG_SetForcedLevels, 1) != PPSMC_Result_OK) 3268 if (si_send_msg_to_smc_with_parameter(rdev, PPSMC_MSG_SetForcedLevels, 1) != PPSMC_Result_OK)
@@ -3249,14 +3271,13 @@ int si_dpm_force_performance_level(struct radeon_device *rdev,
3249 if (si_send_msg_to_smc_with_parameter(rdev, PPSMC_MSG_SetForcedLevels, 0) != PPSMC_Result_OK) 3271 if (si_send_msg_to_smc_with_parameter(rdev, PPSMC_MSG_SetForcedLevels, 0) != PPSMC_Result_OK)
3250 return -EINVAL; 3272 return -EINVAL;
3251 3273
3252 levels = ps->performance_level_count - 1; 3274 if (si_send_msg_to_smc_with_parameter(rdev, PPSMC_MSG_SetEnabledLevels, 1) != PPSMC_Result_OK)
3253 if (si_send_msg_to_smc_with_parameter(rdev, PPSMC_MSG_SetEnabledLevels, levels) != PPSMC_Result_OK)
3254 return -EINVAL; 3275 return -EINVAL;
3255 } else if (level == RADEON_DPM_FORCED_LEVEL_AUTO) { 3276 } else if (level == RADEON_DPM_FORCED_LEVEL_AUTO) {
3256 if (si_send_msg_to_smc_with_parameter(rdev, PPSMC_MSG_SetForcedLevels, 0) != PPSMC_Result_OK) 3277 if (si_send_msg_to_smc_with_parameter(rdev, PPSMC_MSG_SetForcedLevels, 0) != PPSMC_Result_OK)
3257 return -EINVAL; 3278 return -EINVAL;
3258 3279
3259 if (si_send_msg_to_smc_with_parameter(rdev, PPSMC_MSG_SetEnabledLevels, 0) != PPSMC_Result_OK) 3280 if (si_send_msg_to_smc_with_parameter(rdev, PPSMC_MSG_SetEnabledLevels, levels) != PPSMC_Result_OK)
3260 return -EINVAL; 3281 return -EINVAL;
3261 } 3282 }
3262 3283
@@ -3620,8 +3641,12 @@ static void si_enable_display_gap(struct radeon_device *rdev)
3620{ 3641{
3621 u32 tmp = RREG32(CG_DISPLAY_GAP_CNTL); 3642 u32 tmp = RREG32(CG_DISPLAY_GAP_CNTL);
3622 3643
3644 tmp &= ~(DISP1_GAP_MASK | DISP2_GAP_MASK);
3645 tmp |= (DISP1_GAP(R600_PM_DISPLAY_GAP_IGNORE) |
3646 DISP2_GAP(R600_PM_DISPLAY_GAP_IGNORE));
3647
3623 tmp &= ~(DISP1_GAP_MCHG_MASK | DISP2_GAP_MCHG_MASK); 3648 tmp &= ~(DISP1_GAP_MCHG_MASK | DISP2_GAP_MCHG_MASK);
3624 tmp |= (DISP1_GAP_MCHG(R600_PM_DISPLAY_GAP_IGNORE) | 3649 tmp |= (DISP1_GAP_MCHG(R600_PM_DISPLAY_GAP_VBLANK) |
3625 DISP2_GAP_MCHG(R600_PM_DISPLAY_GAP_IGNORE)); 3650 DISP2_GAP_MCHG(R600_PM_DISPLAY_GAP_IGNORE));
3626 WREG32(CG_DISPLAY_GAP_CNTL, tmp); 3651 WREG32(CG_DISPLAY_GAP_CNTL, tmp);
3627} 3652}
@@ -4036,16 +4061,15 @@ static int si_force_switch_to_arb_f0(struct radeon_device *rdev)
4036static u32 si_calculate_memory_refresh_rate(struct radeon_device *rdev, 4061static u32 si_calculate_memory_refresh_rate(struct radeon_device *rdev,
4037 u32 engine_clock) 4062 u32 engine_clock)
4038{ 4063{
4039 struct rv7xx_power_info *pi = rv770_get_pi(rdev);
4040 u32 dram_rows; 4064 u32 dram_rows;
4041 u32 dram_refresh_rate; 4065 u32 dram_refresh_rate;
4042 u32 mc_arb_rfsh_rate; 4066 u32 mc_arb_rfsh_rate;
4043 u32 tmp = (RREG32(MC_ARB_RAMCFG) & NOOFROWS_MASK) >> NOOFROWS_SHIFT; 4067 u32 tmp = (RREG32(MC_ARB_RAMCFG) & NOOFROWS_MASK) >> NOOFROWS_SHIFT;
4044 4068
4045 if (pi->mem_gddr5) 4069 if (tmp >= 4)
4046 dram_rows = 1 << (tmp + 10); 4070 dram_rows = 16384;
4047 else 4071 else
4048 dram_rows = DDR3_DRAM_ROWS; 4072 dram_rows = 1 << (tmp + 10);
4049 4073
4050 dram_refresh_rate = 1 << ((RREG32(MC_SEQ_MISC0) & 0x3) + 3); 4074 dram_refresh_rate = 1 << ((RREG32(MC_SEQ_MISC0) & 0x3) + 3);
4051 mc_arb_rfsh_rate = ((engine_clock * 10) * dram_refresh_rate / dram_rows - 32) / 64; 4075 mc_arb_rfsh_rate = ((engine_clock * 10) * dram_refresh_rate / dram_rows - 32) / 64;
@@ -6013,16 +6037,11 @@ int si_dpm_set_power_state(struct radeon_device *rdev)
6013 return ret; 6037 return ret;
6014 } 6038 }
6015 6039
6016#if 0
6017 /* XXX */
6018 ret = si_dpm_force_performance_level(rdev, RADEON_DPM_FORCED_LEVEL_AUTO); 6040 ret = si_dpm_force_performance_level(rdev, RADEON_DPM_FORCED_LEVEL_AUTO);
6019 if (ret) { 6041 if (ret) {
6020 DRM_ERROR("si_dpm_force_performance_level failed\n"); 6042 DRM_ERROR("si_dpm_force_performance_level failed\n");
6021 return ret; 6043 return ret;
6022 } 6044 }
6023#else
6024 rdev->pm.dpm.forced_level = RADEON_DPM_FORCED_LEVEL_AUTO;
6025#endif
6026 6045
6027 return 0; 6046 return 0;
6028} 6047}
@@ -6254,9 +6273,6 @@ int si_dpm_init(struct radeon_device *rdev)
6254 struct evergreen_power_info *eg_pi; 6273 struct evergreen_power_info *eg_pi;
6255 struct ni_power_info *ni_pi; 6274 struct ni_power_info *ni_pi;
6256 struct si_power_info *si_pi; 6275 struct si_power_info *si_pi;
6257 int index = GetIndexIntoMasterTable(DATA, ASIC_InternalSS_Info);
6258 u16 data_offset, size;
6259 u8 frev, crev;
6260 struct atom_clock_dividers dividers; 6276 struct atom_clock_dividers dividers;
6261 int ret; 6277 int ret;
6262 u32 mask; 6278 u32 mask;
@@ -6347,16 +6363,7 @@ int si_dpm_init(struct radeon_device *rdev)
6347 si_pi->vddc_phase_shed_control = 6363 si_pi->vddc_phase_shed_control =
6348 radeon_atom_is_voltage_gpio(rdev, SET_VOLTAGE_TYPE_ASIC_VDDC, VOLTAGE_OBJ_PHASE_LUT); 6364 radeon_atom_is_voltage_gpio(rdev, SET_VOLTAGE_TYPE_ASIC_VDDC, VOLTAGE_OBJ_PHASE_LUT);
6349 6365
6350 if (atom_parse_data_header(rdev->mode_info.atom_context, index, &size, 6366 rv770_get_engine_memory_ss(rdev);
6351 &frev, &crev, &data_offset)) {
6352 pi->sclk_ss = true;
6353 pi->mclk_ss = true;
6354 pi->dynamic_ss = true;
6355 } else {
6356 pi->sclk_ss = false;
6357 pi->mclk_ss = false;
6358 pi->dynamic_ss = true;
6359 }
6360 6367
6361 pi->asi = RV770_ASI_DFLT; 6368 pi->asi = RV770_ASI_DFLT;
6362 pi->pasi = CYPRESS_HASI_DFLT; 6369 pi->pasi = CYPRESS_HASI_DFLT;
@@ -6367,8 +6374,7 @@ int si_dpm_init(struct radeon_device *rdev)
6367 eg_pi->sclk_deep_sleep = true; 6374 eg_pi->sclk_deep_sleep = true;
6368 si_pi->sclk_deep_sleep_above_low = false; 6375 si_pi->sclk_deep_sleep_above_low = false;
6369 6376
6370 if (pi->gfx_clock_gating && 6377 if (rdev->pm.int_thermal_type != THERMAL_TYPE_NONE)
6371 (rdev->pm.int_thermal_type != THERMAL_TYPE_NONE))
6372 pi->thermal_protection = true; 6378 pi->thermal_protection = true;
6373 else 6379 else
6374 pi->thermal_protection = false; 6380 pi->thermal_protection = false;
diff --git a/drivers/gpu/drm/radeon/sid.h b/drivers/gpu/drm/radeon/sid.h
index 12a20eb77d0c..2c8da27a929f 100644
--- a/drivers/gpu/drm/radeon/sid.h
+++ b/drivers/gpu/drm/radeon/sid.h
@@ -367,6 +367,20 @@
367 367
368#define VM_CONTEXT1_PROTECTION_FAULT_ADDR 0x14FC 368#define VM_CONTEXT1_PROTECTION_FAULT_ADDR 0x14FC
369#define VM_CONTEXT1_PROTECTION_FAULT_STATUS 0x14DC 369#define VM_CONTEXT1_PROTECTION_FAULT_STATUS 0x14DC
370#define PROTECTIONS_MASK (0xf << 0)
371#define PROTECTIONS_SHIFT 0
372 /* bit 0: range
373 * bit 1: pde0
374 * bit 2: valid
375 * bit 3: read
376 * bit 4: write
377 */
378#define MEMORY_CLIENT_ID_MASK (0xff << 12)
379#define MEMORY_CLIENT_ID_SHIFT 12
380#define MEMORY_CLIENT_RW_MASK (1 << 24)
381#define MEMORY_CLIENT_RW_SHIFT 24
382#define FAULT_VMID_MASK (0xf << 25)
383#define FAULT_VMID_SHIFT 25
370 384
371#define VM_INVALIDATE_REQUEST 0x1478 385#define VM_INVALIDATE_REQUEST 0x1478
372#define VM_INVALIDATE_RESPONSE 0x147c 386#define VM_INVALIDATE_RESPONSE 0x147c
diff --git a/drivers/gpu/drm/radeon/sumo_dpm.c b/drivers/gpu/drm/radeon/sumo_dpm.c
index 11b6b9924f1b..c0a850319908 100644
--- a/drivers/gpu/drm/radeon/sumo_dpm.c
+++ b/drivers/gpu/drm/radeon/sumo_dpm.c
@@ -1732,7 +1732,13 @@ int sumo_dpm_init(struct radeon_device *rdev)
1732 pi->enable_sclk_ds = true; 1732 pi->enable_sclk_ds = true;
1733 pi->enable_dynamic_m3_arbiter = false; 1733 pi->enable_dynamic_m3_arbiter = false;
1734 pi->enable_dynamic_patch_ps = true; 1734 pi->enable_dynamic_patch_ps = true;
1735 pi->enable_gfx_power_gating = true; 1735 /* Some PALM chips don't seem to properly ungate gfx when UVD is in use;
1736 * for now just disable gfx PG.
1737 */
1738 if (rdev->family == CHIP_PALM)
1739 pi->enable_gfx_power_gating = false;
1740 else
1741 pi->enable_gfx_power_gating = true;
1736 pi->enable_gfx_clock_gating = true; 1742 pi->enable_gfx_clock_gating = true;
1737 pi->enable_mg_clock_gating = true; 1743 pi->enable_mg_clock_gating = true;
1738 pi->enable_auto_thermal_throttling = true; 1744 pi->enable_auto_thermal_throttling = true;
@@ -1845,6 +1851,8 @@ int sumo_dpm_force_performance_level(struct radeon_device *rdev,
1845 return 0; 1851 return 0;
1846 1852
1847 if (level == RADEON_DPM_FORCED_LEVEL_HIGH) { 1853 if (level == RADEON_DPM_FORCED_LEVEL_HIGH) {
1854 if (pi->enable_boost)
1855 sumo_enable_boost(rdev, rps, false);
1848 sumo_power_level_enable(rdev, ps->num_levels - 1, true); 1856 sumo_power_level_enable(rdev, ps->num_levels - 1, true);
1849 sumo_set_forced_level(rdev, ps->num_levels - 1); 1857 sumo_set_forced_level(rdev, ps->num_levels - 1);
1850 sumo_set_forced_mode_enabled(rdev); 1858 sumo_set_forced_mode_enabled(rdev);
@@ -1855,6 +1863,8 @@ int sumo_dpm_force_performance_level(struct radeon_device *rdev,
1855 sumo_set_forced_mode_enabled(rdev); 1863 sumo_set_forced_mode_enabled(rdev);
1856 sumo_set_forced_mode(rdev, false); 1864 sumo_set_forced_mode(rdev, false);
1857 } else if (level == RADEON_DPM_FORCED_LEVEL_LOW) { 1865 } else if (level == RADEON_DPM_FORCED_LEVEL_LOW) {
1866 if (pi->enable_boost)
1867 sumo_enable_boost(rdev, rps, false);
1858 sumo_power_level_enable(rdev, 0, true); 1868 sumo_power_level_enable(rdev, 0, true);
1859 sumo_set_forced_level(rdev, 0); 1869 sumo_set_forced_level(rdev, 0);
1860 sumo_set_forced_mode_enabled(rdev); 1870 sumo_set_forced_mode_enabled(rdev);
@@ -1868,6 +1878,8 @@ int sumo_dpm_force_performance_level(struct radeon_device *rdev,
1868 for (i = 0; i < ps->num_levels; i++) { 1878 for (i = 0; i < ps->num_levels; i++) {
1869 sumo_power_level_enable(rdev, i, true); 1879 sumo_power_level_enable(rdev, i, true);
1870 } 1880 }
1881 if (pi->enable_boost)
1882 sumo_enable_boost(rdev, rps, true);
1871 } 1883 }
1872 1884
1873 rdev->pm.dpm.forced_level = level; 1885 rdev->pm.dpm.forced_level = level;
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_drv.c b/drivers/gpu/drm/rcar-du/rcar_du_drv.c
index ff82877de876..dc0fe09b2ba1 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_drv.c
+++ b/drivers/gpu/drm/rcar-du/rcar_du_drv.c
@@ -249,8 +249,13 @@ static struct drm_driver rcar_du_driver = {
249 .gem_vm_ops = &drm_gem_cma_vm_ops, 249 .gem_vm_ops = &drm_gem_cma_vm_ops,
250 .prime_handle_to_fd = drm_gem_prime_handle_to_fd, 250 .prime_handle_to_fd = drm_gem_prime_handle_to_fd,
251 .prime_fd_to_handle = drm_gem_prime_fd_to_handle, 251 .prime_fd_to_handle = drm_gem_prime_fd_to_handle,
252 .gem_prime_import = drm_gem_cma_dmabuf_import, 252 .gem_prime_import = drm_gem_prime_import,
253 .gem_prime_export = drm_gem_cma_dmabuf_export, 253 .gem_prime_export = drm_gem_prime_export,
254 .gem_prime_get_sg_table = drm_gem_cma_prime_get_sg_table,
255 .gem_prime_import_sg_table = drm_gem_cma_prime_import_sg_table,
256 .gem_prime_vmap = drm_gem_cma_prime_vmap,
257 .gem_prime_vunmap = drm_gem_cma_prime_vunmap,
258 .gem_prime_mmap = drm_gem_cma_prime_mmap,
254 .dumb_create = rcar_du_dumb_create, 259 .dumb_create = rcar_du_dumb_create,
255 .dumb_map_offset = drm_gem_cma_dumb_map_offset, 260 .dumb_map_offset = drm_gem_cma_dumb_map_offset,
256 .dumb_destroy = drm_gem_cma_dumb_destroy, 261 .dumb_destroy = drm_gem_cma_dumb_destroy,
diff --git a/drivers/gpu/drm/shmobile/shmob_drm_drv.c b/drivers/gpu/drm/shmobile/shmob_drm_drv.c
index edc10181f551..5f83f9a3ef59 100644
--- a/drivers/gpu/drm/shmobile/shmob_drm_drv.c
+++ b/drivers/gpu/drm/shmobile/shmob_drm_drv.c
@@ -276,8 +276,13 @@ static struct drm_driver shmob_drm_driver = {
276 .gem_vm_ops = &drm_gem_cma_vm_ops, 276 .gem_vm_ops = &drm_gem_cma_vm_ops,
277 .prime_handle_to_fd = drm_gem_prime_handle_to_fd, 277 .prime_handle_to_fd = drm_gem_prime_handle_to_fd,
278 .prime_fd_to_handle = drm_gem_prime_fd_to_handle, 278 .prime_fd_to_handle = drm_gem_prime_fd_to_handle,
279 .gem_prime_import = drm_gem_cma_dmabuf_import, 279 .gem_prime_import = drm_gem_prime_import,
280 .gem_prime_export = drm_gem_cma_dmabuf_export, 280 .gem_prime_export = drm_gem_prime_export,
281 .gem_prime_get_sg_table = drm_gem_cma_prime_get_sg_table,
282 .gem_prime_import_sg_table = drm_gem_cma_prime_import_sg_table,
283 .gem_prime_vmap = drm_gem_cma_prime_vmap,
284 .gem_prime_vunmap = drm_gem_cma_prime_vunmap,
285 .gem_prime_mmap = drm_gem_cma_prime_mmap,
281 .dumb_create = drm_gem_cma_dumb_create, 286 .dumb_create = drm_gem_cma_dumb_create,
282 .dumb_map_offset = drm_gem_cma_dumb_map_offset, 287 .dumb_map_offset = drm_gem_cma_dumb_map_offset,
283 .dumb_destroy = drm_gem_cma_dumb_destroy, 288 .dumb_destroy = drm_gem_cma_dumb_destroy,
diff --git a/drivers/hid/hid-logitech-dj.c b/drivers/hid/hid-logitech-dj.c
index 5207591a598c..cd33084c7860 100644
--- a/drivers/hid/hid-logitech-dj.c
+++ b/drivers/hid/hid-logitech-dj.c
@@ -192,6 +192,7 @@ static struct hid_ll_driver logi_dj_ll_driver;
192static int logi_dj_output_hidraw_report(struct hid_device *hid, u8 * buf, 192static int logi_dj_output_hidraw_report(struct hid_device *hid, u8 * buf,
193 size_t count, 193 size_t count,
194 unsigned char report_type); 194 unsigned char report_type);
195static int logi_dj_recv_query_paired_devices(struct dj_receiver_dev *djrcv_dev);
195 196
196static void logi_dj_recv_destroy_djhid_device(struct dj_receiver_dev *djrcv_dev, 197static void logi_dj_recv_destroy_djhid_device(struct dj_receiver_dev *djrcv_dev,
197 struct dj_report *dj_report) 198 struct dj_report *dj_report)
@@ -232,6 +233,7 @@ static void logi_dj_recv_add_djhid_device(struct dj_receiver_dev *djrcv_dev,
232 if (dj_report->report_params[DEVICE_PAIRED_PARAM_SPFUNCTION] & 233 if (dj_report->report_params[DEVICE_PAIRED_PARAM_SPFUNCTION] &
233 SPFUNCTION_DEVICE_LIST_EMPTY) { 234 SPFUNCTION_DEVICE_LIST_EMPTY) {
234 dbg_hid("%s: device list is empty\n", __func__); 235 dbg_hid("%s: device list is empty\n", __func__);
236 djrcv_dev->querying_devices = false;
235 return; 237 return;
236 } 238 }
237 239
@@ -242,6 +244,12 @@ static void logi_dj_recv_add_djhid_device(struct dj_receiver_dev *djrcv_dev,
242 return; 244 return;
243 } 245 }
244 246
247 if (djrcv_dev->paired_dj_devices[dj_report->device_index]) {
248 /* The device is already known. No need to reallocate it. */
249 dbg_hid("%s: device is already known\n", __func__);
250 return;
251 }
252
245 dj_hiddev = hid_allocate_device(); 253 dj_hiddev = hid_allocate_device();
246 if (IS_ERR(dj_hiddev)) { 254 if (IS_ERR(dj_hiddev)) {
247 dev_err(&djrcv_hdev->dev, "%s: hid_allocate_device failed\n", 255 dev_err(&djrcv_hdev->dev, "%s: hid_allocate_device failed\n",
@@ -305,6 +313,7 @@ static void delayedwork_callback(struct work_struct *work)
305 struct dj_report dj_report; 313 struct dj_report dj_report;
306 unsigned long flags; 314 unsigned long flags;
307 int count; 315 int count;
316 int retval;
308 317
309 dbg_hid("%s\n", __func__); 318 dbg_hid("%s\n", __func__);
310 319
@@ -337,6 +346,25 @@ static void delayedwork_callback(struct work_struct *work)
337 logi_dj_recv_destroy_djhid_device(djrcv_dev, &dj_report); 346 logi_dj_recv_destroy_djhid_device(djrcv_dev, &dj_report);
338 break; 347 break;
339 default: 348 default:
349 /* A normal report (i. e. not belonging to a pair/unpair notification)
350 * arriving here, means that the report arrived but we did not have a
351 * paired dj_device associated to the report's device_index, this
352 * means that the original "device paired" notification corresponding
353 * to this dj_device never arrived to this driver. The reason is that
354 * hid-core discards all packets coming from a device while probe() is
355 * executing. */
356 if (!djrcv_dev->paired_dj_devices[dj_report.device_index]) {
357 /* ok, we don't know the device, just re-ask the
358 * receiver for the list of connected devices. */
359 retval = logi_dj_recv_query_paired_devices(djrcv_dev);
360 if (!retval) {
361 /* everything went fine, so just leave */
362 break;
363 }
364 dev_err(&djrcv_dev->hdev->dev,
365 "%s:logi_dj_recv_query_paired_devices "
366 "error:%d\n", __func__, retval);
367 }
340 dbg_hid("%s: unexpected report type\n", __func__); 368 dbg_hid("%s: unexpected report type\n", __func__);
341 } 369 }
342} 370}
@@ -367,6 +395,12 @@ static void logi_dj_recv_forward_null_report(struct dj_receiver_dev *djrcv_dev,
367 if (!djdev) { 395 if (!djdev) {
368 dbg_hid("djrcv_dev->paired_dj_devices[dj_report->device_index]" 396 dbg_hid("djrcv_dev->paired_dj_devices[dj_report->device_index]"
369 " is NULL, index %d\n", dj_report->device_index); 397 " is NULL, index %d\n", dj_report->device_index);
398 kfifo_in(&djrcv_dev->notif_fifo, dj_report, sizeof(struct dj_report));
399
400 if (schedule_work(&djrcv_dev->work) == 0) {
401 dbg_hid("%s: did not schedule the work item, was already "
402 "queued\n", __func__);
403 }
370 return; 404 return;
371 } 405 }
372 406
@@ -397,6 +431,12 @@ static void logi_dj_recv_forward_report(struct dj_receiver_dev *djrcv_dev,
397 if (dj_device == NULL) { 431 if (dj_device == NULL) {
398 dbg_hid("djrcv_dev->paired_dj_devices[dj_report->device_index]" 432 dbg_hid("djrcv_dev->paired_dj_devices[dj_report->device_index]"
399 " is NULL, index %d\n", dj_report->device_index); 433 " is NULL, index %d\n", dj_report->device_index);
434 kfifo_in(&djrcv_dev->notif_fifo, dj_report, sizeof(struct dj_report));
435
436 if (schedule_work(&djrcv_dev->work) == 0) {
437 dbg_hid("%s: did not schedule the work item, was already "
438 "queued\n", __func__);
439 }
400 return; 440 return;
401 } 441 }
402 442
@@ -444,6 +484,10 @@ static int logi_dj_recv_query_paired_devices(struct dj_receiver_dev *djrcv_dev)
444 struct dj_report *dj_report; 484 struct dj_report *dj_report;
445 int retval; 485 int retval;
446 486
487 /* no need to protect djrcv_dev->querying_devices */
488 if (djrcv_dev->querying_devices)
489 return 0;
490
447 dj_report = kzalloc(sizeof(struct dj_report), GFP_KERNEL); 491 dj_report = kzalloc(sizeof(struct dj_report), GFP_KERNEL);
448 if (!dj_report) 492 if (!dj_report)
449 return -ENOMEM; 493 return -ENOMEM;
@@ -455,6 +499,7 @@ static int logi_dj_recv_query_paired_devices(struct dj_receiver_dev *djrcv_dev)
455 return retval; 499 return retval;
456} 500}
457 501
502
458static int logi_dj_recv_switch_to_dj_mode(struct dj_receiver_dev *djrcv_dev, 503static int logi_dj_recv_switch_to_dj_mode(struct dj_receiver_dev *djrcv_dev,
459 unsigned timeout) 504 unsigned timeout)
460{ 505{
diff --git a/drivers/hid/hid-logitech-dj.h b/drivers/hid/hid-logitech-dj.h
index fd28a5e0ca3b..4a4000340ce1 100644
--- a/drivers/hid/hid-logitech-dj.h
+++ b/drivers/hid/hid-logitech-dj.h
@@ -101,6 +101,7 @@ struct dj_receiver_dev {
101 struct work_struct work; 101 struct work_struct work;
102 struct kfifo notif_fifo; 102 struct kfifo notif_fifo;
103 spinlock_t lock; 103 spinlock_t lock;
104 bool querying_devices;
104}; 105};
105 106
106struct dj_device { 107struct dj_device {
diff --git a/drivers/hid/hid-sony.c b/drivers/hid/hid-sony.c
index ecbc74923d06..87fbe2924cfa 100644
--- a/drivers/hid/hid-sony.c
+++ b/drivers/hid/hid-sony.c
@@ -369,7 +369,8 @@ static int sony_mapping(struct hid_device *hdev, struct hid_input *hi,
369 if (sc->quirks & PS3REMOTE) 369 if (sc->quirks & PS3REMOTE)
370 return ps3remote_mapping(hdev, hi, field, usage, bit, max); 370 return ps3remote_mapping(hdev, hi, field, usage, bit, max);
371 371
372 return -1; 372 /* Let hid-core decide for the others */
373 return 0;
373} 374}
374 375
375/* 376/*
diff --git a/drivers/hid/hidraw.c b/drivers/hid/hidraw.c
index a7451632ceb4..6f1feb2c2e97 100644
--- a/drivers/hid/hidraw.c
+++ b/drivers/hid/hidraw.c
@@ -518,7 +518,6 @@ int hidraw_connect(struct hid_device *hid)
518 goto out; 518 goto out;
519 } 519 }
520 520
521 mutex_unlock(&minors_lock);
522 init_waitqueue_head(&dev->wait); 521 init_waitqueue_head(&dev->wait);
523 INIT_LIST_HEAD(&dev->list); 522 INIT_LIST_HEAD(&dev->list);
524 523
@@ -528,6 +527,7 @@ int hidraw_connect(struct hid_device *hid)
528 dev->exist = 1; 527 dev->exist = 1;
529 hid->hidraw = dev; 528 hid->hidraw = dev;
530 529
530 mutex_unlock(&minors_lock);
531out: 531out:
532 return result; 532 return result;
533 533
diff --git a/drivers/hv/hv_balloon.c b/drivers/hv/hv_balloon.c
index 4c605c70ebf9..deb5c25305af 100644
--- a/drivers/hv/hv_balloon.c
+++ b/drivers/hv/hv_balloon.c
@@ -562,7 +562,7 @@ static void hv_mem_hot_add(unsigned long start, unsigned long size,
562 struct hv_hotadd_state *has) 562 struct hv_hotadd_state *has)
563{ 563{
564 int ret = 0; 564 int ret = 0;
565 int i, nid, t; 565 int i, nid;
566 unsigned long start_pfn; 566 unsigned long start_pfn;
567 unsigned long processed_pfn; 567 unsigned long processed_pfn;
568 unsigned long total_pfn = pfn_count; 568 unsigned long total_pfn = pfn_count;
@@ -607,14 +607,11 @@ static void hv_mem_hot_add(unsigned long start, unsigned long size,
607 607
608 /* 608 /*
609 * Wait for the memory block to be onlined. 609 * Wait for the memory block to be onlined.
610 * Since the hot add has succeeded, it is ok to
611 * proceed even if the pages in the hot added region
612 * have not been "onlined" within the allowed time.
610 */ 613 */
611 t = wait_for_completion_timeout(&dm_device.ol_waitevent, 5*HZ); 614 wait_for_completion_timeout(&dm_device.ol_waitevent, 5*HZ);
612 if (t == 0) {
613 pr_info("hot_add memory timedout\n");
614 has->ha_end_pfn -= HA_CHUNK;
615 has->covered_end_pfn -= processed_pfn;
616 break;
617 }
618 615
619 } 616 }
620 617
@@ -978,6 +975,14 @@ static void post_status(struct hv_dynmem_device *dm)
978 dm->num_pages_ballooned + 975 dm->num_pages_ballooned +
979 compute_balloon_floor(); 976 compute_balloon_floor();
980 977
978 /*
979 * If our transaction ID is no longer current, just don't
980 * send the status. This can happen if we were interrupted
981 * after we picked our transaction ID.
982 */
983 if (status.hdr.trans_id != atomic_read(&trans_id))
984 return;
985
981 vmbus_sendpacket(dm->dev->channel, &status, 986 vmbus_sendpacket(dm->dev->channel, &status,
982 sizeof(struct dm_status), 987 sizeof(struct dm_status),
983 (unsigned long)NULL, 988 (unsigned long)NULL,
diff --git a/drivers/hv/vmbus_drv.c b/drivers/hv/vmbus_drv.c
index a2464bf07c49..e8e071fc1d6d 100644
--- a/drivers/hv/vmbus_drv.c
+++ b/drivers/hv/vmbus_drv.c
@@ -690,7 +690,7 @@ int vmbus_device_register(struct hv_device *child_device_obj)
690 if (ret) 690 if (ret)
691 pr_err("Unable to register child device\n"); 691 pr_err("Unable to register child device\n");
692 else 692 else
693 pr_info("child device %s registered\n", 693 pr_debug("child device %s registered\n",
694 dev_name(&child_device_obj->device)); 694 dev_name(&child_device_obj->device));
695 695
696 return ret; 696 return ret;
@@ -702,14 +702,14 @@ int vmbus_device_register(struct hv_device *child_device_obj)
702 */ 702 */
703void vmbus_device_unregister(struct hv_device *device_obj) 703void vmbus_device_unregister(struct hv_device *device_obj)
704{ 704{
705 pr_debug("child device %s unregistered\n",
706 dev_name(&device_obj->device));
707
705 /* 708 /*
706 * Kick off the process of unregistering the device. 709 * Kick off the process of unregistering the device.
707 * This will call vmbus_remove() and eventually vmbus_device_release() 710 * This will call vmbus_remove() and eventually vmbus_device_release()
708 */ 711 */
709 device_unregister(&device_obj->device); 712 device_unregister(&device_obj->device);
710
711 pr_info("child device %s unregistered\n",
712 dev_name(&device_obj->device));
713} 713}
714 714
715 715
diff --git a/drivers/hwmon/abx500.c b/drivers/hwmon/abx500.c
index eee1134274c8..769fe20ec938 100644
--- a/drivers/hwmon/abx500.c
+++ b/drivers/hwmon/abx500.c
@@ -315,7 +315,7 @@ static SENSOR_DEVICE_ATTR(temp4_max_hyst, S_IWUSR | S_IRUGO,
315static SENSOR_DEVICE_ATTR(temp4_min_alarm, S_IRUGO, show_min_alarm, NULL, 3); 315static SENSOR_DEVICE_ATTR(temp4_min_alarm, S_IRUGO, show_min_alarm, NULL, 3);
316static SENSOR_DEVICE_ATTR(temp4_max_alarm, S_IRUGO, show_max_alarm, NULL, 3); 316static SENSOR_DEVICE_ATTR(temp4_max_alarm, S_IRUGO, show_max_alarm, NULL, 3);
317 317
318struct attribute *abx500_temp_attributes[] = { 318static struct attribute *abx500_temp_attributes[] = {
319 &sensor_dev_attr_name.dev_attr.attr, 319 &sensor_dev_attr_name.dev_attr.attr,
320 320
321 &sensor_dev_attr_temp1_label.dev_attr.attr, 321 &sensor_dev_attr_temp1_label.dev_attr.attr,
diff --git a/drivers/hwmon/adt7470.c b/drivers/hwmon/adt7470.c
index 0f34bca9f5e5..6099f50b28aa 100644
--- a/drivers/hwmon/adt7470.c
+++ b/drivers/hwmon/adt7470.c
@@ -215,7 +215,7 @@ static inline int adt7470_write_word_data(struct i2c_client *client, u8 reg,
215 u16 value) 215 u16 value)
216{ 216{
217 return i2c_smbus_write_byte_data(client, reg, value & 0xFF) 217 return i2c_smbus_write_byte_data(client, reg, value & 0xFF)
218 && i2c_smbus_write_byte_data(client, reg + 1, value >> 8); 218 || i2c_smbus_write_byte_data(client, reg + 1, value >> 8);
219} 219}
220 220
221static void adt7470_init_client(struct i2c_client *client) 221static void adt7470_init_client(struct i2c_client *client)
diff --git a/drivers/hwmon/coretemp.c b/drivers/hwmon/coretemp.c
index ade35cf3f488..2e5e2dc47eaf 100644
--- a/drivers/hwmon/coretemp.c
+++ b/drivers/hwmon/coretemp.c
@@ -195,7 +195,7 @@ struct tjmax {
195 int tjmax; 195 int tjmax;
196}; 196};
197 197
198static const struct tjmax __cpuinitconst tjmax_table[] = { 198static const struct tjmax tjmax_table[] = {
199 { "CPU 230", 100000 }, /* Model 0x1c, stepping 2 */ 199 { "CPU 230", 100000 }, /* Model 0x1c, stepping 2 */
200 { "CPU 330", 125000 }, /* Model 0x1c, stepping 2 */ 200 { "CPU 330", 125000 }, /* Model 0x1c, stepping 2 */
201 { "CPU CE4110", 110000 }, /* Model 0x1c, stepping 10 Sodaville */ 201 { "CPU CE4110", 110000 }, /* Model 0x1c, stepping 10 Sodaville */
@@ -211,7 +211,7 @@ struct tjmax_model {
211 211
212#define ANY 0xff 212#define ANY 0xff
213 213
214static const struct tjmax_model __cpuinitconst tjmax_model_table[] = { 214static const struct tjmax_model tjmax_model_table[] = {
215 { 0x1c, 10, 100000 }, /* D4xx, K4xx, N4xx, D5xx, K5xx, N5xx */ 215 { 0x1c, 10, 100000 }, /* D4xx, K4xx, N4xx, D5xx, K5xx, N5xx */
216 { 0x1c, ANY, 90000 }, /* Z5xx, N2xx, possibly others 216 { 0x1c, ANY, 90000 }, /* Z5xx, N2xx, possibly others
217 * Note: Also matches 230 and 330, 217 * Note: Also matches 230 and 330,
@@ -226,8 +226,7 @@ static const struct tjmax_model __cpuinitconst tjmax_model_table[] = {
226 { 0x36, ANY, 100000 }, /* Atom Cedar Trail/Cedarview (N2xxx, D2xxx) */ 226 { 0x36, ANY, 100000 }, /* Atom Cedar Trail/Cedarview (N2xxx, D2xxx) */
227}; 227};
228 228
229static int __cpuinit adjust_tjmax(struct cpuinfo_x86 *c, u32 id, 229static int adjust_tjmax(struct cpuinfo_x86 *c, u32 id, struct device *dev)
230 struct device *dev)
231{ 230{
232 /* The 100C is default for both mobile and non mobile CPUs */ 231 /* The 100C is default for both mobile and non mobile CPUs */
233 232
@@ -317,8 +316,7 @@ static int __cpuinit adjust_tjmax(struct cpuinfo_x86 *c, u32 id,
317 return tjmax; 316 return tjmax;
318} 317}
319 318
320static int __cpuinit get_tjmax(struct cpuinfo_x86 *c, u32 id, 319static int get_tjmax(struct cpuinfo_x86 *c, u32 id, struct device *dev)
321 struct device *dev)
322{ 320{
323 int err; 321 int err;
324 u32 eax, edx; 322 u32 eax, edx;
@@ -367,8 +365,8 @@ static int create_name_attr(struct platform_data *pdata,
367 return device_create_file(dev, &pdata->name_attr); 365 return device_create_file(dev, &pdata->name_attr);
368} 366}
369 367
370static int __cpuinit create_core_attrs(struct temp_data *tdata, 368static int create_core_attrs(struct temp_data *tdata, struct device *dev,
371 struct device *dev, int attr_no) 369 int attr_no)
372{ 370{
373 int err, i; 371 int err, i;
374 static ssize_t (*const rd_ptr[TOTAL_ATTRS]) (struct device *dev, 372 static ssize_t (*const rd_ptr[TOTAL_ATTRS]) (struct device *dev,
@@ -401,7 +399,7 @@ exit_free:
401} 399}
402 400
403 401
404static int __cpuinit chk_ucode_version(unsigned int cpu) 402static int chk_ucode_version(unsigned int cpu)
405{ 403{
406 struct cpuinfo_x86 *c = &cpu_data(cpu); 404 struct cpuinfo_x86 *c = &cpu_data(cpu);
407 405
@@ -417,7 +415,7 @@ static int __cpuinit chk_ucode_version(unsigned int cpu)
417 return 0; 415 return 0;
418} 416}
419 417
420static struct platform_device __cpuinit *coretemp_get_pdev(unsigned int cpu) 418static struct platform_device *coretemp_get_pdev(unsigned int cpu)
421{ 419{
422 u16 phys_proc_id = TO_PHYS_ID(cpu); 420 u16 phys_proc_id = TO_PHYS_ID(cpu);
423 struct pdev_entry *p; 421 struct pdev_entry *p;
@@ -434,8 +432,7 @@ static struct platform_device __cpuinit *coretemp_get_pdev(unsigned int cpu)
434 return NULL; 432 return NULL;
435} 433}
436 434
437static struct temp_data __cpuinit *init_temp_data(unsigned int cpu, 435static struct temp_data *init_temp_data(unsigned int cpu, int pkg_flag)
438 int pkg_flag)
439{ 436{
440 struct temp_data *tdata; 437 struct temp_data *tdata;
441 438
@@ -453,8 +450,8 @@ static struct temp_data __cpuinit *init_temp_data(unsigned int cpu,
453 return tdata; 450 return tdata;
454} 451}
455 452
456static int __cpuinit create_core_data(struct platform_device *pdev, 453static int create_core_data(struct platform_device *pdev, unsigned int cpu,
457 unsigned int cpu, int pkg_flag) 454 int pkg_flag)
458{ 455{
459 struct temp_data *tdata; 456 struct temp_data *tdata;
460 struct platform_data *pdata = platform_get_drvdata(pdev); 457 struct platform_data *pdata = platform_get_drvdata(pdev);
@@ -524,7 +521,7 @@ exit_free:
524 return err; 521 return err;
525} 522}
526 523
527static void __cpuinit coretemp_add_core(unsigned int cpu, int pkg_flag) 524static void coretemp_add_core(unsigned int cpu, int pkg_flag)
528{ 525{
529 struct platform_device *pdev = coretemp_get_pdev(cpu); 526 struct platform_device *pdev = coretemp_get_pdev(cpu);
530 int err; 527 int err;
@@ -607,7 +604,7 @@ static struct platform_driver coretemp_driver = {
607 .remove = coretemp_remove, 604 .remove = coretemp_remove,
608}; 605};
609 606
610static int __cpuinit coretemp_device_add(unsigned int cpu) 607static int coretemp_device_add(unsigned int cpu)
611{ 608{
612 int err; 609 int err;
613 struct platform_device *pdev; 610 struct platform_device *pdev;
@@ -651,7 +648,7 @@ exit:
651 return err; 648 return err;
652} 649}
653 650
654static void __cpuinit coretemp_device_remove(unsigned int cpu) 651static void coretemp_device_remove(unsigned int cpu)
655{ 652{
656 struct pdev_entry *p, *n; 653 struct pdev_entry *p, *n;
657 u16 phys_proc_id = TO_PHYS_ID(cpu); 654 u16 phys_proc_id = TO_PHYS_ID(cpu);
@@ -667,7 +664,7 @@ static void __cpuinit coretemp_device_remove(unsigned int cpu)
667 mutex_unlock(&pdev_list_mutex); 664 mutex_unlock(&pdev_list_mutex);
668} 665}
669 666
670static bool __cpuinit is_any_core_online(struct platform_data *pdata) 667static bool is_any_core_online(struct platform_data *pdata)
671{ 668{
672 int i; 669 int i;
673 670
@@ -681,7 +678,7 @@ static bool __cpuinit is_any_core_online(struct platform_data *pdata)
681 return false; 678 return false;
682} 679}
683 680
684static void __cpuinit get_core_online(unsigned int cpu) 681static void get_core_online(unsigned int cpu)
685{ 682{
686 struct cpuinfo_x86 *c = &cpu_data(cpu); 683 struct cpuinfo_x86 *c = &cpu_data(cpu);
687 struct platform_device *pdev = coretemp_get_pdev(cpu); 684 struct platform_device *pdev = coretemp_get_pdev(cpu);
@@ -723,7 +720,7 @@ static void __cpuinit get_core_online(unsigned int cpu)
723 coretemp_add_core(cpu, 0); 720 coretemp_add_core(cpu, 0);
724} 721}
725 722
726static void __cpuinit put_core_offline(unsigned int cpu) 723static void put_core_offline(unsigned int cpu)
727{ 724{
728 int i, indx; 725 int i, indx;
729 struct platform_data *pdata; 726 struct platform_data *pdata;
@@ -771,7 +768,7 @@ static void __cpuinit put_core_offline(unsigned int cpu)
771 coretemp_device_remove(cpu); 768 coretemp_device_remove(cpu);
772} 769}
773 770
774static int __cpuinit coretemp_cpu_callback(struct notifier_block *nfb, 771static int coretemp_cpu_callback(struct notifier_block *nfb,
775 unsigned long action, void *hcpu) 772 unsigned long action, void *hcpu)
776{ 773{
777 unsigned int cpu = (unsigned long) hcpu; 774 unsigned int cpu = (unsigned long) hcpu;
diff --git a/drivers/hwmon/max6697.c b/drivers/hwmon/max6697.c
index 328fb0353c17..a41b5f3fc506 100644
--- a/drivers/hwmon/max6697.c
+++ b/drivers/hwmon/max6697.c
@@ -605,12 +605,12 @@ static int max6697_init_chip(struct i2c_client *client)
605 if (ret < 0) 605 if (ret < 0)
606 return ret; 606 return ret;
607 ret = i2c_smbus_write_byte_data(client, MAX6581_REG_IDEALITY, 607 ret = i2c_smbus_write_byte_data(client, MAX6581_REG_IDEALITY,
608 pdata->ideality_mask >> 1); 608 pdata->ideality_value);
609 if (ret < 0) 609 if (ret < 0)
610 return ret; 610 return ret;
611 ret = i2c_smbus_write_byte_data(client, 611 ret = i2c_smbus_write_byte_data(client,
612 MAX6581_REG_IDEALITY_SELECT, 612 MAX6581_REG_IDEALITY_SELECT,
613 pdata->ideality_value); 613 pdata->ideality_mask >> 1);
614 if (ret < 0) 614 if (ret < 0)
615 return ret; 615 return ret;
616 } 616 }
diff --git a/drivers/hwmon/via-cputemp.c b/drivers/hwmon/via-cputemp.c
index 76f157b568ed..38944e94f65f 100644
--- a/drivers/hwmon/via-cputemp.c
+++ b/drivers/hwmon/via-cputemp.c
@@ -221,7 +221,7 @@ struct pdev_entry {
221static LIST_HEAD(pdev_list); 221static LIST_HEAD(pdev_list);
222static DEFINE_MUTEX(pdev_list_mutex); 222static DEFINE_MUTEX(pdev_list_mutex);
223 223
224static int __cpuinit via_cputemp_device_add(unsigned int cpu) 224static int via_cputemp_device_add(unsigned int cpu)
225{ 225{
226 int err; 226 int err;
227 struct platform_device *pdev; 227 struct platform_device *pdev;
@@ -262,7 +262,7 @@ exit:
262 return err; 262 return err;
263} 263}
264 264
265static void __cpuinit via_cputemp_device_remove(unsigned int cpu) 265static void via_cputemp_device_remove(unsigned int cpu)
266{ 266{
267 struct pdev_entry *p; 267 struct pdev_entry *p;
268 268
@@ -279,8 +279,8 @@ static void __cpuinit via_cputemp_device_remove(unsigned int cpu)
279 mutex_unlock(&pdev_list_mutex); 279 mutex_unlock(&pdev_list_mutex);
280} 280}
281 281
282static int __cpuinit via_cputemp_cpu_callback(struct notifier_block *nfb, 282static int via_cputemp_cpu_callback(struct notifier_block *nfb,
283 unsigned long action, void *hcpu) 283 unsigned long action, void *hcpu)
284{ 284{
285 unsigned int cpu = (unsigned long) hcpu; 285 unsigned int cpu = (unsigned long) hcpu;
286 286
diff --git a/drivers/i2c/busses/i2c-kempld.c b/drivers/i2c/busses/i2c-kempld.c
index ccec916bc3eb..af8f65fb1c05 100644
--- a/drivers/i2c/busses/i2c-kempld.c
+++ b/drivers/i2c/busses/i2c-kempld.c
@@ -246,9 +246,9 @@ static void kempld_i2c_device_init(struct kempld_i2c_data *i2c)
246 bus_frequency = KEMPLD_I2C_FREQ_MAX; 246 bus_frequency = KEMPLD_I2C_FREQ_MAX;
247 247
248 if (pld->info.spec_major == 1) 248 if (pld->info.spec_major == 1)
249 prescale = pld->pld_clock / bus_frequency * 5 - 1000; 249 prescale = pld->pld_clock / (bus_frequency * 5) - 1000;
250 else 250 else
251 prescale = pld->pld_clock / bus_frequency * 4 - 3000; 251 prescale = pld->pld_clock / (bus_frequency * 4) - 3000;
252 252
253 if (prescale < 0) 253 if (prescale < 0)
254 prescale = 0; 254 prescale = 0;
diff --git a/drivers/i2c/busses/i2c-mxs.c b/drivers/i2c/busses/i2c-mxs.c
index df8ff5aea5b5..e2e9a0dade96 100644
--- a/drivers/i2c/busses/i2c-mxs.c
+++ b/drivers/i2c/busses/i2c-mxs.c
@@ -493,7 +493,7 @@ static int mxs_i2c_xfer_msg(struct i2c_adapter *adap, struct i2c_msg *msg,
493 * based on this empirical measurement and a lot of previous frobbing. 493 * based on this empirical measurement and a lot of previous frobbing.
494 */ 494 */
495 i2c->cmd_err = 0; 495 i2c->cmd_err = 0;
496 if (msg->len < 8) { 496 if (0) { /* disable PIO mode until a proper fix is made */
497 ret = mxs_i2c_pio_setup_xfer(adap, msg, flags); 497 ret = mxs_i2c_pio_setup_xfer(adap, msg, flags);
498 if (ret) 498 if (ret)
499 mxs_i2c_reset(i2c); 499 mxs_i2c_reset(i2c);
diff --git a/drivers/iio/adc/ti_am335x_adc.c b/drivers/iio/adc/ti_am335x_adc.c
index 4427e8e46a7f..3ceac3e91dde 100644
--- a/drivers/iio/adc/ti_am335x_adc.c
+++ b/drivers/iio/adc/ti_am335x_adc.c
@@ -60,7 +60,6 @@ static void tiadc_step_config(struct tiadc_device *adc_dev)
60{ 60{
61 unsigned int stepconfig; 61 unsigned int stepconfig;
62 int i, steps; 62 int i, steps;
63 u32 step_en;
64 63
65 /* 64 /*
66 * There are 16 configurable steps and 8 analog input 65 * There are 16 configurable steps and 8 analog input
@@ -86,8 +85,7 @@ static void tiadc_step_config(struct tiadc_device *adc_dev)
86 adc_dev->channel_step[i] = steps; 85 adc_dev->channel_step[i] = steps;
87 steps++; 86 steps++;
88 } 87 }
89 step_en = get_adc_step_mask(adc_dev); 88
90 am335x_tsc_se_set(adc_dev->mfd_tscadc, step_en);
91} 89}
92 90
93static const char * const chan_name_ain[] = { 91static const char * const chan_name_ain[] = {
@@ -142,10 +140,22 @@ static int tiadc_read_raw(struct iio_dev *indio_dev,
142 int *val, int *val2, long mask) 140 int *val, int *val2, long mask)
143{ 141{
144 struct tiadc_device *adc_dev = iio_priv(indio_dev); 142 struct tiadc_device *adc_dev = iio_priv(indio_dev);
145 int i; 143 int i, map_val;
146 unsigned int fifo1count, read; 144 unsigned int fifo1count, read, stepid;
147 u32 step = UINT_MAX; 145 u32 step = UINT_MAX;
148 bool found = false; 146 bool found = false;
147 u32 step_en;
148 unsigned long timeout = jiffies + usecs_to_jiffies
149 (IDLE_TIMEOUT * adc_dev->channels);
150 step_en = get_adc_step_mask(adc_dev);
151 am335x_tsc_se_set(adc_dev->mfd_tscadc, step_en);
152
153 /* Wait for ADC sequencer to complete sampling */
154 while (tiadc_readl(adc_dev, REG_ADCFSM) & SEQ_STATUS) {
155 if (time_after(jiffies, timeout))
156 return -EAGAIN;
157 }
158 map_val = chan->channel + TOTAL_CHANNELS;
149 159
150 /* 160 /*
151 * When the sub-system is first enabled, 161 * When the sub-system is first enabled,
@@ -170,12 +180,16 @@ static int tiadc_read_raw(struct iio_dev *indio_dev,
170 fifo1count = tiadc_readl(adc_dev, REG_FIFO1CNT); 180 fifo1count = tiadc_readl(adc_dev, REG_FIFO1CNT);
171 for (i = 0; i < fifo1count; i++) { 181 for (i = 0; i < fifo1count; i++) {
172 read = tiadc_readl(adc_dev, REG_FIFO1); 182 read = tiadc_readl(adc_dev, REG_FIFO1);
173 if (read >> 16 == step) { 183 stepid = read & FIFOREAD_CHNLID_MASK;
174 *val = read & 0xfff; 184 stepid = stepid >> 0x10;
185
186 if (stepid == map_val) {
187 read = read & FIFOREAD_DATA_MASK;
175 found = true; 188 found = true;
189 *val = read;
176 } 190 }
177 } 191 }
178 am335x_tsc_se_update(adc_dev->mfd_tscadc); 192
179 if (found == false) 193 if (found == false)
180 return -EBUSY; 194 return -EBUSY;
181 return IIO_VAL_INT; 195 return IIO_VAL_INT;
@@ -183,6 +197,7 @@ static int tiadc_read_raw(struct iio_dev *indio_dev,
183 197
184static const struct iio_info tiadc_info = { 198static const struct iio_info tiadc_info = {
185 .read_raw = &tiadc_read_raw, 199 .read_raw = &tiadc_read_raw,
200 .driver_module = THIS_MODULE,
186}; 201};
187 202
188static int tiadc_probe(struct platform_device *pdev) 203static int tiadc_probe(struct platform_device *pdev)
diff --git a/drivers/iio/dac/ad7303.c b/drivers/iio/dac/ad7303.c
index 85aeef60dc5f..d546f50f9258 100644
--- a/drivers/iio/dac/ad7303.c
+++ b/drivers/iio/dac/ad7303.c
@@ -235,8 +235,10 @@ static int ad7303_probe(struct spi_device *spi)
235 235
236 if (ext_ref) { 236 if (ext_ref) {
237 st->vref_reg = regulator_get(&spi->dev, "REF"); 237 st->vref_reg = regulator_get(&spi->dev, "REF");
238 if (IS_ERR(st->vref_reg)) 238 if (IS_ERR(st->vref_reg)) {
239 ret = PTR_ERR(st->vref_reg);
239 goto err_disable_vdd_reg; 240 goto err_disable_vdd_reg;
241 }
240 242
241 ret = regulator_enable(st->vref_reg); 243 ret = regulator_enable(st->vref_reg);
242 if (ret) 244 if (ret)
diff --git a/drivers/iio/industrialio-trigger.c b/drivers/iio/industrialio-trigger.c
index 4d6c7d84e155..0dd9bb873130 100644
--- a/drivers/iio/industrialio-trigger.c
+++ b/drivers/iio/industrialio-trigger.c
@@ -104,7 +104,7 @@ void iio_trigger_unregister(struct iio_trigger *trig_info)
104 104
105 ida_simple_remove(&iio_trigger_ida, trig_info->id); 105 ida_simple_remove(&iio_trigger_ida, trig_info->id);
106 /* Possible issue in here */ 106 /* Possible issue in here */
107 device_unregister(&trig_info->dev); 107 device_del(&trig_info->dev);
108} 108}
109EXPORT_SYMBOL(iio_trigger_unregister); 109EXPORT_SYMBOL(iio_trigger_unregister);
110 110
@@ -127,12 +127,17 @@ static struct iio_trigger *iio_trigger_find_by_name(const char *name,
127void iio_trigger_poll(struct iio_trigger *trig, s64 time) 127void iio_trigger_poll(struct iio_trigger *trig, s64 time)
128{ 128{
129 int i; 129 int i;
130 if (!trig->use_count) 130
131 for (i = 0; i < CONFIG_IIO_CONSUMERS_PER_TRIGGER; i++) 131 if (!atomic_read(&trig->use_count)) {
132 if (trig->subirqs[i].enabled) { 132 atomic_set(&trig->use_count, CONFIG_IIO_CONSUMERS_PER_TRIGGER);
133 trig->use_count++; 133
134 for (i = 0; i < CONFIG_IIO_CONSUMERS_PER_TRIGGER; i++) {
135 if (trig->subirqs[i].enabled)
134 generic_handle_irq(trig->subirq_base + i); 136 generic_handle_irq(trig->subirq_base + i);
135 } 137 else
138 iio_trigger_notify_done(trig);
139 }
140 }
136} 141}
137EXPORT_SYMBOL(iio_trigger_poll); 142EXPORT_SYMBOL(iio_trigger_poll);
138 143
@@ -146,19 +151,24 @@ EXPORT_SYMBOL(iio_trigger_generic_data_rdy_poll);
146void iio_trigger_poll_chained(struct iio_trigger *trig, s64 time) 151void iio_trigger_poll_chained(struct iio_trigger *trig, s64 time)
147{ 152{
148 int i; 153 int i;
149 if (!trig->use_count) 154
150 for (i = 0; i < CONFIG_IIO_CONSUMERS_PER_TRIGGER; i++) 155 if (!atomic_read(&trig->use_count)) {
151 if (trig->subirqs[i].enabled) { 156 atomic_set(&trig->use_count, CONFIG_IIO_CONSUMERS_PER_TRIGGER);
152 trig->use_count++; 157
158 for (i = 0; i < CONFIG_IIO_CONSUMERS_PER_TRIGGER; i++) {
159 if (trig->subirqs[i].enabled)
153 handle_nested_irq(trig->subirq_base + i); 160 handle_nested_irq(trig->subirq_base + i);
154 } 161 else
162 iio_trigger_notify_done(trig);
163 }
164 }
155} 165}
156EXPORT_SYMBOL(iio_trigger_poll_chained); 166EXPORT_SYMBOL(iio_trigger_poll_chained);
157 167
158void iio_trigger_notify_done(struct iio_trigger *trig) 168void iio_trigger_notify_done(struct iio_trigger *trig)
159{ 169{
160 trig->use_count--; 170 if (atomic_dec_and_test(&trig->use_count) && trig->ops &&
161 if (trig->use_count == 0 && trig->ops && trig->ops->try_reenable) 171 trig->ops->try_reenable)
162 if (trig->ops->try_reenable(trig)) 172 if (trig->ops->try_reenable(trig))
163 /* Missed an interrupt so launch new poll now */ 173 /* Missed an interrupt so launch new poll now */
164 iio_trigger_poll(trig, 0); 174 iio_trigger_poll(trig, 0);
diff --git a/drivers/iio/inkern.c b/drivers/iio/inkern.c
index 98ddc323add0..0cf5f8e06cfc 100644
--- a/drivers/iio/inkern.c
+++ b/drivers/iio/inkern.c
@@ -451,7 +451,7 @@ static int iio_convert_raw_to_processed_unlocked(struct iio_channel *chan,
451 int ret; 451 int ret;
452 452
453 ret = iio_channel_read(chan, &offset, NULL, IIO_CHAN_INFO_OFFSET); 453 ret = iio_channel_read(chan, &offset, NULL, IIO_CHAN_INFO_OFFSET);
454 if (ret == 0) 454 if (ret >= 0)
455 raw64 += offset; 455 raw64 += offset;
456 456
457 scale_type = iio_channel_read(chan, &scale_val, &scale_val2, 457 scale_type = iio_channel_read(chan, &scale_val, &scale_val2,
diff --git a/drivers/iio/pressure/st_pressure_core.c b/drivers/iio/pressure/st_pressure_core.c
index 9c343b40665e..3ffbc56917b4 100644
--- a/drivers/iio/pressure/st_pressure_core.c
+++ b/drivers/iio/pressure/st_pressure_core.c
@@ -28,7 +28,9 @@
28#include <linux/iio/common/st_sensors.h> 28#include <linux/iio/common/st_sensors.h>
29#include "st_pressure.h" 29#include "st_pressure.h"
30 30
31#define ST_PRESS_MBAR_TO_KPASCAL(x) (x * 10) 31#define ST_PRESS_LSB_PER_MBAR 4096UL
32#define ST_PRESS_KPASCAL_NANO_SCALE (100000000UL / \
33 ST_PRESS_LSB_PER_MBAR)
32#define ST_PRESS_NUMBER_DATA_CHANNELS 1 34#define ST_PRESS_NUMBER_DATA_CHANNELS 1
33 35
34/* DEFAULT VALUE FOR SENSORS */ 36/* DEFAULT VALUE FOR SENSORS */
@@ -51,8 +53,8 @@
51#define ST_PRESS_1_FS_ADDR 0x23 53#define ST_PRESS_1_FS_ADDR 0x23
52#define ST_PRESS_1_FS_MASK 0x30 54#define ST_PRESS_1_FS_MASK 0x30
53#define ST_PRESS_1_FS_AVL_1260_VAL 0x00 55#define ST_PRESS_1_FS_AVL_1260_VAL 0x00
54#define ST_PRESS_1_FS_AVL_1260_GAIN ST_PRESS_MBAR_TO_KPASCAL(244141)
55#define ST_PRESS_1_FS_AVL_TEMP_GAIN 2083000 56#define ST_PRESS_1_FS_AVL_TEMP_GAIN 2083000
57#define ST_PRESS_1_FS_AVL_1260_GAIN ST_PRESS_KPASCAL_NANO_SCALE
56#define ST_PRESS_1_BDU_ADDR 0x20 58#define ST_PRESS_1_BDU_ADDR 0x20
57#define ST_PRESS_1_BDU_MASK 0x04 59#define ST_PRESS_1_BDU_MASK 0x04
58#define ST_PRESS_1_DRDY_IRQ_ADDR 0x22 60#define ST_PRESS_1_DRDY_IRQ_ADDR 0x22
diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c
index f1c279fabe64..7c0f9535fb7d 100644
--- a/drivers/infiniband/core/cma.c
+++ b/drivers/infiniband/core/cma.c
@@ -423,7 +423,7 @@ static int cma_resolve_ib_dev(struct rdma_id_private *id_priv)
423 struct sockaddr_ib *addr; 423 struct sockaddr_ib *addr;
424 union ib_gid gid, sgid, *dgid; 424 union ib_gid gid, sgid, *dgid;
425 u16 pkey, index; 425 u16 pkey, index;
426 u8 port, p; 426 u8 p;
427 int i; 427 int i;
428 428
429 cma_dev = NULL; 429 cma_dev = NULL;
@@ -443,7 +443,7 @@ static int cma_resolve_ib_dev(struct rdma_id_private *id_priv)
443 if (!memcmp(&gid, dgid, sizeof(gid))) { 443 if (!memcmp(&gid, dgid, sizeof(gid))) {
444 cma_dev = cur_dev; 444 cma_dev = cur_dev;
445 sgid = gid; 445 sgid = gid;
446 port = p; 446 id_priv->id.port_num = p;
447 goto found; 447 goto found;
448 } 448 }
449 449
@@ -451,7 +451,7 @@ static int cma_resolve_ib_dev(struct rdma_id_private *id_priv)
451 dgid->global.subnet_prefix)) { 451 dgid->global.subnet_prefix)) {
452 cma_dev = cur_dev; 452 cma_dev = cur_dev;
453 sgid = gid; 453 sgid = gid;
454 port = p; 454 id_priv->id.port_num = p;
455 } 455 }
456 } 456 }
457 } 457 }
@@ -462,7 +462,6 @@ static int cma_resolve_ib_dev(struct rdma_id_private *id_priv)
462 462
463found: 463found:
464 cma_attach_to_dev(id_priv, cma_dev); 464 cma_attach_to_dev(id_priv, cma_dev);
465 id_priv->id.port_num = port;
466 addr = (struct sockaddr_ib *) cma_src_addr(id_priv); 465 addr = (struct sockaddr_ib *) cma_src_addr(id_priv);
467 memcpy(&addr->sib_addr, &sgid, sizeof sgid); 466 memcpy(&addr->sib_addr, &sgid, sizeof sgid);
468 cma_translate_ib(addr, &id_priv->id.route.addr.dev_addr); 467 cma_translate_ib(addr, &id_priv->id.route.addr.dev_addr);
@@ -880,7 +879,8 @@ static int cma_save_net_info(struct rdma_cm_id *id, struct rdma_cm_id *listen_id
880{ 879{
881 struct cma_hdr *hdr; 880 struct cma_hdr *hdr;
882 881
883 if (listen_id->route.addr.src_addr.ss_family == AF_IB) { 882 if ((listen_id->route.addr.src_addr.ss_family == AF_IB) &&
883 (ib_event->event == IB_CM_REQ_RECEIVED)) {
884 cma_save_ib_info(id, listen_id, ib_event->param.req_rcvd.primary_path); 884 cma_save_ib_info(id, listen_id, ib_event->param.req_rcvd.primary_path);
885 return 0; 885 return 0;
886 } 886 }
@@ -2677,29 +2677,32 @@ static int cma_resolve_ib_udp(struct rdma_id_private *id_priv,
2677{ 2677{
2678 struct ib_cm_sidr_req_param req; 2678 struct ib_cm_sidr_req_param req;
2679 struct ib_cm_id *id; 2679 struct ib_cm_id *id;
2680 void *private_data;
2680 int offset, ret; 2681 int offset, ret;
2681 2682
2683 memset(&req, 0, sizeof req);
2682 offset = cma_user_data_offset(id_priv); 2684 offset = cma_user_data_offset(id_priv);
2683 req.private_data_len = offset + conn_param->private_data_len; 2685 req.private_data_len = offset + conn_param->private_data_len;
2684 if (req.private_data_len < conn_param->private_data_len) 2686 if (req.private_data_len < conn_param->private_data_len)
2685 return -EINVAL; 2687 return -EINVAL;
2686 2688
2687 if (req.private_data_len) { 2689 if (req.private_data_len) {
2688 req.private_data = kzalloc(req.private_data_len, GFP_ATOMIC); 2690 private_data = kzalloc(req.private_data_len, GFP_ATOMIC);
2689 if (!req.private_data) 2691 if (!private_data)
2690 return -ENOMEM; 2692 return -ENOMEM;
2691 } else { 2693 } else {
2692 req.private_data = NULL; 2694 private_data = NULL;
2693 } 2695 }
2694 2696
2695 if (conn_param->private_data && conn_param->private_data_len) 2697 if (conn_param->private_data && conn_param->private_data_len)
2696 memcpy((void *) req.private_data + offset, 2698 memcpy(private_data + offset, conn_param->private_data,
2697 conn_param->private_data, conn_param->private_data_len); 2699 conn_param->private_data_len);
2698 2700
2699 if (req.private_data) { 2701 if (private_data) {
2700 ret = cma_format_hdr((void *) req.private_data, id_priv); 2702 ret = cma_format_hdr(private_data, id_priv);
2701 if (ret) 2703 if (ret)
2702 goto out; 2704 goto out;
2705 req.private_data = private_data;
2703 } 2706 }
2704 2707
2705 id = ib_create_cm_id(id_priv->id.device, cma_sidr_rep_handler, 2708 id = ib_create_cm_id(id_priv->id.device, cma_sidr_rep_handler,
@@ -2721,7 +2724,7 @@ static int cma_resolve_ib_udp(struct rdma_id_private *id_priv,
2721 id_priv->cm_id.ib = NULL; 2724 id_priv->cm_id.ib = NULL;
2722 } 2725 }
2723out: 2726out:
2724 kfree(req.private_data); 2727 kfree(private_data);
2725 return ret; 2728 return ret;
2726} 2729}
2727 2730
diff --git a/drivers/infiniband/core/mad.c b/drivers/infiniband/core/mad.c
index dc3fd1e8af07..4c837e66516b 100644
--- a/drivers/infiniband/core/mad.c
+++ b/drivers/infiniband/core/mad.c
@@ -2663,6 +2663,7 @@ static int ib_mad_port_start(struct ib_mad_port_private *port_priv)
2663 int ret, i; 2663 int ret, i;
2664 struct ib_qp_attr *attr; 2664 struct ib_qp_attr *attr;
2665 struct ib_qp *qp; 2665 struct ib_qp *qp;
2666 u16 pkey_index;
2666 2667
2667 attr = kmalloc(sizeof *attr, GFP_KERNEL); 2668 attr = kmalloc(sizeof *attr, GFP_KERNEL);
2668 if (!attr) { 2669 if (!attr) {
@@ -2670,6 +2671,11 @@ static int ib_mad_port_start(struct ib_mad_port_private *port_priv)
2670 return -ENOMEM; 2671 return -ENOMEM;
2671 } 2672 }
2672 2673
2674 ret = ib_find_pkey(port_priv->device, port_priv->port_num,
2675 IB_DEFAULT_PKEY_FULL, &pkey_index);
2676 if (ret)
2677 pkey_index = 0;
2678
2673 for (i = 0; i < IB_MAD_QPS_CORE; i++) { 2679 for (i = 0; i < IB_MAD_QPS_CORE; i++) {
2674 qp = port_priv->qp_info[i].qp; 2680 qp = port_priv->qp_info[i].qp;
2675 if (!qp) 2681 if (!qp)
@@ -2680,7 +2686,7 @@ static int ib_mad_port_start(struct ib_mad_port_private *port_priv)
2680 * one is needed for the Reset to Init transition 2686 * one is needed for the Reset to Init transition
2681 */ 2687 */
2682 attr->qp_state = IB_QPS_INIT; 2688 attr->qp_state = IB_QPS_INIT;
2683 attr->pkey_index = 0; 2689 attr->pkey_index = pkey_index;
2684 attr->qkey = (qp->qp_num == 0) ? 0 : IB_QP1_QKEY; 2690 attr->qkey = (qp->qp_num == 0) ? 0 : IB_QP1_QKEY;
2685 ret = ib_modify_qp(qp, attr, IB_QP_STATE | 2691 ret = ib_modify_qp(qp, attr, IB_QP_STATE |
2686 IB_QP_PKEY_INDEX | IB_QP_QKEY); 2692 IB_QP_PKEY_INDEX | IB_QP_QKEY);
diff --git a/drivers/infiniband/hw/cxgb3/iwch_provider.c b/drivers/infiniband/hw/cxgb3/iwch_provider.c
index e87f2201b220..d2283837d451 100644
--- a/drivers/infiniband/hw/cxgb3/iwch_provider.c
+++ b/drivers/infiniband/hw/cxgb3/iwch_provider.c
@@ -226,6 +226,7 @@ static struct ib_cq *iwch_create_cq(struct ib_device *ibdev, int entries, int ve
226 mm->len = PAGE_ALIGN(((1UL << uresp.size_log2) + 1) * 226 mm->len = PAGE_ALIGN(((1UL << uresp.size_log2) + 1) *
227 sizeof(struct t3_cqe)); 227 sizeof(struct t3_cqe));
228 uresp.memsize = mm->len; 228 uresp.memsize = mm->len;
229 uresp.reserved = 0;
229 resplen = sizeof uresp; 230 resplen = sizeof uresp;
230 } 231 }
231 if (ib_copy_to_udata(udata, &uresp, resplen)) { 232 if (ib_copy_to_udata(udata, &uresp, resplen)) {
diff --git a/drivers/infiniband/hw/cxgb4/qp.c b/drivers/infiniband/hw/cxgb4/qp.c
index 232040447e8a..a4975e1654a6 100644
--- a/drivers/infiniband/hw/cxgb4/qp.c
+++ b/drivers/infiniband/hw/cxgb4/qp.c
@@ -1657,6 +1657,8 @@ struct ib_qp *c4iw_create_qp(struct ib_pd *pd, struct ib_qp_init_attr *attrs,
1657 if (mm5) { 1657 if (mm5) {
1658 uresp.ma_sync_key = ucontext->key; 1658 uresp.ma_sync_key = ucontext->key;
1659 ucontext->key += PAGE_SIZE; 1659 ucontext->key += PAGE_SIZE;
1660 } else {
1661 uresp.ma_sync_key = 0;
1660 } 1662 }
1661 uresp.sq_key = ucontext->key; 1663 uresp.sq_key = ucontext->key;
1662 ucontext->key += PAGE_SIZE; 1664 ucontext->key += PAGE_SIZE;
diff --git a/drivers/infiniband/hw/mlx4/mad.c b/drivers/infiniband/hw/mlx4/mad.c
index 4d599cedbb0b..f2a3f48107e7 100644
--- a/drivers/infiniband/hw/mlx4/mad.c
+++ b/drivers/infiniband/hw/mlx4/mad.c
@@ -1511,8 +1511,14 @@ static int create_pv_sqp(struct mlx4_ib_demux_pv_ctx *ctx,
1511 1511
1512 memset(&attr, 0, sizeof attr); 1512 memset(&attr, 0, sizeof attr);
1513 attr.qp_state = IB_QPS_INIT; 1513 attr.qp_state = IB_QPS_INIT;
1514 attr.pkey_index = 1514 ret = 0;
1515 to_mdev(ctx->ib_dev)->pkeys.virt2phys_pkey[ctx->slave][ctx->port - 1][0]; 1515 if (create_tun)
1516 ret = find_slave_port_pkey_ix(to_mdev(ctx->ib_dev), ctx->slave,
1517 ctx->port, IB_DEFAULT_PKEY_FULL,
1518 &attr.pkey_index);
1519 if (ret || !create_tun)
1520 attr.pkey_index =
1521 to_mdev(ctx->ib_dev)->pkeys.virt2phys_pkey[ctx->slave][ctx->port - 1][0];
1516 attr.qkey = IB_QP1_QKEY; 1522 attr.qkey = IB_QP1_QKEY;
1517 attr.port_num = ctx->port; 1523 attr.port_num = ctx->port;
1518 ret = ib_modify_qp(tun_qp->qp, &attr, qp_attr_mask_INIT); 1524 ret = ib_modify_qp(tun_qp->qp, &attr, qp_attr_mask_INIT);
diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c
index 8000fff4d444..3f831de9a4d8 100644
--- a/drivers/infiniband/hw/mlx5/main.c
+++ b/drivers/infiniband/hw/mlx5/main.c
@@ -619,7 +619,8 @@ static struct ib_ucontext *mlx5_ib_alloc_ucontext(struct ib_device *ibdev,
619 619
620 resp.tot_uuars = req.total_num_uuars; 620 resp.tot_uuars = req.total_num_uuars;
621 resp.num_ports = dev->mdev.caps.num_ports; 621 resp.num_ports = dev->mdev.caps.num_ports;
622 err = ib_copy_to_udata(udata, &resp, sizeof(resp)); 622 err = ib_copy_to_udata(udata, &resp,
623 sizeof(resp) - sizeof(resp.reserved));
623 if (err) 624 if (err)
624 goto out_uars; 625 goto out_uars;
625 626
@@ -1426,7 +1427,8 @@ static int init_one(struct pci_dev *pdev,
1426 if (err) 1427 if (err)
1427 goto err_eqs; 1428 goto err_eqs;
1428 1429
1429 if (ib_register_device(&dev->ib_dev, NULL)) 1430 err = ib_register_device(&dev->ib_dev, NULL);
1431 if (err)
1430 goto err_rsrc; 1432 goto err_rsrc;
1431 1433
1432 err = create_umr_res(dev); 1434 err = create_umr_res(dev);
@@ -1434,8 +1436,9 @@ static int init_one(struct pci_dev *pdev,
1434 goto err_dev; 1436 goto err_dev;
1435 1437
1436 for (i = 0; i < ARRAY_SIZE(mlx5_class_attributes); i++) { 1438 for (i = 0; i < ARRAY_SIZE(mlx5_class_attributes); i++) {
1437 if (device_create_file(&dev->ib_dev.dev, 1439 err = device_create_file(&dev->ib_dev.dev,
1438 mlx5_class_attributes[i])) 1440 mlx5_class_attributes[i]);
1441 if (err)
1439 goto err_umrc; 1442 goto err_umrc;
1440 } 1443 }
1441 1444
diff --git a/drivers/infiniband/hw/mlx5/qp.c b/drivers/infiniband/hw/mlx5/qp.c
index 16ac54c9819f..045f8cdbd303 100644
--- a/drivers/infiniband/hw/mlx5/qp.c
+++ b/drivers/infiniband/hw/mlx5/qp.c
@@ -199,7 +199,7 @@ static int set_rq_size(struct mlx5_ib_dev *dev, struct ib_qp_cap *cap,
199 199
200static int sq_overhead(enum ib_qp_type qp_type) 200static int sq_overhead(enum ib_qp_type qp_type)
201{ 201{
202 int size; 202 int size = 0;
203 203
204 switch (qp_type) { 204 switch (qp_type) {
205 case IB_QPT_XRC_INI: 205 case IB_QPT_XRC_INI:
diff --git a/drivers/infiniband/hw/nes/nes_hw.c b/drivers/infiniband/hw/nes/nes_hw.c
index 418004c93feb..90200245c5eb 100644
--- a/drivers/infiniband/hw/nes/nes_hw.c
+++ b/drivers/infiniband/hw/nes/nes_hw.c
@@ -3570,10 +3570,10 @@ static void nes_process_iwarp_aeqe(struct nes_device *nesdev,
3570 tcp_state = (aeq_info & NES_AEQE_TCP_STATE_MASK) >> NES_AEQE_TCP_STATE_SHIFT; 3570 tcp_state = (aeq_info & NES_AEQE_TCP_STATE_MASK) >> NES_AEQE_TCP_STATE_SHIFT;
3571 iwarp_state = (aeq_info & NES_AEQE_IWARP_STATE_MASK) >> NES_AEQE_IWARP_STATE_SHIFT; 3571 iwarp_state = (aeq_info & NES_AEQE_IWARP_STATE_MASK) >> NES_AEQE_IWARP_STATE_SHIFT;
3572 nes_debug(NES_DBG_AEQ, "aeid = 0x%04X, qp-cq id = %d, aeqe = %p," 3572 nes_debug(NES_DBG_AEQ, "aeid = 0x%04X, qp-cq id = %d, aeqe = %p,"
3573 " Tcp state = %d, iWARP state = %d\n", 3573 " Tcp state = %s, iWARP state = %s\n",
3574 async_event_id, 3574 async_event_id,
3575 le32_to_cpu(aeqe->aeqe_words[NES_AEQE_COMP_QP_CQ_ID_IDX]), aeqe, 3575 le32_to_cpu(aeqe->aeqe_words[NES_AEQE_COMP_QP_CQ_ID_IDX]), aeqe,
3576 tcp_state, iwarp_state); 3576 nes_tcp_state_str[tcp_state], nes_iwarp_state_str[iwarp_state]);
3577 3577
3578 aeqe_cq_id = le32_to_cpu(aeqe->aeqe_words[NES_AEQE_COMP_QP_CQ_ID_IDX]); 3578 aeqe_cq_id = le32_to_cpu(aeqe->aeqe_words[NES_AEQE_COMP_QP_CQ_ID_IDX]);
3579 if (aeq_info & NES_AEQE_QP) { 3579 if (aeq_info & NES_AEQE_QP) {
diff --git a/drivers/infiniband/hw/nes/nes_verbs.c b/drivers/infiniband/hw/nes/nes_verbs.c
index 8f67fe2e91e6..5b53ca5a2284 100644
--- a/drivers/infiniband/hw/nes/nes_verbs.c
+++ b/drivers/infiniband/hw/nes/nes_verbs.c
@@ -1384,6 +1384,7 @@ static struct ib_qp *nes_create_qp(struct ib_pd *ibpd,
1384 1384
1385 if (ibpd->uobject) { 1385 if (ibpd->uobject) {
1386 uresp.mmap_sq_db_index = nesqp->mmap_sq_db_index; 1386 uresp.mmap_sq_db_index = nesqp->mmap_sq_db_index;
1387 uresp.mmap_rq_db_index = 0;
1387 uresp.actual_sq_size = sq_size; 1388 uresp.actual_sq_size = sq_size;
1388 uresp.actual_rq_size = rq_size; 1389 uresp.actual_rq_size = rq_size;
1389 uresp.qp_id = nesqp->hwqp.qp_id; 1390 uresp.qp_id = nesqp->hwqp.qp_id;
@@ -1767,7 +1768,7 @@ static struct ib_cq *nes_create_cq(struct ib_device *ibdev, int entries,
1767 resp.cq_id = nescq->hw_cq.cq_number; 1768 resp.cq_id = nescq->hw_cq.cq_number;
1768 resp.cq_size = nescq->hw_cq.cq_size; 1769 resp.cq_size = nescq->hw_cq.cq_size;
1769 resp.mmap_db_index = 0; 1770 resp.mmap_db_index = 0;
1770 if (ib_copy_to_udata(udata, &resp, sizeof resp)) { 1771 if (ib_copy_to_udata(udata, &resp, sizeof resp - sizeof resp.reserved)) {
1771 nes_free_resource(nesadapter, nesadapter->allocated_cqs, cq_num); 1772 nes_free_resource(nesadapter, nesadapter->allocated_cqs, cq_num);
1772 kfree(nescq); 1773 kfree(nescq);
1773 return ERR_PTR(-EFAULT); 1774 return ERR_PTR(-EFAULT);
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_ah.c b/drivers/infiniband/hw/ocrdma/ocrdma_ah.c
index a877a8ed7907..f4c587c68f64 100644
--- a/drivers/infiniband/hw/ocrdma/ocrdma_ah.c
+++ b/drivers/infiniband/hw/ocrdma/ocrdma_ah.c
@@ -29,7 +29,6 @@
29#include <net/netevent.h> 29#include <net/netevent.h>
30 30
31#include <rdma/ib_addr.h> 31#include <rdma/ib_addr.h>
32#include <rdma/ib_cache.h>
33 32
34#include "ocrdma.h" 33#include "ocrdma.h"
35#include "ocrdma_verbs.h" 34#include "ocrdma_verbs.h"
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c b/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c
index dcfbab177faa..f36630e4b6be 100644
--- a/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c
+++ b/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c
@@ -242,6 +242,7 @@ struct ib_ucontext *ocrdma_alloc_ucontext(struct ib_device *ibdev,
242 memset(ctx->ah_tbl.va, 0, map_len); 242 memset(ctx->ah_tbl.va, 0, map_len);
243 ctx->ah_tbl.len = map_len; 243 ctx->ah_tbl.len = map_len;
244 244
245 memset(&resp, 0, sizeof(resp));
245 resp.ah_tbl_len = ctx->ah_tbl.len; 246 resp.ah_tbl_len = ctx->ah_tbl.len;
246 resp.ah_tbl_page = ctx->ah_tbl.pa; 247 resp.ah_tbl_page = ctx->ah_tbl.pa;
247 248
@@ -253,7 +254,6 @@ struct ib_ucontext *ocrdma_alloc_ucontext(struct ib_device *ibdev,
253 resp.wqe_size = dev->attr.wqe_size; 254 resp.wqe_size = dev->attr.wqe_size;
254 resp.rqe_size = dev->attr.rqe_size; 255 resp.rqe_size = dev->attr.rqe_size;
255 resp.dpp_wqe_size = dev->attr.wqe_size; 256 resp.dpp_wqe_size = dev->attr.wqe_size;
256 resp.rsvd = 0;
257 257
258 memcpy(resp.fw_ver, dev->attr.fw_ver, sizeof(resp.fw_ver)); 258 memcpy(resp.fw_ver, dev->attr.fw_ver, sizeof(resp.fw_ver));
259 status = ib_copy_to_udata(udata, &resp, sizeof(resp)); 259 status = ib_copy_to_udata(udata, &resp, sizeof(resp));
@@ -338,6 +338,7 @@ static int ocrdma_copy_pd_uresp(struct ocrdma_pd *pd,
338 struct ocrdma_alloc_pd_uresp rsp; 338 struct ocrdma_alloc_pd_uresp rsp;
339 struct ocrdma_ucontext *uctx = get_ocrdma_ucontext(ib_ctx); 339 struct ocrdma_ucontext *uctx = get_ocrdma_ucontext(ib_ctx);
340 340
341 memset(&rsp, 0, sizeof(rsp));
341 rsp.id = pd->id; 342 rsp.id = pd->id;
342 rsp.dpp_enabled = pd->dpp_enabled; 343 rsp.dpp_enabled = pd->dpp_enabled;
343 db_page_addr = pd->dev->nic_info.unmapped_db + 344 db_page_addr = pd->dev->nic_info.unmapped_db +
@@ -692,6 +693,7 @@ static int ocrdma_copy_cq_uresp(struct ocrdma_cq *cq, struct ib_udata *udata,
692 struct ocrdma_ucontext *uctx; 693 struct ocrdma_ucontext *uctx;
693 struct ocrdma_create_cq_uresp uresp; 694 struct ocrdma_create_cq_uresp uresp;
694 695
696 memset(&uresp, 0, sizeof(uresp));
695 uresp.cq_id = cq->id; 697 uresp.cq_id = cq->id;
696 uresp.page_size = cq->len; 698 uresp.page_size = cq->len;
697 uresp.num_pages = 1; 699 uresp.num_pages = 1;
@@ -1460,6 +1462,7 @@ static int ocrdma_copy_srq_uresp(struct ocrdma_srq *srq, struct ib_udata *udata)
1460 int status; 1462 int status;
1461 struct ocrdma_create_srq_uresp uresp; 1463 struct ocrdma_create_srq_uresp uresp;
1462 1464
1465 memset(&uresp, 0, sizeof(uresp));
1463 uresp.rq_dbid = srq->rq.dbid; 1466 uresp.rq_dbid = srq->rq.dbid;
1464 uresp.num_rq_pages = 1; 1467 uresp.num_rq_pages = 1;
1465 uresp.rq_page_addr[0] = srq->rq.pa; 1468 uresp.rq_page_addr[0] = srq->rq.pa;
diff --git a/drivers/infiniband/hw/qib/qib_iba7322.c b/drivers/infiniband/hw/qib/qib_iba7322.c
index 21e8b09d4bf8..016e7429adf6 100644
--- a/drivers/infiniband/hw/qib/qib_iba7322.c
+++ b/drivers/infiniband/hw/qib/qib_iba7322.c
@@ -1596,6 +1596,8 @@ static void sdma_7322_p_errors(struct qib_pportdata *ppd, u64 errs)
1596 struct qib_devdata *dd = ppd->dd; 1596 struct qib_devdata *dd = ppd->dd;
1597 1597
1598 errs &= QIB_E_P_SDMAERRS; 1598 errs &= QIB_E_P_SDMAERRS;
1599 err_decode(ppd->cpspec->sdmamsgbuf, sizeof(ppd->cpspec->sdmamsgbuf),
1600 errs, qib_7322p_error_msgs);
1599 1601
1600 if (errs & QIB_E_P_SDMAUNEXPDATA) 1602 if (errs & QIB_E_P_SDMAUNEXPDATA)
1601 qib_dev_err(dd, "IB%u:%u SDmaUnexpData\n", dd->unit, 1603 qib_dev_err(dd, "IB%u:%u SDmaUnexpData\n", dd->unit,
diff --git a/drivers/infiniband/hw/qib/qib_sdma.c b/drivers/infiniband/hw/qib/qib_sdma.c
index 32162d355370..9b5322d8cd5a 100644
--- a/drivers/infiniband/hw/qib/qib_sdma.c
+++ b/drivers/infiniband/hw/qib/qib_sdma.c
@@ -717,7 +717,7 @@ void dump_sdma_state(struct qib_pportdata *ppd)
717 struct qib_sdma_txreq *txp, *txpnext; 717 struct qib_sdma_txreq *txp, *txpnext;
718 __le64 *descqp; 718 __le64 *descqp;
719 u64 desc[2]; 719 u64 desc[2];
720 dma_addr_t addr; 720 u64 addr;
721 u16 gen, dwlen, dwoffset; 721 u16 gen, dwlen, dwoffset;
722 u16 head, tail, cnt; 722 u16 head, tail, cnt;
723 723
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_ib.c b/drivers/infiniband/ulp/ipoib/ipoib_ib.c
index 2cfa76f5d99e..196b1d13cbcb 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_ib.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_ib.c
@@ -932,12 +932,47 @@ int ipoib_ib_dev_init(struct net_device *dev, struct ib_device *ca, int port)
932 return 0; 932 return 0;
933} 933}
934 934
935/*
936 * Takes whatever value which is in pkey index 0 and updates priv->pkey
937 * returns 0 if the pkey value was changed.
938 */
939static inline int update_parent_pkey(struct ipoib_dev_priv *priv)
940{
941 int result;
942 u16 prev_pkey;
943
944 prev_pkey = priv->pkey;
945 result = ib_query_pkey(priv->ca, priv->port, 0, &priv->pkey);
946 if (result) {
947 ipoib_warn(priv, "ib_query_pkey port %d failed (ret = %d)\n",
948 priv->port, result);
949 return result;
950 }
951
952 priv->pkey |= 0x8000;
953
954 if (prev_pkey != priv->pkey) {
955 ipoib_dbg(priv, "pkey changed from 0x%x to 0x%x\n",
956 prev_pkey, priv->pkey);
957 /*
958 * Update the pkey in the broadcast address, while making sure to set
959 * the full membership bit, so that we join the right broadcast group.
960 */
961 priv->dev->broadcast[8] = priv->pkey >> 8;
962 priv->dev->broadcast[9] = priv->pkey & 0xff;
963 return 0;
964 }
965
966 return 1;
967}
968
935static void __ipoib_ib_dev_flush(struct ipoib_dev_priv *priv, 969static void __ipoib_ib_dev_flush(struct ipoib_dev_priv *priv,
936 enum ipoib_flush_level level) 970 enum ipoib_flush_level level)
937{ 971{
938 struct ipoib_dev_priv *cpriv; 972 struct ipoib_dev_priv *cpriv;
939 struct net_device *dev = priv->dev; 973 struct net_device *dev = priv->dev;
940 u16 new_index; 974 u16 new_index;
975 int result;
941 976
942 mutex_lock(&priv->vlan_mutex); 977 mutex_lock(&priv->vlan_mutex);
943 978
@@ -951,6 +986,10 @@ static void __ipoib_ib_dev_flush(struct ipoib_dev_priv *priv,
951 mutex_unlock(&priv->vlan_mutex); 986 mutex_unlock(&priv->vlan_mutex);
952 987
953 if (!test_bit(IPOIB_FLAG_INITIALIZED, &priv->flags)) { 988 if (!test_bit(IPOIB_FLAG_INITIALIZED, &priv->flags)) {
989 /* for non-child devices must check/update the pkey value here */
990 if (level == IPOIB_FLUSH_HEAVY &&
991 !test_bit(IPOIB_FLAG_SUBINTERFACE, &priv->flags))
992 update_parent_pkey(priv);
954 ipoib_dbg(priv, "Not flushing - IPOIB_FLAG_INITIALIZED not set.\n"); 993 ipoib_dbg(priv, "Not flushing - IPOIB_FLAG_INITIALIZED not set.\n");
955 return; 994 return;
956 } 995 }
@@ -961,21 +1000,32 @@ static void __ipoib_ib_dev_flush(struct ipoib_dev_priv *priv,
961 } 1000 }
962 1001
963 if (level == IPOIB_FLUSH_HEAVY) { 1002 if (level == IPOIB_FLUSH_HEAVY) {
964 if (ib_find_pkey(priv->ca, priv->port, priv->pkey, &new_index)) { 1003 /* child devices chase their origin pkey value, while non-child
965 clear_bit(IPOIB_PKEY_ASSIGNED, &priv->flags); 1004 * (parent) devices should always takes what present in pkey index 0
966 ipoib_ib_dev_down(dev, 0); 1005 */
967 ipoib_ib_dev_stop(dev, 0); 1006 if (test_bit(IPOIB_FLAG_SUBINTERFACE, &priv->flags)) {
968 if (ipoib_pkey_dev_delay_open(dev)) 1007 if (ib_find_pkey(priv->ca, priv->port, priv->pkey, &new_index)) {
1008 clear_bit(IPOIB_PKEY_ASSIGNED, &priv->flags);
1009 ipoib_ib_dev_down(dev, 0);
1010 ipoib_ib_dev_stop(dev, 0);
1011 if (ipoib_pkey_dev_delay_open(dev))
1012 return;
1013 }
1014 /* restart QP only if P_Key index is changed */
1015 if (test_and_set_bit(IPOIB_PKEY_ASSIGNED, &priv->flags) &&
1016 new_index == priv->pkey_index) {
1017 ipoib_dbg(priv, "Not flushing - P_Key index not changed.\n");
969 return; 1018 return;
1019 }
1020 priv->pkey_index = new_index;
1021 } else {
1022 result = update_parent_pkey(priv);
1023 /* restart QP only if P_Key value changed */
1024 if (result) {
1025 ipoib_dbg(priv, "Not flushing - P_Key value not changed.\n");
1026 return;
1027 }
970 } 1028 }
971
972 /* restart QP only if P_Key index is changed */
973 if (test_and_set_bit(IPOIB_PKEY_ASSIGNED, &priv->flags) &&
974 new_index == priv->pkey_index) {
975 ipoib_dbg(priv, "Not flushing - P_Key index not changed.\n");
976 return;
977 }
978 priv->pkey_index = new_index;
979 } 1029 }
980 1030
981 if (level == IPOIB_FLUSH_LIGHT) { 1031 if (level == IPOIB_FLUSH_LIGHT) {
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_main.c b/drivers/infiniband/ulp/ipoib/ipoib_main.c
index b6e049a3c7a8..c6f71a88c55c 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_main.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_main.c
@@ -1461,7 +1461,7 @@ static ssize_t create_child(struct device *dev,
1461 if (sscanf(buf, "%i", &pkey) != 1) 1461 if (sscanf(buf, "%i", &pkey) != 1)
1462 return -EINVAL; 1462 return -EINVAL;
1463 1463
1464 if (pkey < 0 || pkey > 0xffff) 1464 if (pkey <= 0 || pkey > 0xffff || pkey == 0x8000)
1465 return -EINVAL; 1465 return -EINVAL;
1466 1466
1467 /* 1467 /*
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_netlink.c b/drivers/infiniband/ulp/ipoib/ipoib_netlink.c
index 74685936c948..f81abe16cf09 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_netlink.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_netlink.c
@@ -119,6 +119,15 @@ static int ipoib_new_child_link(struct net *src_net, struct net_device *dev,
119 } else 119 } else
120 child_pkey = nla_get_u16(data[IFLA_IPOIB_PKEY]); 120 child_pkey = nla_get_u16(data[IFLA_IPOIB_PKEY]);
121 121
122 if (child_pkey == 0 || child_pkey == 0x8000)
123 return -EINVAL;
124
125 /*
126 * Set the full membership bit, so that we join the right
127 * broadcast group, etc.
128 */
129 child_pkey |= 0x8000;
130
122 err = __ipoib_vlan_add(ppriv, netdev_priv(dev), child_pkey, IPOIB_RTNL_CHILD); 131 err = __ipoib_vlan_add(ppriv, netdev_priv(dev), child_pkey, IPOIB_RTNL_CHILD);
123 132
124 if (!err && data) 133 if (!err && data)
diff --git a/drivers/irqchip/irq-gic.c b/drivers/irqchip/irq-gic.c
index 19ceaa60e0f4..ee7c50312066 100644
--- a/drivers/irqchip/irq-gic.c
+++ b/drivers/irqchip/irq-gic.c
@@ -414,7 +414,7 @@ static void __init gic_dist_init(struct gic_chip_data *gic)
414 writel_relaxed(1, base + GIC_DIST_CTRL); 414 writel_relaxed(1, base + GIC_DIST_CTRL);
415} 415}
416 416
417static void __cpuinit gic_cpu_init(struct gic_chip_data *gic) 417static void gic_cpu_init(struct gic_chip_data *gic)
418{ 418{
419 void __iomem *dist_base = gic_data_dist_base(gic); 419 void __iomem *dist_base = gic_data_dist_base(gic);
420 void __iomem *base = gic_data_cpu_base(gic); 420 void __iomem *base = gic_data_cpu_base(gic);
@@ -702,8 +702,8 @@ static int gic_irq_domain_xlate(struct irq_domain *d,
702} 702}
703 703
704#ifdef CONFIG_SMP 704#ifdef CONFIG_SMP
705static int __cpuinit gic_secondary_init(struct notifier_block *nfb, 705static int gic_secondary_init(struct notifier_block *nfb, unsigned long action,
706 unsigned long action, void *hcpu) 706 void *hcpu)
707{ 707{
708 if (action == CPU_STARTING || action == CPU_STARTING_FROZEN) 708 if (action == CPU_STARTING || action == CPU_STARTING_FROZEN)
709 gic_cpu_init(&gic_data[0]); 709 gic_cpu_init(&gic_data[0]);
@@ -714,7 +714,7 @@ static int __cpuinit gic_secondary_init(struct notifier_block *nfb,
714 * Notifier for enabling the GIC CPU interface. Set an arbitrarily high 714 * Notifier for enabling the GIC CPU interface. Set an arbitrarily high
715 * priority because the GIC needs to be up before the ARM generic timers. 715 * priority because the GIC needs to be up before the ARM generic timers.
716 */ 716 */
717static struct notifier_block __cpuinitdata gic_cpu_notifier = { 717static struct notifier_block gic_cpu_notifier = {
718 .notifier_call = gic_secondary_init, 718 .notifier_call = gic_secondary_init,
719 .priority = 100, 719 .priority = 100,
720}; 720};
diff --git a/drivers/macintosh/windfarm_rm31.c b/drivers/macintosh/windfarm_rm31.c
index 0b9a79b2f48a..82fc86a90c1a 100644
--- a/drivers/macintosh/windfarm_rm31.c
+++ b/drivers/macintosh/windfarm_rm31.c
@@ -439,15 +439,15 @@ static void backside_setup_pid(void)
439 439
440/* Slots fan */ 440/* Slots fan */
441static const struct wf_pid_param slots_param = { 441static const struct wf_pid_param slots_param = {
442 .interval = 5, 442 .interval = 1,
443 .history_len = 2, 443 .history_len = 20,
444 .gd = 30 << 20, 444 .gd = 0,
445 .gp = 5 << 20, 445 .gp = 0,
446 .gr = 0, 446 .gr = 0x00100000,
447 .itarget = 40 << 16, 447 .itarget = 3200000,
448 .additive = 1, 448 .additive = 0,
449 .min = 300, 449 .min = 20,
450 .max = 4000, 450 .max = 100,
451}; 451};
452 452
453static void slots_fan_tick(void) 453static void slots_fan_tick(void)
diff --git a/drivers/md/bcache/alloc.c b/drivers/md/bcache/alloc.c
index 048f2947e08b..e45f5575fd4d 100644
--- a/drivers/md/bcache/alloc.c
+++ b/drivers/md/bcache/alloc.c
@@ -63,7 +63,10 @@
63#include "bcache.h" 63#include "bcache.h"
64#include "btree.h" 64#include "btree.h"
65 65
66#include <linux/freezer.h>
67#include <linux/kthread.h>
66#include <linux/random.h> 68#include <linux/random.h>
69#include <trace/events/bcache.h>
67 70
68#define MAX_IN_FLIGHT_DISCARDS 8U 71#define MAX_IN_FLIGHT_DISCARDS 8U
69 72
@@ -151,7 +154,7 @@ static void discard_finish(struct work_struct *w)
151 mutex_unlock(&ca->set->bucket_lock); 154 mutex_unlock(&ca->set->bucket_lock);
152 155
153 closure_wake_up(&ca->set->bucket_wait); 156 closure_wake_up(&ca->set->bucket_wait);
154 wake_up(&ca->set->alloc_wait); 157 wake_up_process(ca->alloc_thread);
155 158
156 closure_put(&ca->set->cl); 159 closure_put(&ca->set->cl);
157} 160}
@@ -350,38 +353,30 @@ static void invalidate_buckets(struct cache *ca)
350 break; 353 break;
351 } 354 }
352 355
353 pr_debug("free %zu/%zu free_inc %zu/%zu unused %zu/%zu", 356 trace_bcache_alloc_invalidate(ca);
354 fifo_used(&ca->free), ca->free.size,
355 fifo_used(&ca->free_inc), ca->free_inc.size,
356 fifo_used(&ca->unused), ca->unused.size);
357} 357}
358 358
359#define allocator_wait(ca, cond) \ 359#define allocator_wait(ca, cond) \
360do { \ 360do { \
361 DEFINE_WAIT(__wait); \
362 \
363 while (1) { \ 361 while (1) { \
364 prepare_to_wait(&ca->set->alloc_wait, \ 362 set_current_state(TASK_INTERRUPTIBLE); \
365 &__wait, TASK_INTERRUPTIBLE); \
366 if (cond) \ 363 if (cond) \
367 break; \ 364 break; \
368 \ 365 \
369 mutex_unlock(&(ca)->set->bucket_lock); \ 366 mutex_unlock(&(ca)->set->bucket_lock); \
370 if (test_bit(CACHE_SET_STOPPING_2, &ca->set->flags)) { \ 367 if (kthread_should_stop()) \
371 finish_wait(&ca->set->alloc_wait, &__wait); \ 368 return 0; \
372 closure_return(cl); \
373 } \
374 \ 369 \
370 try_to_freeze(); \
375 schedule(); \ 371 schedule(); \
376 mutex_lock(&(ca)->set->bucket_lock); \ 372 mutex_lock(&(ca)->set->bucket_lock); \
377 } \ 373 } \
378 \ 374 __set_current_state(TASK_RUNNING); \
379 finish_wait(&ca->set->alloc_wait, &__wait); \
380} while (0) 375} while (0)
381 376
382void bch_allocator_thread(struct closure *cl) 377static int bch_allocator_thread(void *arg)
383{ 378{
384 struct cache *ca = container_of(cl, struct cache, alloc); 379 struct cache *ca = arg;
385 380
386 mutex_lock(&ca->set->bucket_lock); 381 mutex_lock(&ca->set->bucket_lock);
387 382
@@ -442,7 +437,7 @@ long bch_bucket_alloc(struct cache *ca, unsigned watermark, struct closure *cl)
442{ 437{
443 long r = -1; 438 long r = -1;
444again: 439again:
445 wake_up(&ca->set->alloc_wait); 440 wake_up_process(ca->alloc_thread);
446 441
447 if (fifo_used(&ca->free) > ca->watermark[watermark] && 442 if (fifo_used(&ca->free) > ca->watermark[watermark] &&
448 fifo_pop(&ca->free, r)) { 443 fifo_pop(&ca->free, r)) {
@@ -476,9 +471,7 @@ again:
476 return r; 471 return r;
477 } 472 }
478 473
479 pr_debug("alloc failure: blocked %i free %zu free_inc %zu unused %zu", 474 trace_bcache_alloc_fail(ca);
480 atomic_read(&ca->set->prio_blocked), fifo_used(&ca->free),
481 fifo_used(&ca->free_inc), fifo_used(&ca->unused));
482 475
483 if (cl) { 476 if (cl) {
484 closure_wait(&ca->set->bucket_wait, cl); 477 closure_wait(&ca->set->bucket_wait, cl);
@@ -552,6 +545,17 @@ int bch_bucket_alloc_set(struct cache_set *c, unsigned watermark,
552 545
553/* Init */ 546/* Init */
554 547
548int bch_cache_allocator_start(struct cache *ca)
549{
550 struct task_struct *k = kthread_run(bch_allocator_thread,
551 ca, "bcache_allocator");
552 if (IS_ERR(k))
553 return PTR_ERR(k);
554
555 ca->alloc_thread = k;
556 return 0;
557}
558
555void bch_cache_allocator_exit(struct cache *ca) 559void bch_cache_allocator_exit(struct cache *ca)
556{ 560{
557 struct discard *d; 561 struct discard *d;
diff --git a/drivers/md/bcache/bcache.h b/drivers/md/bcache/bcache.h
index d3e15b42a4ab..b39f6f0b45f2 100644
--- a/drivers/md/bcache/bcache.h
+++ b/drivers/md/bcache/bcache.h
@@ -178,7 +178,6 @@
178#define pr_fmt(fmt) "bcache: %s() " fmt "\n", __func__ 178#define pr_fmt(fmt) "bcache: %s() " fmt "\n", __func__
179 179
180#include <linux/bio.h> 180#include <linux/bio.h>
181#include <linux/blktrace_api.h>
182#include <linux/kobject.h> 181#include <linux/kobject.h>
183#include <linux/list.h> 182#include <linux/list.h>
184#include <linux/mutex.h> 183#include <linux/mutex.h>
@@ -388,8 +387,6 @@ struct keybuf_key {
388typedef bool (keybuf_pred_fn)(struct keybuf *, struct bkey *); 387typedef bool (keybuf_pred_fn)(struct keybuf *, struct bkey *);
389 388
390struct keybuf { 389struct keybuf {
391 keybuf_pred_fn *key_predicate;
392
393 struct bkey last_scanned; 390 struct bkey last_scanned;
394 spinlock_t lock; 391 spinlock_t lock;
395 392
@@ -437,9 +434,12 @@ struct bcache_device {
437 434
438 /* If nonzero, we're detaching/unregistering from cache set */ 435 /* If nonzero, we're detaching/unregistering from cache set */
439 atomic_t detaching; 436 atomic_t detaching;
437 int flush_done;
438
439 uint64_t nr_stripes;
440 unsigned stripe_size_bits;
441 atomic_t *stripe_sectors_dirty;
440 442
441 atomic_long_t sectors_dirty;
442 unsigned long sectors_dirty_gc;
443 unsigned long sectors_dirty_last; 443 unsigned long sectors_dirty_last;
444 long sectors_dirty_derivative; 444 long sectors_dirty_derivative;
445 445
@@ -531,6 +531,7 @@ struct cached_dev {
531 unsigned sequential_merge:1; 531 unsigned sequential_merge:1;
532 unsigned verify:1; 532 unsigned verify:1;
533 533
534 unsigned partial_stripes_expensive:1;
534 unsigned writeback_metadata:1; 535 unsigned writeback_metadata:1;
535 unsigned writeback_running:1; 536 unsigned writeback_running:1;
536 unsigned char writeback_percent; 537 unsigned char writeback_percent;
@@ -565,8 +566,7 @@ struct cache {
565 566
566 unsigned watermark[WATERMARK_MAX]; 567 unsigned watermark[WATERMARK_MAX];
567 568
568 struct closure alloc; 569 struct task_struct *alloc_thread;
569 struct workqueue_struct *alloc_workqueue;
570 570
571 struct closure prio; 571 struct closure prio;
572 struct prio_set *disk_buckets; 572 struct prio_set *disk_buckets;
@@ -664,13 +664,9 @@ struct gc_stat {
664 * CACHE_SET_STOPPING always gets set first when we're closing down a cache set; 664 * CACHE_SET_STOPPING always gets set first when we're closing down a cache set;
665 * we'll continue to run normally for awhile with CACHE_SET_STOPPING set (i.e. 665 * we'll continue to run normally for awhile with CACHE_SET_STOPPING set (i.e.
666 * flushing dirty data). 666 * flushing dirty data).
667 *
668 * CACHE_SET_STOPPING_2 gets set at the last phase, when it's time to shut down
669 * the allocation thread.
670 */ 667 */
671#define CACHE_SET_UNREGISTERING 0 668#define CACHE_SET_UNREGISTERING 0
672#define CACHE_SET_STOPPING 1 669#define CACHE_SET_STOPPING 1
673#define CACHE_SET_STOPPING_2 2
674 670
675struct cache_set { 671struct cache_set {
676 struct closure cl; 672 struct closure cl;
@@ -703,9 +699,6 @@ struct cache_set {
703 /* For the btree cache */ 699 /* For the btree cache */
704 struct shrinker shrink; 700 struct shrinker shrink;
705 701
706 /* For the allocator itself */
707 wait_queue_head_t alloc_wait;
708
709 /* For the btree cache and anything allocation related */ 702 /* For the btree cache and anything allocation related */
710 struct mutex bucket_lock; 703 struct mutex bucket_lock;
711 704
@@ -823,10 +816,9 @@ struct cache_set {
823 816
824 /* 817 /*
825 * A btree node on disk could have too many bsets for an iterator to fit 818 * A btree node on disk could have too many bsets for an iterator to fit
826 * on the stack - this is a single element mempool for btree_read_work() 819 * on the stack - have to dynamically allocate them
827 */ 820 */
828 struct mutex fill_lock; 821 mempool_t *fill_iter;
829 struct btree_iter *fill_iter;
830 822
831 /* 823 /*
832 * btree_sort() is a merge sort and requires temporary space - single 824 * btree_sort() is a merge sort and requires temporary space - single
@@ -834,6 +826,7 @@ struct cache_set {
834 */ 826 */
835 struct mutex sort_lock; 827 struct mutex sort_lock;
836 struct bset *sort; 828 struct bset *sort;
829 unsigned sort_crit_factor;
837 830
838 /* List of buckets we're currently writing data to */ 831 /* List of buckets we're currently writing data to */
839 struct list_head data_buckets; 832 struct list_head data_buckets;
@@ -906,8 +899,6 @@ static inline unsigned local_clock_us(void)
906 return local_clock() >> 10; 899 return local_clock() >> 10;
907} 900}
908 901
909#define MAX_BSETS 4U
910
911#define BTREE_PRIO USHRT_MAX 902#define BTREE_PRIO USHRT_MAX
912#define INITIAL_PRIO 32768 903#define INITIAL_PRIO 32768
913 904
@@ -1112,23 +1103,6 @@ static inline void __bkey_put(struct cache_set *c, struct bkey *k)
1112 atomic_dec_bug(&PTR_BUCKET(c, k, i)->pin); 1103 atomic_dec_bug(&PTR_BUCKET(c, k, i)->pin);
1113} 1104}
1114 1105
1115/* Blktrace macros */
1116
1117#define blktrace_msg(c, fmt, ...) \
1118do { \
1119 struct request_queue *q = bdev_get_queue(c->bdev); \
1120 if (q) \
1121 blk_add_trace_msg(q, fmt, ##__VA_ARGS__); \
1122} while (0)
1123
1124#define blktrace_msg_all(s, fmt, ...) \
1125do { \
1126 struct cache *_c; \
1127 unsigned i; \
1128 for_each_cache(_c, (s), i) \
1129 blktrace_msg(_c, fmt, ##__VA_ARGS__); \
1130} while (0)
1131
1132static inline void cached_dev_put(struct cached_dev *dc) 1106static inline void cached_dev_put(struct cached_dev *dc)
1133{ 1107{
1134 if (atomic_dec_and_test(&dc->count)) 1108 if (atomic_dec_and_test(&dc->count))
@@ -1173,10 +1147,16 @@ static inline uint8_t bucket_disk_gen(struct bucket *b)
1173 static struct kobj_attribute ksysfs_##n = \ 1147 static struct kobj_attribute ksysfs_##n = \
1174 __ATTR(n, S_IWUSR|S_IRUSR, show, store) 1148 __ATTR(n, S_IWUSR|S_IRUSR, show, store)
1175 1149
1176/* Forward declarations */ 1150static inline void wake_up_allocators(struct cache_set *c)
1151{
1152 struct cache *ca;
1153 unsigned i;
1154
1155 for_each_cache(ca, c, i)
1156 wake_up_process(ca->alloc_thread);
1157}
1177 1158
1178void bch_writeback_queue(struct cached_dev *); 1159/* Forward declarations */
1179void bch_writeback_add(struct cached_dev *, unsigned);
1180 1160
1181void bch_count_io_errors(struct cache *, int, const char *); 1161void bch_count_io_errors(struct cache *, int, const char *);
1182void bch_bbio_count_io_errors(struct cache_set *, struct bio *, 1162void bch_bbio_count_io_errors(struct cache_set *, struct bio *,
@@ -1193,7 +1173,6 @@ void bch_submit_bbio(struct bio *, struct cache_set *, struct bkey *, unsigned);
1193uint8_t bch_inc_gen(struct cache *, struct bucket *); 1173uint8_t bch_inc_gen(struct cache *, struct bucket *);
1194void bch_rescale_priorities(struct cache_set *, int); 1174void bch_rescale_priorities(struct cache_set *, int);
1195bool bch_bucket_add_unused(struct cache *, struct bucket *); 1175bool bch_bucket_add_unused(struct cache *, struct bucket *);
1196void bch_allocator_thread(struct closure *);
1197 1176
1198long bch_bucket_alloc(struct cache *, unsigned, struct closure *); 1177long bch_bucket_alloc(struct cache *, unsigned, struct closure *);
1199void bch_bucket_free(struct cache_set *, struct bkey *); 1178void bch_bucket_free(struct cache_set *, struct bkey *);
@@ -1241,9 +1220,9 @@ void bch_cache_set_stop(struct cache_set *);
1241struct cache_set *bch_cache_set_alloc(struct cache_sb *); 1220struct cache_set *bch_cache_set_alloc(struct cache_sb *);
1242void bch_btree_cache_free(struct cache_set *); 1221void bch_btree_cache_free(struct cache_set *);
1243int bch_btree_cache_alloc(struct cache_set *); 1222int bch_btree_cache_alloc(struct cache_set *);
1244void bch_cached_dev_writeback_init(struct cached_dev *);
1245void bch_moving_init_cache_set(struct cache_set *); 1223void bch_moving_init_cache_set(struct cache_set *);
1246 1224
1225int bch_cache_allocator_start(struct cache *ca);
1247void bch_cache_allocator_exit(struct cache *ca); 1226void bch_cache_allocator_exit(struct cache *ca);
1248int bch_cache_allocator_init(struct cache *ca); 1227int bch_cache_allocator_init(struct cache *ca);
1249 1228
diff --git a/drivers/md/bcache/bset.c b/drivers/md/bcache/bset.c
index 1d27d3af3251..8010eed06a51 100644
--- a/drivers/md/bcache/bset.c
+++ b/drivers/md/bcache/bset.c
@@ -78,6 +78,7 @@ struct bkey *bch_keylist_pop(struct keylist *l)
78bool __bch_ptr_invalid(struct cache_set *c, int level, const struct bkey *k) 78bool __bch_ptr_invalid(struct cache_set *c, int level, const struct bkey *k)
79{ 79{
80 unsigned i; 80 unsigned i;
81 char buf[80];
81 82
82 if (level && (!KEY_PTRS(k) || !KEY_SIZE(k) || KEY_DIRTY(k))) 83 if (level && (!KEY_PTRS(k) || !KEY_SIZE(k) || KEY_DIRTY(k)))
83 goto bad; 84 goto bad;
@@ -102,7 +103,8 @@ bool __bch_ptr_invalid(struct cache_set *c, int level, const struct bkey *k)
102 103
103 return false; 104 return false;
104bad: 105bad:
105 cache_bug(c, "spotted bad key %s: %s", pkey(k), bch_ptr_status(c, k)); 106 bch_bkey_to_text(buf, sizeof(buf), k);
107 cache_bug(c, "spotted bad key %s: %s", buf, bch_ptr_status(c, k));
106 return true; 108 return true;
107} 109}
108 110
@@ -162,10 +164,16 @@ bool bch_ptr_bad(struct btree *b, const struct bkey *k)
162#ifdef CONFIG_BCACHE_EDEBUG 164#ifdef CONFIG_BCACHE_EDEBUG
163bug: 165bug:
164 mutex_unlock(&b->c->bucket_lock); 166 mutex_unlock(&b->c->bucket_lock);
165 btree_bug(b, 167
168 {
169 char buf[80];
170
171 bch_bkey_to_text(buf, sizeof(buf), k);
172 btree_bug(b,
166"inconsistent pointer %s: bucket %zu pin %i prio %i gen %i last_gc %i mark %llu gc_gen %i", 173"inconsistent pointer %s: bucket %zu pin %i prio %i gen %i last_gc %i mark %llu gc_gen %i",
167 pkey(k), PTR_BUCKET_NR(b->c, k, i), atomic_read(&g->pin), 174 buf, PTR_BUCKET_NR(b->c, k, i), atomic_read(&g->pin),
168 g->prio, g->gen, g->last_gc, GC_MARK(g), g->gc_gen); 175 g->prio, g->gen, g->last_gc, GC_MARK(g), g->gc_gen);
176 }
169 return true; 177 return true;
170#endif 178#endif
171} 179}
@@ -1084,33 +1092,39 @@ void bch_btree_sort_into(struct btree *b, struct btree *new)
1084 new->sets->size = 0; 1092 new->sets->size = 0;
1085} 1093}
1086 1094
1095#define SORT_CRIT (4096 / sizeof(uint64_t))
1096
1087void bch_btree_sort_lazy(struct btree *b) 1097void bch_btree_sort_lazy(struct btree *b)
1088{ 1098{
1089 if (b->nsets) { 1099 unsigned crit = SORT_CRIT;
1090 unsigned i, j, keys = 0, total; 1100 int i;
1091 1101
1092 for (i = 0; i <= b->nsets; i++) 1102 /* Don't sort if nothing to do */
1093 keys += b->sets[i].data->keys; 1103 if (!b->nsets)
1094 1104 goto out;
1095 total = keys;
1096 1105
1097 for (j = 0; j < b->nsets; j++) { 1106 /* If not a leaf node, always sort */
1098 if (keys * 2 < total || 1107 if (b->level) {
1099 keys < 1000) { 1108 bch_btree_sort(b);
1100 bch_btree_sort_partial(b, j); 1109 return;
1101 return; 1110 }
1102 }
1103 1111
1104 keys -= b->sets[j].data->keys; 1112 for (i = b->nsets - 1; i >= 0; --i) {
1105 } 1113 crit *= b->c->sort_crit_factor;
1106 1114
1107 /* Must sort if b->nsets == 3 or we'll overflow */ 1115 if (b->sets[i].data->keys < crit) {
1108 if (b->nsets >= (MAX_BSETS - 1) - b->level) { 1116 bch_btree_sort_partial(b, i);
1109 bch_btree_sort(b);
1110 return; 1117 return;
1111 } 1118 }
1112 } 1119 }
1113 1120
1121 /* Sort if we'd overflow */
1122 if (b->nsets + 1 == MAX_BSETS) {
1123 bch_btree_sort(b);
1124 return;
1125 }
1126
1127out:
1114 bset_build_written_tree(b); 1128 bset_build_written_tree(b);
1115} 1129}
1116 1130
diff --git a/drivers/md/bcache/bset.h b/drivers/md/bcache/bset.h
index 57a9cff41546..ae115a253d73 100644
--- a/drivers/md/bcache/bset.h
+++ b/drivers/md/bcache/bset.h
@@ -1,6 +1,8 @@
1#ifndef _BCACHE_BSET_H 1#ifndef _BCACHE_BSET_H
2#define _BCACHE_BSET_H 2#define _BCACHE_BSET_H
3 3
4#include <linux/slab.h>
5
4/* 6/*
5 * BKEYS: 7 * BKEYS:
6 * 8 *
@@ -142,6 +144,8 @@
142 144
143/* Btree key comparison/iteration */ 145/* Btree key comparison/iteration */
144 146
147#define MAX_BSETS 4U
148
145struct btree_iter { 149struct btree_iter {
146 size_t size, used; 150 size_t size, used;
147 struct btree_iter_set { 151 struct btree_iter_set {
diff --git a/drivers/md/bcache/btree.c b/drivers/md/bcache/btree.c
index 7a5658f04e62..ee372884c405 100644
--- a/drivers/md/bcache/btree.c
+++ b/drivers/md/bcache/btree.c
@@ -24,6 +24,7 @@
24#include "btree.h" 24#include "btree.h"
25#include "debug.h" 25#include "debug.h"
26#include "request.h" 26#include "request.h"
27#include "writeback.h"
27 28
28#include <linux/slab.h> 29#include <linux/slab.h>
29#include <linux/bitops.h> 30#include <linux/bitops.h>
@@ -134,44 +135,17 @@ static uint64_t btree_csum_set(struct btree *b, struct bset *i)
134 return crc ^ 0xffffffffffffffffULL; 135 return crc ^ 0xffffffffffffffffULL;
135} 136}
136 137
137static void btree_bio_endio(struct bio *bio, int error) 138static void bch_btree_node_read_done(struct btree *b)
138{ 139{
139 struct closure *cl = bio->bi_private;
140 struct btree *b = container_of(cl, struct btree, io.cl);
141
142 if (error)
143 set_btree_node_io_error(b);
144
145 bch_bbio_count_io_errors(b->c, bio, error, (bio->bi_rw & WRITE)
146 ? "writing btree" : "reading btree");
147 closure_put(cl);
148}
149
150static void btree_bio_init(struct btree *b)
151{
152 BUG_ON(b->bio);
153 b->bio = bch_bbio_alloc(b->c);
154
155 b->bio->bi_end_io = btree_bio_endio;
156 b->bio->bi_private = &b->io.cl;
157}
158
159void bch_btree_read_done(struct closure *cl)
160{
161 struct btree *b = container_of(cl, struct btree, io.cl);
162 struct bset *i = b->sets[0].data;
163 struct btree_iter *iter = b->c->fill_iter;
164 const char *err = "bad btree header"; 140 const char *err = "bad btree header";
165 BUG_ON(b->nsets || b->written); 141 struct bset *i = b->sets[0].data;
166 142 struct btree_iter *iter;
167 bch_bbio_free(b->bio, b->c);
168 b->bio = NULL;
169 143
170 mutex_lock(&b->c->fill_lock); 144 iter = mempool_alloc(b->c->fill_iter, GFP_NOWAIT);
145 iter->size = b->c->sb.bucket_size / b->c->sb.block_size;
171 iter->used = 0; 146 iter->used = 0;
172 147
173 if (btree_node_io_error(b) || 148 if (!i->seq)
174 !i->seq)
175 goto err; 149 goto err;
176 150
177 for (; 151 for (;
@@ -228,17 +202,8 @@ void bch_btree_read_done(struct closure *cl)
228 if (b->written < btree_blocks(b)) 202 if (b->written < btree_blocks(b))
229 bch_bset_init_next(b); 203 bch_bset_init_next(b);
230out: 204out:
231 205 mempool_free(iter, b->c->fill_iter);
232 mutex_unlock(&b->c->fill_lock); 206 return;
233
234 spin_lock(&b->c->btree_read_time_lock);
235 bch_time_stats_update(&b->c->btree_read_time, b->io_start_time);
236 spin_unlock(&b->c->btree_read_time_lock);
237
238 smp_wmb(); /* read_done is our write lock */
239 set_btree_node_read_done(b);
240
241 closure_return(cl);
242err: 207err:
243 set_btree_node_io_error(b); 208 set_btree_node_io_error(b);
244 bch_cache_set_error(b->c, "%s at bucket %zu, block %zu, %u keys", 209 bch_cache_set_error(b->c, "%s at bucket %zu, block %zu, %u keys",
@@ -247,48 +212,69 @@ err:
247 goto out; 212 goto out;
248} 213}
249 214
250void bch_btree_read(struct btree *b) 215static void btree_node_read_endio(struct bio *bio, int error)
216{
217 struct closure *cl = bio->bi_private;
218 closure_put(cl);
219}
220
221void bch_btree_node_read(struct btree *b)
251{ 222{
252 BUG_ON(b->nsets || b->written); 223 uint64_t start_time = local_clock();
224 struct closure cl;
225 struct bio *bio;
226
227 trace_bcache_btree_read(b);
228
229 closure_init_stack(&cl);
230
231 bio = bch_bbio_alloc(b->c);
232 bio->bi_rw = REQ_META|READ_SYNC;
233 bio->bi_size = KEY_SIZE(&b->key) << 9;
234 bio->bi_end_io = btree_node_read_endio;
235 bio->bi_private = &cl;
236
237 bch_bio_map(bio, b->sets[0].data);
238
239 bch_submit_bbio(bio, b->c, &b->key, 0);
240 closure_sync(&cl);
253 241
254 if (!closure_trylock(&b->io.cl, &b->c->cl)) 242 if (!test_bit(BIO_UPTODATE, &bio->bi_flags))
255 BUG(); 243 set_btree_node_io_error(b);
256 244
257 b->io_start_time = local_clock(); 245 bch_bbio_free(bio, b->c);
258 246
259 btree_bio_init(b); 247 if (btree_node_io_error(b))
260 b->bio->bi_rw = REQ_META|READ_SYNC; 248 goto err;
261 b->bio->bi_size = KEY_SIZE(&b->key) << 9;
262 249
263 bch_bio_map(b->bio, b->sets[0].data); 250 bch_btree_node_read_done(b);
264 251
265 pr_debug("%s", pbtree(b)); 252 spin_lock(&b->c->btree_read_time_lock);
266 trace_bcache_btree_read(b->bio); 253 bch_time_stats_update(&b->c->btree_read_time, start_time);
267 bch_submit_bbio(b->bio, b->c, &b->key, 0); 254 spin_unlock(&b->c->btree_read_time_lock);
268 255
269 continue_at(&b->io.cl, bch_btree_read_done, system_wq); 256 return;
257err:
258 bch_cache_set_error(b->c, "io error reading bucket %lu",
259 PTR_BUCKET_NR(b->c, &b->key, 0));
270} 260}
271 261
272static void btree_complete_write(struct btree *b, struct btree_write *w) 262static void btree_complete_write(struct btree *b, struct btree_write *w)
273{ 263{
274 if (w->prio_blocked && 264 if (w->prio_blocked &&
275 !atomic_sub_return(w->prio_blocked, &b->c->prio_blocked)) 265 !atomic_sub_return(w->prio_blocked, &b->c->prio_blocked))
276 wake_up(&b->c->alloc_wait); 266 wake_up_allocators(b->c);
277 267
278 if (w->journal) { 268 if (w->journal) {
279 atomic_dec_bug(w->journal); 269 atomic_dec_bug(w->journal);
280 __closure_wake_up(&b->c->journal.wait); 270 __closure_wake_up(&b->c->journal.wait);
281 } 271 }
282 272
283 if (w->owner)
284 closure_put(w->owner);
285
286 w->prio_blocked = 0; 273 w->prio_blocked = 0;
287 w->journal = NULL; 274 w->journal = NULL;
288 w->owner = NULL;
289} 275}
290 276
291static void __btree_write_done(struct closure *cl) 277static void __btree_node_write_done(struct closure *cl)
292{ 278{
293 struct btree *b = container_of(cl, struct btree, io.cl); 279 struct btree *b = container_of(cl, struct btree, io.cl);
294 struct btree_write *w = btree_prev_write(b); 280 struct btree_write *w = btree_prev_write(b);
@@ -304,7 +290,7 @@ static void __btree_write_done(struct closure *cl)
304 closure_return(cl); 290 closure_return(cl);
305} 291}
306 292
307static void btree_write_done(struct closure *cl) 293static void btree_node_write_done(struct closure *cl)
308{ 294{
309 struct btree *b = container_of(cl, struct btree, io.cl); 295 struct btree *b = container_of(cl, struct btree, io.cl);
310 struct bio_vec *bv; 296 struct bio_vec *bv;
@@ -313,10 +299,22 @@ static void btree_write_done(struct closure *cl)
313 __bio_for_each_segment(bv, b->bio, n, 0) 299 __bio_for_each_segment(bv, b->bio, n, 0)
314 __free_page(bv->bv_page); 300 __free_page(bv->bv_page);
315 301
316 __btree_write_done(cl); 302 __btree_node_write_done(cl);
317} 303}
318 304
319static void do_btree_write(struct btree *b) 305static void btree_node_write_endio(struct bio *bio, int error)
306{
307 struct closure *cl = bio->bi_private;
308 struct btree *b = container_of(cl, struct btree, io.cl);
309
310 if (error)
311 set_btree_node_io_error(b);
312
313 bch_bbio_count_io_errors(b->c, bio, error, "writing btree");
314 closure_put(cl);
315}
316
317static void do_btree_node_write(struct btree *b)
320{ 318{
321 struct closure *cl = &b->io.cl; 319 struct closure *cl = &b->io.cl;
322 struct bset *i = b->sets[b->nsets].data; 320 struct bset *i = b->sets[b->nsets].data;
@@ -325,15 +323,34 @@ static void do_btree_write(struct btree *b)
325 i->version = BCACHE_BSET_VERSION; 323 i->version = BCACHE_BSET_VERSION;
326 i->csum = btree_csum_set(b, i); 324 i->csum = btree_csum_set(b, i);
327 325
328 btree_bio_init(b); 326 BUG_ON(b->bio);
329 b->bio->bi_rw = REQ_META|WRITE_SYNC; 327 b->bio = bch_bbio_alloc(b->c);
330 b->bio->bi_size = set_blocks(i, b->c) * block_bytes(b->c); 328
329 b->bio->bi_end_io = btree_node_write_endio;
330 b->bio->bi_private = &b->io.cl;
331 b->bio->bi_rw = REQ_META|WRITE_SYNC|REQ_FUA;
332 b->bio->bi_size = set_blocks(i, b->c) * block_bytes(b->c);
331 bch_bio_map(b->bio, i); 333 bch_bio_map(b->bio, i);
332 334
335 /*
336 * If we're appending to a leaf node, we don't technically need FUA -
337 * this write just needs to be persisted before the next journal write,
338 * which will be marked FLUSH|FUA.
339 *
340 * Similarly if we're writing a new btree root - the pointer is going to
341 * be in the next journal entry.
342 *
343 * But if we're writing a new btree node (that isn't a root) or
344 * appending to a non leaf btree node, we need either FUA or a flush
345 * when we write the parent with the new pointer. FUA is cheaper than a
346 * flush, and writes appending to leaf nodes aren't blocking anything so
347 * just make all btree node writes FUA to keep things sane.
348 */
349
333 bkey_copy(&k.key, &b->key); 350 bkey_copy(&k.key, &b->key);
334 SET_PTR_OFFSET(&k.key, 0, PTR_OFFSET(&k.key, 0) + bset_offset(b, i)); 351 SET_PTR_OFFSET(&k.key, 0, PTR_OFFSET(&k.key, 0) + bset_offset(b, i));
335 352
336 if (!bch_bio_alloc_pages(b->bio, GFP_NOIO)) { 353 if (!bio_alloc_pages(b->bio, GFP_NOIO)) {
337 int j; 354 int j;
338 struct bio_vec *bv; 355 struct bio_vec *bv;
339 void *base = (void *) ((unsigned long) i & ~(PAGE_SIZE - 1)); 356 void *base = (void *) ((unsigned long) i & ~(PAGE_SIZE - 1));
@@ -342,40 +359,41 @@ static void do_btree_write(struct btree *b)
342 memcpy(page_address(bv->bv_page), 359 memcpy(page_address(bv->bv_page),
343 base + j * PAGE_SIZE, PAGE_SIZE); 360 base + j * PAGE_SIZE, PAGE_SIZE);
344 361
345 trace_bcache_btree_write(b->bio);
346 bch_submit_bbio(b->bio, b->c, &k.key, 0); 362 bch_submit_bbio(b->bio, b->c, &k.key, 0);
347 363
348 continue_at(cl, btree_write_done, NULL); 364 continue_at(cl, btree_node_write_done, NULL);
349 } else { 365 } else {
350 b->bio->bi_vcnt = 0; 366 b->bio->bi_vcnt = 0;
351 bch_bio_map(b->bio, i); 367 bch_bio_map(b->bio, i);
352 368
353 trace_bcache_btree_write(b->bio);
354 bch_submit_bbio(b->bio, b->c, &k.key, 0); 369 bch_submit_bbio(b->bio, b->c, &k.key, 0);
355 370
356 closure_sync(cl); 371 closure_sync(cl);
357 __btree_write_done(cl); 372 __btree_node_write_done(cl);
358 } 373 }
359} 374}
360 375
361static void __btree_write(struct btree *b) 376void bch_btree_node_write(struct btree *b, struct closure *parent)
362{ 377{
363 struct bset *i = b->sets[b->nsets].data; 378 struct bset *i = b->sets[b->nsets].data;
364 379
380 trace_bcache_btree_write(b);
381
365 BUG_ON(current->bio_list); 382 BUG_ON(current->bio_list);
383 BUG_ON(b->written >= btree_blocks(b));
384 BUG_ON(b->written && !i->keys);
385 BUG_ON(b->sets->data->seq != i->seq);
386 bch_check_key_order(b, i);
366 387
367 closure_lock(&b->io, &b->c->cl);
368 cancel_delayed_work(&b->work); 388 cancel_delayed_work(&b->work);
369 389
390 /* If caller isn't waiting for write, parent refcount is cache set */
391 closure_lock(&b->io, parent ?: &b->c->cl);
392
370 clear_bit(BTREE_NODE_dirty, &b->flags); 393 clear_bit(BTREE_NODE_dirty, &b->flags);
371 change_bit(BTREE_NODE_write_idx, &b->flags); 394 change_bit(BTREE_NODE_write_idx, &b->flags);
372 395
373 bch_check_key_order(b, i); 396 do_btree_node_write(b);
374 BUG_ON(b->written && !i->keys);
375
376 do_btree_write(b);
377
378 pr_debug("%s block %i keys %i", pbtree(b), b->written, i->keys);
379 397
380 b->written += set_blocks(i, b->c); 398 b->written += set_blocks(i, b->c);
381 atomic_long_add(set_blocks(i, b->c) * b->c->sb.block_size, 399 atomic_long_add(set_blocks(i, b->c) * b->c->sb.block_size,
@@ -387,37 +405,31 @@ static void __btree_write(struct btree *b)
387 bch_bset_init_next(b); 405 bch_bset_init_next(b);
388} 406}
389 407
390static void btree_write_work(struct work_struct *w) 408static void btree_node_write_work(struct work_struct *w)
391{ 409{
392 struct btree *b = container_of(to_delayed_work(w), struct btree, work); 410 struct btree *b = container_of(to_delayed_work(w), struct btree, work);
393 411
394 down_write(&b->lock); 412 rw_lock(true, b, b->level);
395 413
396 if (btree_node_dirty(b)) 414 if (btree_node_dirty(b))
397 __btree_write(b); 415 bch_btree_node_write(b, NULL);
398 up_write(&b->lock); 416 rw_unlock(true, b);
399} 417}
400 418
401void bch_btree_write(struct btree *b, bool now, struct btree_op *op) 419static void bch_btree_leaf_dirty(struct btree *b, struct btree_op *op)
402{ 420{
403 struct bset *i = b->sets[b->nsets].data; 421 struct bset *i = b->sets[b->nsets].data;
404 struct btree_write *w = btree_current_write(b); 422 struct btree_write *w = btree_current_write(b);
405 423
406 BUG_ON(b->written && 424 BUG_ON(!b->written);
407 (b->written >= btree_blocks(b) || 425 BUG_ON(!i->keys);
408 i->seq != b->sets[0].data->seq ||
409 !i->keys));
410 426
411 if (!btree_node_dirty(b)) { 427 if (!btree_node_dirty(b))
412 set_btree_node_dirty(b); 428 queue_delayed_work(btree_io_wq, &b->work, 30 * HZ);
413 queue_delayed_work(btree_io_wq, &b->work,
414 msecs_to_jiffies(30000));
415 }
416 429
417 w->prio_blocked += b->prio_blocked; 430 set_btree_node_dirty(b);
418 b->prio_blocked = 0;
419 431
420 if (op && op->journal && !b->level) { 432 if (op && op->journal) {
421 if (w->journal && 433 if (w->journal &&
422 journal_pin_cmp(b->c, w, op)) { 434 journal_pin_cmp(b->c, w, op)) {
423 atomic_dec_bug(w->journal); 435 atomic_dec_bug(w->journal);
@@ -430,23 +442,10 @@ void bch_btree_write(struct btree *b, bool now, struct btree_op *op)
430 } 442 }
431 } 443 }
432 444
433 if (current->bio_list)
434 return;
435
436 /* Force write if set is too big */ 445 /* Force write if set is too big */
437 if (now || 446 if (set_bytes(i) > PAGE_SIZE - 48 &&
438 b->level || 447 !current->bio_list)
439 set_bytes(i) > PAGE_SIZE - 48) { 448 bch_btree_node_write(b, NULL);
440 if (op && now) {
441 /* Must wait on multiple writes */
442 BUG_ON(w->owner);
443 w->owner = &op->cl;
444 closure_get(&op->cl);
445 }
446
447 __btree_write(b);
448 }
449 BUG_ON(!b->written);
450} 449}
451 450
452/* 451/*
@@ -559,7 +558,7 @@ static struct btree *mca_bucket_alloc(struct cache_set *c,
559 init_rwsem(&b->lock); 558 init_rwsem(&b->lock);
560 lockdep_set_novalidate_class(&b->lock); 559 lockdep_set_novalidate_class(&b->lock);
561 INIT_LIST_HEAD(&b->list); 560 INIT_LIST_HEAD(&b->list);
562 INIT_DELAYED_WORK(&b->work, btree_write_work); 561 INIT_DELAYED_WORK(&b->work, btree_node_write_work);
563 b->c = c; 562 b->c = c;
564 closure_init_unlocked(&b->io); 563 closure_init_unlocked(&b->io);
565 564
@@ -582,7 +581,7 @@ static int mca_reap(struct btree *b, struct closure *cl, unsigned min_order)
582 BUG_ON(btree_node_dirty(b) && !b->sets[0].data); 581 BUG_ON(btree_node_dirty(b) && !b->sets[0].data);
583 582
584 if (cl && btree_node_dirty(b)) 583 if (cl && btree_node_dirty(b))
585 bch_btree_write(b, true, NULL); 584 bch_btree_node_write(b, NULL);
586 585
587 if (cl) 586 if (cl)
588 closure_wait_event_async(&b->io.wait, cl, 587 closure_wait_event_async(&b->io.wait, cl,
@@ -623,6 +622,13 @@ static int bch_mca_shrink(struct shrinker *shrink, struct shrink_control *sc)
623 else if (!mutex_trylock(&c->bucket_lock)) 622 else if (!mutex_trylock(&c->bucket_lock))
624 return -1; 623 return -1;
625 624
625 /*
626 * It's _really_ critical that we don't free too many btree nodes - we
627 * have to always leave ourselves a reserve. The reserve is how we
628 * guarantee that allocating memory for a new btree node can always
629 * succeed, so that inserting keys into the btree can always succeed and
630 * IO can always make forward progress:
631 */
626 nr /= c->btree_pages; 632 nr /= c->btree_pages;
627 nr = min_t(unsigned long, nr, mca_can_free(c)); 633 nr = min_t(unsigned long, nr, mca_can_free(c));
628 634
@@ -766,6 +772,8 @@ static struct btree *mca_cannibalize(struct cache_set *c, struct bkey *k,
766 int ret = -ENOMEM; 772 int ret = -ENOMEM;
767 struct btree *i; 773 struct btree *i;
768 774
775 trace_bcache_btree_cache_cannibalize(c);
776
769 if (!cl) 777 if (!cl)
770 return ERR_PTR(-ENOMEM); 778 return ERR_PTR(-ENOMEM);
771 779
@@ -784,7 +792,6 @@ static struct btree *mca_cannibalize(struct cache_set *c, struct bkey *k,
784 return ERR_PTR(-EAGAIN); 792 return ERR_PTR(-EAGAIN);
785 } 793 }
786 794
787 /* XXX: tracepoint */
788 c->try_harder = cl; 795 c->try_harder = cl;
789 c->try_harder_start = local_clock(); 796 c->try_harder_start = local_clock();
790retry: 797retry:
@@ -905,6 +912,9 @@ retry:
905 b = mca_find(c, k); 912 b = mca_find(c, k);
906 913
907 if (!b) { 914 if (!b) {
915 if (current->bio_list)
916 return ERR_PTR(-EAGAIN);
917
908 mutex_lock(&c->bucket_lock); 918 mutex_lock(&c->bucket_lock);
909 b = mca_alloc(c, k, level, &op->cl); 919 b = mca_alloc(c, k, level, &op->cl);
910 mutex_unlock(&c->bucket_lock); 920 mutex_unlock(&c->bucket_lock);
@@ -914,7 +924,7 @@ retry:
914 if (IS_ERR(b)) 924 if (IS_ERR(b))
915 return b; 925 return b;
916 926
917 bch_btree_read(b); 927 bch_btree_node_read(b);
918 928
919 if (!write) 929 if (!write)
920 downgrade_write(&b->lock); 930 downgrade_write(&b->lock);
@@ -937,15 +947,12 @@ retry:
937 for (; i <= b->nsets; i++) 947 for (; i <= b->nsets; i++)
938 prefetch(b->sets[i].data); 948 prefetch(b->sets[i].data);
939 949
940 if (!closure_wait_event(&b->io.wait, &op->cl, 950 if (btree_node_io_error(b)) {
941 btree_node_read_done(b))) {
942 rw_unlock(write, b);
943 b = ERR_PTR(-EAGAIN);
944 } else if (btree_node_io_error(b)) {
945 rw_unlock(write, b); 951 rw_unlock(write, b);
946 b = ERR_PTR(-EIO); 952 return ERR_PTR(-EIO);
947 } else 953 }
948 BUG_ON(!b->written); 954
955 BUG_ON(!b->written);
949 956
950 return b; 957 return b;
951} 958}
@@ -959,7 +966,7 @@ static void btree_node_prefetch(struct cache_set *c, struct bkey *k, int level)
959 mutex_unlock(&c->bucket_lock); 966 mutex_unlock(&c->bucket_lock);
960 967
961 if (!IS_ERR_OR_NULL(b)) { 968 if (!IS_ERR_OR_NULL(b)) {
962 bch_btree_read(b); 969 bch_btree_node_read(b);
963 rw_unlock(true, b); 970 rw_unlock(true, b);
964 } 971 }
965} 972}
@@ -970,24 +977,19 @@ static void btree_node_free(struct btree *b, struct btree_op *op)
970{ 977{
971 unsigned i; 978 unsigned i;
972 979
980 trace_bcache_btree_node_free(b);
981
973 /* 982 /*
974 * The BUG_ON() in btree_node_get() implies that we must have a write 983 * The BUG_ON() in btree_node_get() implies that we must have a write
975 * lock on parent to free or even invalidate a node 984 * lock on parent to free or even invalidate a node
976 */ 985 */
977 BUG_ON(op->lock <= b->level); 986 BUG_ON(op->lock <= b->level);
978 BUG_ON(b == b->c->root); 987 BUG_ON(b == b->c->root);
979 pr_debug("bucket %s", pbtree(b));
980 988
981 if (btree_node_dirty(b)) 989 if (btree_node_dirty(b))
982 btree_complete_write(b, btree_current_write(b)); 990 btree_complete_write(b, btree_current_write(b));
983 clear_bit(BTREE_NODE_dirty, &b->flags); 991 clear_bit(BTREE_NODE_dirty, &b->flags);
984 992
985 if (b->prio_blocked &&
986 !atomic_sub_return(b->prio_blocked, &b->c->prio_blocked))
987 wake_up(&b->c->alloc_wait);
988
989 b->prio_blocked = 0;
990
991 cancel_delayed_work(&b->work); 993 cancel_delayed_work(&b->work);
992 994
993 mutex_lock(&b->c->bucket_lock); 995 mutex_lock(&b->c->bucket_lock);
@@ -1028,17 +1030,20 @@ retry:
1028 goto retry; 1030 goto retry;
1029 } 1031 }
1030 1032
1031 set_btree_node_read_done(b);
1032 b->accessed = 1; 1033 b->accessed = 1;
1033 bch_bset_init_next(b); 1034 bch_bset_init_next(b);
1034 1035
1035 mutex_unlock(&c->bucket_lock); 1036 mutex_unlock(&c->bucket_lock);
1037
1038 trace_bcache_btree_node_alloc(b);
1036 return b; 1039 return b;
1037err_free: 1040err_free:
1038 bch_bucket_free(c, &k.key); 1041 bch_bucket_free(c, &k.key);
1039 __bkey_put(c, &k.key); 1042 __bkey_put(c, &k.key);
1040err: 1043err:
1041 mutex_unlock(&c->bucket_lock); 1044 mutex_unlock(&c->bucket_lock);
1045
1046 trace_bcache_btree_node_alloc_fail(b);
1042 return b; 1047 return b;
1043} 1048}
1044 1049
@@ -1137,11 +1142,8 @@ static int btree_gc_mark_node(struct btree *b, unsigned *keys,
1137 gc->nkeys++; 1142 gc->nkeys++;
1138 1143
1139 gc->data += KEY_SIZE(k); 1144 gc->data += KEY_SIZE(k);
1140 if (KEY_DIRTY(k)) { 1145 if (KEY_DIRTY(k))
1141 gc->dirty += KEY_SIZE(k); 1146 gc->dirty += KEY_SIZE(k);
1142 if (d)
1143 d->sectors_dirty_gc += KEY_SIZE(k);
1144 }
1145 } 1147 }
1146 1148
1147 for (t = b->sets; t <= &b->sets[b->nsets]; t++) 1149 for (t = b->sets; t <= &b->sets[b->nsets]; t++)
@@ -1166,14 +1168,11 @@ static struct btree *btree_gc_alloc(struct btree *b, struct bkey *k,
1166 1168
1167 if (!IS_ERR_OR_NULL(n)) { 1169 if (!IS_ERR_OR_NULL(n)) {
1168 swap(b, n); 1170 swap(b, n);
1171 __bkey_put(b->c, &b->key);
1169 1172
1170 memcpy(k->ptr, b->key.ptr, 1173 memcpy(k->ptr, b->key.ptr,
1171 sizeof(uint64_t) * KEY_PTRS(&b->key)); 1174 sizeof(uint64_t) * KEY_PTRS(&b->key));
1172 1175
1173 __bkey_put(b->c, &b->key);
1174 atomic_inc(&b->c->prio_blocked);
1175 b->prio_blocked++;
1176
1177 btree_node_free(n, op); 1176 btree_node_free(n, op);
1178 up_write(&n->lock); 1177 up_write(&n->lock);
1179 } 1178 }
@@ -1278,7 +1277,7 @@ static void btree_gc_coalesce(struct btree *b, struct btree_op *op,
1278 btree_node_free(r->b, op); 1277 btree_node_free(r->b, op);
1279 up_write(&r->b->lock); 1278 up_write(&r->b->lock);
1280 1279
1281 pr_debug("coalesced %u nodes", nodes); 1280 trace_bcache_btree_gc_coalesce(nodes);
1282 1281
1283 gc->nodes--; 1282 gc->nodes--;
1284 nodes--; 1283 nodes--;
@@ -1293,14 +1292,9 @@ static int btree_gc_recurse(struct btree *b, struct btree_op *op,
1293 void write(struct btree *r) 1292 void write(struct btree *r)
1294 { 1293 {
1295 if (!r->written) 1294 if (!r->written)
1296 bch_btree_write(r, true, op); 1295 bch_btree_node_write(r, &op->cl);
1297 else if (btree_node_dirty(r)) { 1296 else if (btree_node_dirty(r))
1298 BUG_ON(btree_current_write(r)->owner); 1297 bch_btree_node_write(r, writes);
1299 btree_current_write(r)->owner = writes;
1300 closure_get(writes);
1301
1302 bch_btree_write(r, true, NULL);
1303 }
1304 1298
1305 up_write(&r->lock); 1299 up_write(&r->lock);
1306 } 1300 }
@@ -1386,9 +1380,7 @@ static int bch_btree_gc_root(struct btree *b, struct btree_op *op,
1386 ret = btree_gc_recurse(b, op, writes, gc); 1380 ret = btree_gc_recurse(b, op, writes, gc);
1387 1381
1388 if (!b->written || btree_node_dirty(b)) { 1382 if (!b->written || btree_node_dirty(b)) {
1389 atomic_inc(&b->c->prio_blocked); 1383 bch_btree_node_write(b, n ? &op->cl : NULL);
1390 b->prio_blocked++;
1391 bch_btree_write(b, true, n ? op : NULL);
1392 } 1384 }
1393 1385
1394 if (!IS_ERR_OR_NULL(n)) { 1386 if (!IS_ERR_OR_NULL(n)) {
@@ -1405,7 +1397,6 @@ static void btree_gc_start(struct cache_set *c)
1405{ 1397{
1406 struct cache *ca; 1398 struct cache *ca;
1407 struct bucket *b; 1399 struct bucket *b;
1408 struct bcache_device **d;
1409 unsigned i; 1400 unsigned i;
1410 1401
1411 if (!c->gc_mark_valid) 1402 if (!c->gc_mark_valid)
@@ -1419,16 +1410,12 @@ static void btree_gc_start(struct cache_set *c)
1419 for_each_cache(ca, c, i) 1410 for_each_cache(ca, c, i)
1420 for_each_bucket(b, ca) { 1411 for_each_bucket(b, ca) {
1421 b->gc_gen = b->gen; 1412 b->gc_gen = b->gen;
1422 if (!atomic_read(&b->pin)) 1413 if (!atomic_read(&b->pin)) {
1423 SET_GC_MARK(b, GC_MARK_RECLAIMABLE); 1414 SET_GC_MARK(b, GC_MARK_RECLAIMABLE);
1415 SET_GC_SECTORS_USED(b, 0);
1416 }
1424 } 1417 }
1425 1418
1426 for (d = c->devices;
1427 d < c->devices + c->nr_uuids;
1428 d++)
1429 if (*d)
1430 (*d)->sectors_dirty_gc = 0;
1431
1432 mutex_unlock(&c->bucket_lock); 1419 mutex_unlock(&c->bucket_lock);
1433} 1420}
1434 1421
@@ -1437,7 +1424,6 @@ size_t bch_btree_gc_finish(struct cache_set *c)
1437 size_t available = 0; 1424 size_t available = 0;
1438 struct bucket *b; 1425 struct bucket *b;
1439 struct cache *ca; 1426 struct cache *ca;
1440 struct bcache_device **d;
1441 unsigned i; 1427 unsigned i;
1442 1428
1443 mutex_lock(&c->bucket_lock); 1429 mutex_lock(&c->bucket_lock);
@@ -1480,22 +1466,6 @@ size_t bch_btree_gc_finish(struct cache_set *c)
1480 } 1466 }
1481 } 1467 }
1482 1468
1483 for (d = c->devices;
1484 d < c->devices + c->nr_uuids;
1485 d++)
1486 if (*d) {
1487 unsigned long last =
1488 atomic_long_read(&((*d)->sectors_dirty));
1489 long difference = (*d)->sectors_dirty_gc - last;
1490
1491 pr_debug("sectors dirty off by %li", difference);
1492
1493 (*d)->sectors_dirty_last += difference;
1494
1495 atomic_long_set(&((*d)->sectors_dirty),
1496 (*d)->sectors_dirty_gc);
1497 }
1498
1499 mutex_unlock(&c->bucket_lock); 1469 mutex_unlock(&c->bucket_lock);
1500 return available; 1470 return available;
1501} 1471}
@@ -1508,10 +1478,9 @@ static void bch_btree_gc(struct closure *cl)
1508 struct gc_stat stats; 1478 struct gc_stat stats;
1509 struct closure writes; 1479 struct closure writes;
1510 struct btree_op op; 1480 struct btree_op op;
1511
1512 uint64_t start_time = local_clock(); 1481 uint64_t start_time = local_clock();
1513 trace_bcache_gc_start(c->sb.set_uuid); 1482
1514 blktrace_msg_all(c, "Starting gc"); 1483 trace_bcache_gc_start(c);
1515 1484
1516 memset(&stats, 0, sizeof(struct gc_stat)); 1485 memset(&stats, 0, sizeof(struct gc_stat));
1517 closure_init_stack(&writes); 1486 closure_init_stack(&writes);
@@ -1520,14 +1489,14 @@ static void bch_btree_gc(struct closure *cl)
1520 1489
1521 btree_gc_start(c); 1490 btree_gc_start(c);
1522 1491
1492 atomic_inc(&c->prio_blocked);
1493
1523 ret = btree_root(gc_root, c, &op, &writes, &stats); 1494 ret = btree_root(gc_root, c, &op, &writes, &stats);
1524 closure_sync(&op.cl); 1495 closure_sync(&op.cl);
1525 closure_sync(&writes); 1496 closure_sync(&writes);
1526 1497
1527 if (ret) { 1498 if (ret) {
1528 blktrace_msg_all(c, "Stopped gc");
1529 pr_warn("gc failed!"); 1499 pr_warn("gc failed!");
1530
1531 continue_at(cl, bch_btree_gc, bch_gc_wq); 1500 continue_at(cl, bch_btree_gc, bch_gc_wq);
1532 } 1501 }
1533 1502
@@ -1537,6 +1506,9 @@ static void bch_btree_gc(struct closure *cl)
1537 1506
1538 available = bch_btree_gc_finish(c); 1507 available = bch_btree_gc_finish(c);
1539 1508
1509 atomic_dec(&c->prio_blocked);
1510 wake_up_allocators(c);
1511
1540 bch_time_stats_update(&c->btree_gc_time, start_time); 1512 bch_time_stats_update(&c->btree_gc_time, start_time);
1541 1513
1542 stats.key_bytes *= sizeof(uint64_t); 1514 stats.key_bytes *= sizeof(uint64_t);
@@ -1544,10 +1516,8 @@ static void bch_btree_gc(struct closure *cl)
1544 stats.data <<= 9; 1516 stats.data <<= 9;
1545 stats.in_use = (c->nbuckets - available) * 100 / c->nbuckets; 1517 stats.in_use = (c->nbuckets - available) * 100 / c->nbuckets;
1546 memcpy(&c->gc_stats, &stats, sizeof(struct gc_stat)); 1518 memcpy(&c->gc_stats, &stats, sizeof(struct gc_stat));
1547 blktrace_msg_all(c, "Finished gc");
1548 1519
1549 trace_bcache_gc_end(c->sb.set_uuid); 1520 trace_bcache_gc_end(c);
1550 wake_up(&c->alloc_wait);
1551 1521
1552 continue_at(cl, bch_moving_gc, bch_gc_wq); 1522 continue_at(cl, bch_moving_gc, bch_gc_wq);
1553} 1523}
@@ -1654,14 +1624,14 @@ static bool fix_overlapping_extents(struct btree *b,
1654 struct btree_iter *iter, 1624 struct btree_iter *iter,
1655 struct btree_op *op) 1625 struct btree_op *op)
1656{ 1626{
1657 void subtract_dirty(struct bkey *k, int sectors) 1627 void subtract_dirty(struct bkey *k, uint64_t offset, int sectors)
1658 { 1628 {
1659 struct bcache_device *d = b->c->devices[KEY_INODE(k)]; 1629 if (KEY_DIRTY(k))
1660 1630 bcache_dev_sectors_dirty_add(b->c, KEY_INODE(k),
1661 if (KEY_DIRTY(k) && d) 1631 offset, -sectors);
1662 atomic_long_sub(sectors, &d->sectors_dirty);
1663 } 1632 }
1664 1633
1634 uint64_t old_offset;
1665 unsigned old_size, sectors_found = 0; 1635 unsigned old_size, sectors_found = 0;
1666 1636
1667 while (1) { 1637 while (1) {
@@ -1673,6 +1643,7 @@ static bool fix_overlapping_extents(struct btree *b,
1673 if (bkey_cmp(k, &START_KEY(insert)) <= 0) 1643 if (bkey_cmp(k, &START_KEY(insert)) <= 0)
1674 continue; 1644 continue;
1675 1645
1646 old_offset = KEY_START(k);
1676 old_size = KEY_SIZE(k); 1647 old_size = KEY_SIZE(k);
1677 1648
1678 /* 1649 /*
@@ -1728,7 +1699,7 @@ static bool fix_overlapping_extents(struct btree *b,
1728 1699
1729 struct bkey *top; 1700 struct bkey *top;
1730 1701
1731 subtract_dirty(k, KEY_SIZE(insert)); 1702 subtract_dirty(k, KEY_START(insert), KEY_SIZE(insert));
1732 1703
1733 if (bkey_written(b, k)) { 1704 if (bkey_written(b, k)) {
1734 /* 1705 /*
@@ -1775,7 +1746,7 @@ static bool fix_overlapping_extents(struct btree *b,
1775 } 1746 }
1776 } 1747 }
1777 1748
1778 subtract_dirty(k, old_size - KEY_SIZE(k)); 1749 subtract_dirty(k, old_offset, old_size - KEY_SIZE(k));
1779 } 1750 }
1780 1751
1781check_failed: 1752check_failed:
@@ -1798,7 +1769,7 @@ static bool btree_insert_key(struct btree *b, struct btree_op *op,
1798{ 1769{
1799 struct bset *i = b->sets[b->nsets].data; 1770 struct bset *i = b->sets[b->nsets].data;
1800 struct bkey *m, *prev; 1771 struct bkey *m, *prev;
1801 const char *status = "insert"; 1772 unsigned status = BTREE_INSERT_STATUS_INSERT;
1802 1773
1803 BUG_ON(bkey_cmp(k, &b->key) > 0); 1774 BUG_ON(bkey_cmp(k, &b->key) > 0);
1804 BUG_ON(b->level && !KEY_PTRS(k)); 1775 BUG_ON(b->level && !KEY_PTRS(k));
@@ -1831,17 +1802,17 @@ static bool btree_insert_key(struct btree *b, struct btree_op *op,
1831 goto insert; 1802 goto insert;
1832 1803
1833 /* prev is in the tree, if we merge we're done */ 1804 /* prev is in the tree, if we merge we're done */
1834 status = "back merging"; 1805 status = BTREE_INSERT_STATUS_BACK_MERGE;
1835 if (prev && 1806 if (prev &&
1836 bch_bkey_try_merge(b, prev, k)) 1807 bch_bkey_try_merge(b, prev, k))
1837 goto merged; 1808 goto merged;
1838 1809
1839 status = "overwrote front"; 1810 status = BTREE_INSERT_STATUS_OVERWROTE;
1840 if (m != end(i) && 1811 if (m != end(i) &&
1841 KEY_PTRS(m) == KEY_PTRS(k) && !KEY_SIZE(m)) 1812 KEY_PTRS(m) == KEY_PTRS(k) && !KEY_SIZE(m))
1842 goto copy; 1813 goto copy;
1843 1814
1844 status = "front merge"; 1815 status = BTREE_INSERT_STATUS_FRONT_MERGE;
1845 if (m != end(i) && 1816 if (m != end(i) &&
1846 bch_bkey_try_merge(b, k, m)) 1817 bch_bkey_try_merge(b, k, m))
1847 goto copy; 1818 goto copy;
@@ -1851,21 +1822,21 @@ static bool btree_insert_key(struct btree *b, struct btree_op *op,
1851insert: shift_keys(b, m, k); 1822insert: shift_keys(b, m, k);
1852copy: bkey_copy(m, k); 1823copy: bkey_copy(m, k);
1853merged: 1824merged:
1854 bch_check_keys(b, "%s for %s at %s: %s", status, 1825 if (KEY_DIRTY(k))
1855 op_type(op), pbtree(b), pkey(k)); 1826 bcache_dev_sectors_dirty_add(b->c, KEY_INODE(k),
1856 bch_check_key_order_msg(b, i, "%s for %s at %s: %s", status, 1827 KEY_START(k), KEY_SIZE(k));
1857 op_type(op), pbtree(b), pkey(k)); 1828
1829 bch_check_keys(b, "%u for %s", status, op_type(op));
1858 1830
1859 if (b->level && !KEY_OFFSET(k)) 1831 if (b->level && !KEY_OFFSET(k))
1860 b->prio_blocked++; 1832 btree_current_write(b)->prio_blocked++;
1861 1833
1862 pr_debug("%s for %s at %s: %s", status, 1834 trace_bcache_btree_insert_key(b, k, op->type, status);
1863 op_type(op), pbtree(b), pkey(k));
1864 1835
1865 return true; 1836 return true;
1866} 1837}
1867 1838
1868bool bch_btree_insert_keys(struct btree *b, struct btree_op *op) 1839static bool bch_btree_insert_keys(struct btree *b, struct btree_op *op)
1869{ 1840{
1870 bool ret = false; 1841 bool ret = false;
1871 struct bkey *k; 1842 struct bkey *k;
@@ -1896,7 +1867,7 @@ bool bch_btree_insert_check_key(struct btree *b, struct btree_op *op,
1896 should_split(b)) 1867 should_split(b))
1897 goto out; 1868 goto out;
1898 1869
1899 op->replace = KEY(op->inode, bio_end(bio), bio_sectors(bio)); 1870 op->replace = KEY(op->inode, bio_end_sector(bio), bio_sectors(bio));
1900 1871
1901 SET_KEY_PTRS(&op->replace, 1); 1872 SET_KEY_PTRS(&op->replace, 1);
1902 get_random_bytes(&op->replace.ptr[0], sizeof(uint64_t)); 1873 get_random_bytes(&op->replace.ptr[0], sizeof(uint64_t));
@@ -1907,7 +1878,6 @@ bool bch_btree_insert_check_key(struct btree *b, struct btree_op *op,
1907 1878
1908 BUG_ON(op->type != BTREE_INSERT); 1879 BUG_ON(op->type != BTREE_INSERT);
1909 BUG_ON(!btree_insert_key(b, op, &tmp.k)); 1880 BUG_ON(!btree_insert_key(b, op, &tmp.k));
1910 bch_btree_write(b, false, NULL);
1911 ret = true; 1881 ret = true;
1912out: 1882out:
1913 downgrade_write(&b->lock); 1883 downgrade_write(&b->lock);
@@ -1929,12 +1899,11 @@ static int btree_split(struct btree *b, struct btree_op *op)
1929 1899
1930 split = set_blocks(n1->sets[0].data, n1->c) > (btree_blocks(b) * 4) / 5; 1900 split = set_blocks(n1->sets[0].data, n1->c) > (btree_blocks(b) * 4) / 5;
1931 1901
1932 pr_debug("%ssplitting at %s keys %i", split ? "" : "not ",
1933 pbtree(b), n1->sets[0].data->keys);
1934
1935 if (split) { 1902 if (split) {
1936 unsigned keys = 0; 1903 unsigned keys = 0;
1937 1904
1905 trace_bcache_btree_node_split(b, n1->sets[0].data->keys);
1906
1938 n2 = bch_btree_node_alloc(b->c, b->level, &op->cl); 1907 n2 = bch_btree_node_alloc(b->c, b->level, &op->cl);
1939 if (IS_ERR(n2)) 1908 if (IS_ERR(n2))
1940 goto err_free1; 1909 goto err_free1;
@@ -1967,18 +1936,21 @@ static int btree_split(struct btree *b, struct btree_op *op)
1967 bkey_copy_key(&n2->key, &b->key); 1936 bkey_copy_key(&n2->key, &b->key);
1968 1937
1969 bch_keylist_add(&op->keys, &n2->key); 1938 bch_keylist_add(&op->keys, &n2->key);
1970 bch_btree_write(n2, true, op); 1939 bch_btree_node_write(n2, &op->cl);
1971 rw_unlock(true, n2); 1940 rw_unlock(true, n2);
1972 } else 1941 } else {
1942 trace_bcache_btree_node_compact(b, n1->sets[0].data->keys);
1943
1973 bch_btree_insert_keys(n1, op); 1944 bch_btree_insert_keys(n1, op);
1945 }
1974 1946
1975 bch_keylist_add(&op->keys, &n1->key); 1947 bch_keylist_add(&op->keys, &n1->key);
1976 bch_btree_write(n1, true, op); 1948 bch_btree_node_write(n1, &op->cl);
1977 1949
1978 if (n3) { 1950 if (n3) {
1979 bkey_copy_key(&n3->key, &MAX_KEY); 1951 bkey_copy_key(&n3->key, &MAX_KEY);
1980 bch_btree_insert_keys(n3, op); 1952 bch_btree_insert_keys(n3, op);
1981 bch_btree_write(n3, true, op); 1953 bch_btree_node_write(n3, &op->cl);
1982 1954
1983 closure_sync(&op->cl); 1955 closure_sync(&op->cl);
1984 bch_btree_set_root(n3); 1956 bch_btree_set_root(n3);
@@ -2082,8 +2054,12 @@ static int bch_btree_insert_recurse(struct btree *b, struct btree_op *op,
2082 2054
2083 BUG_ON(write_block(b) != b->sets[b->nsets].data); 2055 BUG_ON(write_block(b) != b->sets[b->nsets].data);
2084 2056
2085 if (bch_btree_insert_keys(b, op)) 2057 if (bch_btree_insert_keys(b, op)) {
2086 bch_btree_write(b, false, op); 2058 if (!b->level)
2059 bch_btree_leaf_dirty(b, op);
2060 else
2061 bch_btree_node_write(b, &op->cl);
2062 }
2087 } 2063 }
2088 2064
2089 return 0; 2065 return 0;
@@ -2140,6 +2116,11 @@ int bch_btree_insert(struct btree_op *op, struct cache_set *c)
2140void bch_btree_set_root(struct btree *b) 2116void bch_btree_set_root(struct btree *b)
2141{ 2117{
2142 unsigned i; 2118 unsigned i;
2119 struct closure cl;
2120
2121 closure_init_stack(&cl);
2122
2123 trace_bcache_btree_set_root(b);
2143 2124
2144 BUG_ON(!b->written); 2125 BUG_ON(!b->written);
2145 2126
@@ -2153,8 +2134,8 @@ void bch_btree_set_root(struct btree *b)
2153 b->c->root = b; 2134 b->c->root = b;
2154 __bkey_put(b->c, &b->key); 2135 __bkey_put(b->c, &b->key);
2155 2136
2156 bch_journal_meta(b->c, NULL); 2137 bch_journal_meta(b->c, &cl);
2157 pr_debug("%s for %pf", pbtree(b), __builtin_return_address(0)); 2138 closure_sync(&cl);
2158} 2139}
2159 2140
2160/* Cache lookup */ 2141/* Cache lookup */
@@ -2215,9 +2196,6 @@ static int submit_partial_cache_hit(struct btree *b, struct btree_op *op,
2215 KEY_OFFSET(k) - bio->bi_sector); 2196 KEY_OFFSET(k) - bio->bi_sector);
2216 2197
2217 n = bch_bio_split(bio, sectors, GFP_NOIO, s->d->bio_split); 2198 n = bch_bio_split(bio, sectors, GFP_NOIO, s->d->bio_split);
2218 if (!n)
2219 return -EAGAIN;
2220
2221 if (n == bio) 2199 if (n == bio)
2222 op->lookup_done = true; 2200 op->lookup_done = true;
2223 2201
@@ -2240,7 +2218,6 @@ static int submit_partial_cache_hit(struct btree *b, struct btree_op *op,
2240 n->bi_end_io = bch_cache_read_endio; 2218 n->bi_end_io = bch_cache_read_endio;
2241 n->bi_private = &s->cl; 2219 n->bi_private = &s->cl;
2242 2220
2243 trace_bcache_cache_hit(n);
2244 __bch_submit_bbio(n, b->c); 2221 __bch_submit_bbio(n, b->c);
2245 } 2222 }
2246 2223
@@ -2257,9 +2234,6 @@ int bch_btree_search_recurse(struct btree *b, struct btree_op *op)
2257 struct btree_iter iter; 2234 struct btree_iter iter;
2258 bch_btree_iter_init(b, &iter, &KEY(op->inode, bio->bi_sector, 0)); 2235 bch_btree_iter_init(b, &iter, &KEY(op->inode, bio->bi_sector, 0));
2259 2236
2260 pr_debug("at %s searching for %u:%llu", pbtree(b), op->inode,
2261 (uint64_t) bio->bi_sector);
2262
2263 do { 2237 do {
2264 k = bch_btree_iter_next_filter(&iter, b, bch_ptr_bad); 2238 k = bch_btree_iter_next_filter(&iter, b, bch_ptr_bad);
2265 if (!k) { 2239 if (!k) {
@@ -2303,7 +2277,8 @@ static inline int keybuf_nonoverlapping_cmp(struct keybuf_key *l,
2303} 2277}
2304 2278
2305static int bch_btree_refill_keybuf(struct btree *b, struct btree_op *op, 2279static int bch_btree_refill_keybuf(struct btree *b, struct btree_op *op,
2306 struct keybuf *buf, struct bkey *end) 2280 struct keybuf *buf, struct bkey *end,
2281 keybuf_pred_fn *pred)
2307{ 2282{
2308 struct btree_iter iter; 2283 struct btree_iter iter;
2309 bch_btree_iter_init(b, &iter, &buf->last_scanned); 2284 bch_btree_iter_init(b, &iter, &buf->last_scanned);
@@ -2322,11 +2297,9 @@ static int bch_btree_refill_keybuf(struct btree *b, struct btree_op *op,
2322 if (bkey_cmp(&buf->last_scanned, end) >= 0) 2297 if (bkey_cmp(&buf->last_scanned, end) >= 0)
2323 break; 2298 break;
2324 2299
2325 if (buf->key_predicate(buf, k)) { 2300 if (pred(buf, k)) {
2326 struct keybuf_key *w; 2301 struct keybuf_key *w;
2327 2302
2328 pr_debug("%s", pkey(k));
2329
2330 spin_lock(&buf->lock); 2303 spin_lock(&buf->lock);
2331 2304
2332 w = array_alloc(&buf->freelist); 2305 w = array_alloc(&buf->freelist);
@@ -2343,7 +2316,7 @@ static int bch_btree_refill_keybuf(struct btree *b, struct btree_op *op,
2343 if (!k) 2316 if (!k)
2344 break; 2317 break;
2345 2318
2346 btree(refill_keybuf, k, b, op, buf, end); 2319 btree(refill_keybuf, k, b, op, buf, end, pred);
2347 /* 2320 /*
2348 * Might get an error here, but can't really do anything 2321 * Might get an error here, but can't really do anything
2349 * and it'll get logged elsewhere. Just read what we 2322 * and it'll get logged elsewhere. Just read what we
@@ -2361,7 +2334,7 @@ static int bch_btree_refill_keybuf(struct btree *b, struct btree_op *op,
2361} 2334}
2362 2335
2363void bch_refill_keybuf(struct cache_set *c, struct keybuf *buf, 2336void bch_refill_keybuf(struct cache_set *c, struct keybuf *buf,
2364 struct bkey *end) 2337 struct bkey *end, keybuf_pred_fn *pred)
2365{ 2338{
2366 struct bkey start = buf->last_scanned; 2339 struct bkey start = buf->last_scanned;
2367 struct btree_op op; 2340 struct btree_op op;
@@ -2369,7 +2342,7 @@ void bch_refill_keybuf(struct cache_set *c, struct keybuf *buf,
2369 2342
2370 cond_resched(); 2343 cond_resched();
2371 2344
2372 btree_root(refill_keybuf, c, &op, buf, end); 2345 btree_root(refill_keybuf, c, &op, buf, end, pred);
2373 closure_sync(&op.cl); 2346 closure_sync(&op.cl);
2374 2347
2375 pr_debug("found %s keys from %llu:%llu to %llu:%llu", 2348 pr_debug("found %s keys from %llu:%llu to %llu:%llu",
@@ -2455,7 +2428,8 @@ struct keybuf_key *bch_keybuf_next(struct keybuf *buf)
2455 2428
2456struct keybuf_key *bch_keybuf_next_rescan(struct cache_set *c, 2429struct keybuf_key *bch_keybuf_next_rescan(struct cache_set *c,
2457 struct keybuf *buf, 2430 struct keybuf *buf,
2458 struct bkey *end) 2431 struct bkey *end,
2432 keybuf_pred_fn *pred)
2459{ 2433{
2460 struct keybuf_key *ret; 2434 struct keybuf_key *ret;
2461 2435
@@ -2469,15 +2443,14 @@ struct keybuf_key *bch_keybuf_next_rescan(struct cache_set *c,
2469 break; 2443 break;
2470 } 2444 }
2471 2445
2472 bch_refill_keybuf(c, buf, end); 2446 bch_refill_keybuf(c, buf, end, pred);
2473 } 2447 }
2474 2448
2475 return ret; 2449 return ret;
2476} 2450}
2477 2451
2478void bch_keybuf_init(struct keybuf *buf, keybuf_pred_fn *fn) 2452void bch_keybuf_init(struct keybuf *buf)
2479{ 2453{
2480 buf->key_predicate = fn;
2481 buf->last_scanned = MAX_KEY; 2454 buf->last_scanned = MAX_KEY;
2482 buf->keys = RB_ROOT; 2455 buf->keys = RB_ROOT;
2483 2456
diff --git a/drivers/md/bcache/btree.h b/drivers/md/bcache/btree.h
index af4a7092a28c..3333d3723633 100644
--- a/drivers/md/bcache/btree.h
+++ b/drivers/md/bcache/btree.h
@@ -102,7 +102,6 @@
102#include "debug.h" 102#include "debug.h"
103 103
104struct btree_write { 104struct btree_write {
105 struct closure *owner;
106 atomic_t *journal; 105 atomic_t *journal;
107 106
108 /* If btree_split() frees a btree node, it writes a new pointer to that 107 /* If btree_split() frees a btree node, it writes a new pointer to that
@@ -142,16 +141,12 @@ struct btree {
142 */ 141 */
143 struct bset_tree sets[MAX_BSETS]; 142 struct bset_tree sets[MAX_BSETS];
144 143
145 /* Used to refcount bio splits, also protects b->bio */ 144 /* For outstanding btree writes, used as a lock - protects write_idx */
146 struct closure_with_waitlist io; 145 struct closure_with_waitlist io;
147 146
148 /* Gets transferred to w->prio_blocked - see the comment there */
149 int prio_blocked;
150
151 struct list_head list; 147 struct list_head list;
152 struct delayed_work work; 148 struct delayed_work work;
153 149
154 uint64_t io_start_time;
155 struct btree_write writes[2]; 150 struct btree_write writes[2];
156 struct bio *bio; 151 struct bio *bio;
157}; 152};
@@ -164,13 +159,11 @@ static inline void set_btree_node_ ## flag(struct btree *b) \
164{ set_bit(BTREE_NODE_ ## flag, &b->flags); } \ 159{ set_bit(BTREE_NODE_ ## flag, &b->flags); } \
165 160
166enum btree_flags { 161enum btree_flags {
167 BTREE_NODE_read_done,
168 BTREE_NODE_io_error, 162 BTREE_NODE_io_error,
169 BTREE_NODE_dirty, 163 BTREE_NODE_dirty,
170 BTREE_NODE_write_idx, 164 BTREE_NODE_write_idx,
171}; 165};
172 166
173BTREE_FLAG(read_done);
174BTREE_FLAG(io_error); 167BTREE_FLAG(io_error);
175BTREE_FLAG(dirty); 168BTREE_FLAG(dirty);
176BTREE_FLAG(write_idx); 169BTREE_FLAG(write_idx);
@@ -278,6 +271,13 @@ struct btree_op {
278 BKEY_PADDED(replace); 271 BKEY_PADDED(replace);
279}; 272};
280 273
274enum {
275 BTREE_INSERT_STATUS_INSERT,
276 BTREE_INSERT_STATUS_BACK_MERGE,
277 BTREE_INSERT_STATUS_OVERWROTE,
278 BTREE_INSERT_STATUS_FRONT_MERGE,
279};
280
281void bch_btree_op_init_stack(struct btree_op *); 281void bch_btree_op_init_stack(struct btree_op *);
282 282
283static inline void rw_lock(bool w, struct btree *b, int level) 283static inline void rw_lock(bool w, struct btree *b, int level)
@@ -293,9 +293,7 @@ static inline void rw_unlock(bool w, struct btree *b)
293#ifdef CONFIG_BCACHE_EDEBUG 293#ifdef CONFIG_BCACHE_EDEBUG
294 unsigned i; 294 unsigned i;
295 295
296 if (w && 296 if (w && b->key.ptr[0])
297 b->key.ptr[0] &&
298 btree_node_read_done(b))
299 for (i = 0; i <= b->nsets; i++) 297 for (i = 0; i <= b->nsets; i++)
300 bch_check_key_order(b, b->sets[i].data); 298 bch_check_key_order(b, b->sets[i].data);
301#endif 299#endif
@@ -370,9 +368,8 @@ static inline bool should_split(struct btree *b)
370 > btree_blocks(b)); 368 > btree_blocks(b));
371} 369}
372 370
373void bch_btree_read_done(struct closure *); 371void bch_btree_node_read(struct btree *);
374void bch_btree_read(struct btree *); 372void bch_btree_node_write(struct btree *, struct closure *);
375void bch_btree_write(struct btree *b, bool now, struct btree_op *op);
376 373
377void bch_cannibalize_unlock(struct cache_set *, struct closure *); 374void bch_cannibalize_unlock(struct cache_set *, struct closure *);
378void bch_btree_set_root(struct btree *); 375void bch_btree_set_root(struct btree *);
@@ -380,7 +377,6 @@ struct btree *bch_btree_node_alloc(struct cache_set *, int, struct closure *);
380struct btree *bch_btree_node_get(struct cache_set *, struct bkey *, 377struct btree *bch_btree_node_get(struct cache_set *, struct bkey *,
381 int, struct btree_op *); 378 int, struct btree_op *);
382 379
383bool bch_btree_insert_keys(struct btree *, struct btree_op *);
384bool bch_btree_insert_check_key(struct btree *, struct btree_op *, 380bool bch_btree_insert_check_key(struct btree *, struct btree_op *,
385 struct bio *); 381 struct bio *);
386int bch_btree_insert(struct btree_op *, struct cache_set *); 382int bch_btree_insert(struct btree_op *, struct cache_set *);
@@ -393,13 +389,14 @@ void bch_moving_gc(struct closure *);
393int bch_btree_check(struct cache_set *, struct btree_op *); 389int bch_btree_check(struct cache_set *, struct btree_op *);
394uint8_t __bch_btree_mark_key(struct cache_set *, int, struct bkey *); 390uint8_t __bch_btree_mark_key(struct cache_set *, int, struct bkey *);
395 391
396void bch_keybuf_init(struct keybuf *, keybuf_pred_fn *); 392void bch_keybuf_init(struct keybuf *);
397void bch_refill_keybuf(struct cache_set *, struct keybuf *, struct bkey *); 393void bch_refill_keybuf(struct cache_set *, struct keybuf *, struct bkey *,
394 keybuf_pred_fn *);
398bool bch_keybuf_check_overlapping(struct keybuf *, struct bkey *, 395bool bch_keybuf_check_overlapping(struct keybuf *, struct bkey *,
399 struct bkey *); 396 struct bkey *);
400void bch_keybuf_del(struct keybuf *, struct keybuf_key *); 397void bch_keybuf_del(struct keybuf *, struct keybuf_key *);
401struct keybuf_key *bch_keybuf_next(struct keybuf *); 398struct keybuf_key *bch_keybuf_next(struct keybuf *);
402struct keybuf_key *bch_keybuf_next_rescan(struct cache_set *, 399struct keybuf_key *bch_keybuf_next_rescan(struct cache_set *, struct keybuf *,
403 struct keybuf *, struct bkey *); 400 struct bkey *, keybuf_pred_fn *);
404 401
405#endif 402#endif
diff --git a/drivers/md/bcache/closure.c b/drivers/md/bcache/closure.c
index bd05a9a8c7cf..9aba2017f0d1 100644
--- a/drivers/md/bcache/closure.c
+++ b/drivers/md/bcache/closure.c
@@ -66,16 +66,18 @@ static inline void closure_put_after_sub(struct closure *cl, int flags)
66 } else { 66 } else {
67 struct closure *parent = cl->parent; 67 struct closure *parent = cl->parent;
68 struct closure_waitlist *wait = closure_waitlist(cl); 68 struct closure_waitlist *wait = closure_waitlist(cl);
69 closure_fn *destructor = cl->fn;
69 70
70 closure_debug_destroy(cl); 71 closure_debug_destroy(cl);
71 72
73 smp_mb();
72 atomic_set(&cl->remaining, -1); 74 atomic_set(&cl->remaining, -1);
73 75
74 if (wait) 76 if (wait)
75 closure_wake_up(wait); 77 closure_wake_up(wait);
76 78
77 if (cl->fn) 79 if (destructor)
78 cl->fn(cl); 80 destructor(cl);
79 81
80 if (parent) 82 if (parent)
81 closure_put(parent); 83 closure_put(parent);
diff --git a/drivers/md/bcache/debug.c b/drivers/md/bcache/debug.c
index 89fd5204924e..88e6411eab4f 100644
--- a/drivers/md/bcache/debug.c
+++ b/drivers/md/bcache/debug.c
@@ -47,11 +47,10 @@ const char *bch_ptr_status(struct cache_set *c, const struct bkey *k)
47 return ""; 47 return "";
48} 48}
49 49
50struct keyprint_hack bch_pkey(const struct bkey *k) 50int bch_bkey_to_text(char *buf, size_t size, const struct bkey *k)
51{ 51{
52 unsigned i = 0; 52 unsigned i = 0;
53 struct keyprint_hack r; 53 char *out = buf, *end = buf + size;
54 char *out = r.s, *end = r.s + KEYHACK_SIZE;
55 54
56#define p(...) (out += scnprintf(out, end - out, __VA_ARGS__)) 55#define p(...) (out += scnprintf(out, end - out, __VA_ARGS__))
57 56
@@ -75,16 +74,14 @@ struct keyprint_hack bch_pkey(const struct bkey *k)
75 if (KEY_CSUM(k)) 74 if (KEY_CSUM(k))
76 p(" cs%llu %llx", KEY_CSUM(k), k->ptr[1]); 75 p(" cs%llu %llx", KEY_CSUM(k), k->ptr[1]);
77#undef p 76#undef p
78 return r; 77 return out - buf;
79} 78}
80 79
81struct keyprint_hack bch_pbtree(const struct btree *b) 80int bch_btree_to_text(char *buf, size_t size, const struct btree *b)
82{ 81{
83 struct keyprint_hack r; 82 return scnprintf(buf, size, "%zu level %i/%i",
84 83 PTR_BUCKET_NR(b->c, &b->key, 0),
85 snprintf(r.s, 40, "%zu level %i/%i", PTR_BUCKET_NR(b->c, &b->key, 0), 84 b->level, b->c->root ? b->c->root->level : -1);
86 b->level, b->c->root ? b->c->root->level : -1);
87 return r;
88} 85}
89 86
90#if defined(CONFIG_BCACHE_DEBUG) || defined(CONFIG_BCACHE_EDEBUG) 87#if defined(CONFIG_BCACHE_DEBUG) || defined(CONFIG_BCACHE_EDEBUG)
@@ -100,10 +97,12 @@ static void dump_bset(struct btree *b, struct bset *i)
100{ 97{
101 struct bkey *k; 98 struct bkey *k;
102 unsigned j; 99 unsigned j;
100 char buf[80];
103 101
104 for (k = i->start; k < end(i); k = bkey_next(k)) { 102 for (k = i->start; k < end(i); k = bkey_next(k)) {
103 bch_bkey_to_text(buf, sizeof(buf), k);
105 printk(KERN_ERR "block %zu key %zi/%u: %s", index(i, b), 104 printk(KERN_ERR "block %zu key %zi/%u: %s", index(i, b),
106 (uint64_t *) k - i->d, i->keys, pkey(k)); 105 (uint64_t *) k - i->d, i->keys, buf);
107 106
108 for (j = 0; j < KEY_PTRS(k); j++) { 107 for (j = 0; j < KEY_PTRS(k); j++) {
109 size_t n = PTR_BUCKET_NR(b->c, k, j); 108 size_t n = PTR_BUCKET_NR(b->c, k, j);
@@ -144,7 +143,7 @@ void bch_btree_verify(struct btree *b, struct bset *new)
144 v->written = 0; 143 v->written = 0;
145 v->level = b->level; 144 v->level = b->level;
146 145
147 bch_btree_read(v); 146 bch_btree_node_read(v);
148 closure_wait_event(&v->io.wait, &cl, 147 closure_wait_event(&v->io.wait, &cl,
149 atomic_read(&b->io.cl.remaining) == -1); 148 atomic_read(&b->io.cl.remaining) == -1);
150 149
@@ -200,7 +199,7 @@ void bch_data_verify(struct search *s)
200 if (!check) 199 if (!check)
201 return; 200 return;
202 201
203 if (bch_bio_alloc_pages(check, GFP_NOIO)) 202 if (bio_alloc_pages(check, GFP_NOIO))
204 goto out_put; 203 goto out_put;
205 204
206 check->bi_rw = READ_SYNC; 205 check->bi_rw = READ_SYNC;
@@ -252,6 +251,7 @@ static void vdump_bucket_and_panic(struct btree *b, const char *fmt,
252 va_list args) 251 va_list args)
253{ 252{
254 unsigned i; 253 unsigned i;
254 char buf[80];
255 255
256 console_lock(); 256 console_lock();
257 257
@@ -262,7 +262,8 @@ static void vdump_bucket_and_panic(struct btree *b, const char *fmt,
262 262
263 console_unlock(); 263 console_unlock();
264 264
265 panic("at %s\n", pbtree(b)); 265 bch_btree_to_text(buf, sizeof(buf), b);
266 panic("at %s\n", buf);
266} 267}
267 268
268void bch_check_key_order_msg(struct btree *b, struct bset *i, 269void bch_check_key_order_msg(struct btree *b, struct bset *i,
@@ -337,6 +338,7 @@ static ssize_t bch_dump_read(struct file *file, char __user *buf,
337{ 338{
338 struct dump_iterator *i = file->private_data; 339 struct dump_iterator *i = file->private_data;
339 ssize_t ret = 0; 340 ssize_t ret = 0;
341 char kbuf[80];
340 342
341 while (size) { 343 while (size) {
342 struct keybuf_key *w; 344 struct keybuf_key *w;
@@ -355,11 +357,12 @@ static ssize_t bch_dump_read(struct file *file, char __user *buf,
355 if (i->bytes) 357 if (i->bytes)
356 break; 358 break;
357 359
358 w = bch_keybuf_next_rescan(i->c, &i->keys, &MAX_KEY); 360 w = bch_keybuf_next_rescan(i->c, &i->keys, &MAX_KEY, dump_pred);
359 if (!w) 361 if (!w)
360 break; 362 break;
361 363
362 i->bytes = snprintf(i->buf, PAGE_SIZE, "%s\n", pkey(&w->key)); 364 bch_bkey_to_text(kbuf, sizeof(kbuf), &w->key);
365 i->bytes = snprintf(i->buf, PAGE_SIZE, "%s\n", kbuf);
363 bch_keybuf_del(&i->keys, w); 366 bch_keybuf_del(&i->keys, w);
364 } 367 }
365 368
@@ -377,7 +380,7 @@ static int bch_dump_open(struct inode *inode, struct file *file)
377 380
378 file->private_data = i; 381 file->private_data = i;
379 i->c = c; 382 i->c = c;
380 bch_keybuf_init(&i->keys, dump_pred); 383 bch_keybuf_init(&i->keys);
381 i->keys.last_scanned = KEY(0, 0, 0); 384 i->keys.last_scanned = KEY(0, 0, 0);
382 385
383 return 0; 386 return 0;
@@ -409,142 +412,6 @@ void bch_debug_init_cache_set(struct cache_set *c)
409 412
410#endif 413#endif
411 414
412/* Fuzz tester has rotted: */
413#if 0
414
415static ssize_t btree_fuzz(struct kobject *k, struct kobj_attribute *a,
416 const char *buffer, size_t size)
417{
418 void dump(struct btree *b)
419 {
420 struct bset *i;
421
422 for (i = b->sets[0].data;
423 index(i, b) < btree_blocks(b) &&
424 i->seq == b->sets[0].data->seq;
425 i = ((void *) i) + set_blocks(i, b->c) * block_bytes(b->c))
426 dump_bset(b, i);
427 }
428
429 struct cache_sb *sb;
430 struct cache_set *c;
431 struct btree *all[3], *b, *fill, *orig;
432 int j;
433
434 struct btree_op op;
435 bch_btree_op_init_stack(&op);
436
437 sb = kzalloc(sizeof(struct cache_sb), GFP_KERNEL);
438 if (!sb)
439 return -ENOMEM;
440
441 sb->bucket_size = 128;
442 sb->block_size = 4;
443
444 c = bch_cache_set_alloc(sb);
445 if (!c)
446 return -ENOMEM;
447
448 for (j = 0; j < 3; j++) {
449 BUG_ON(list_empty(&c->btree_cache));
450 all[j] = list_first_entry(&c->btree_cache, struct btree, list);
451 list_del_init(&all[j]->list);
452
453 all[j]->key = KEY(0, 0, c->sb.bucket_size);
454 bkey_copy_key(&all[j]->key, &MAX_KEY);
455 }
456
457 b = all[0];
458 fill = all[1];
459 orig = all[2];
460
461 while (1) {
462 for (j = 0; j < 3; j++)
463 all[j]->written = all[j]->nsets = 0;
464
465 bch_bset_init_next(b);
466
467 while (1) {
468 struct bset *i = write_block(b);
469 struct bkey *k = op.keys.top;
470 unsigned rand;
471
472 bkey_init(k);
473 rand = get_random_int();
474
475 op.type = rand & 1
476 ? BTREE_INSERT
477 : BTREE_REPLACE;
478 rand >>= 1;
479
480 SET_KEY_SIZE(k, bucket_remainder(c, rand));
481 rand >>= c->bucket_bits;
482 rand &= 1024 * 512 - 1;
483 rand += c->sb.bucket_size;
484 SET_KEY_OFFSET(k, rand);
485#if 0
486 SET_KEY_PTRS(k, 1);
487#endif
488 bch_keylist_push(&op.keys);
489 bch_btree_insert_keys(b, &op);
490
491 if (should_split(b) ||
492 set_blocks(i, b->c) !=
493 __set_blocks(i, i->keys + 15, b->c)) {
494 i->csum = csum_set(i);
495
496 memcpy(write_block(fill),
497 i, set_bytes(i));
498
499 b->written += set_blocks(i, b->c);
500 fill->written = b->written;
501 if (b->written == btree_blocks(b))
502 break;
503
504 bch_btree_sort_lazy(b);
505 bch_bset_init_next(b);
506 }
507 }
508
509 memcpy(orig->sets[0].data,
510 fill->sets[0].data,
511 btree_bytes(c));
512
513 bch_btree_sort(b);
514 fill->written = 0;
515 bch_btree_read_done(&fill->io.cl);
516
517 if (b->sets[0].data->keys != fill->sets[0].data->keys ||
518 memcmp(b->sets[0].data->start,
519 fill->sets[0].data->start,
520 b->sets[0].data->keys * sizeof(uint64_t))) {
521 struct bset *i = b->sets[0].data;
522 struct bkey *k, *l;
523
524 for (k = i->start,
525 l = fill->sets[0].data->start;
526 k < end(i);
527 k = bkey_next(k), l = bkey_next(l))
528 if (bkey_cmp(k, l) ||
529 KEY_SIZE(k) != KEY_SIZE(l))
530 pr_err("key %zi differs: %s != %s",
531 (uint64_t *) k - i->d,
532 pkey(k), pkey(l));
533
534 for (j = 0; j < 3; j++) {
535 pr_err("**** Set %i ****", j);
536 dump(all[j]);
537 }
538 panic("\n");
539 }
540
541 pr_info("fuzz complete: %i keys", b->sets[0].data->keys);
542 }
543}
544
545kobj_attribute_write(fuzz, btree_fuzz);
546#endif
547
548void bch_debug_exit(void) 415void bch_debug_exit(void)
549{ 416{
550 if (!IS_ERR_OR_NULL(debug)) 417 if (!IS_ERR_OR_NULL(debug))
@@ -554,11 +421,6 @@ void bch_debug_exit(void)
554int __init bch_debug_init(struct kobject *kobj) 421int __init bch_debug_init(struct kobject *kobj)
555{ 422{
556 int ret = 0; 423 int ret = 0;
557#if 0
558 ret = sysfs_create_file(kobj, &ksysfs_fuzz.attr);
559 if (ret)
560 return ret;
561#endif
562 424
563 debug = debugfs_create_dir("bcache", NULL); 425 debug = debugfs_create_dir("bcache", NULL);
564 return ret; 426 return ret;
diff --git a/drivers/md/bcache/debug.h b/drivers/md/bcache/debug.h
index f9378a218148..1c39b5a2489b 100644
--- a/drivers/md/bcache/debug.h
+++ b/drivers/md/bcache/debug.h
@@ -3,15 +3,8 @@
3 3
4/* Btree/bkey debug printing */ 4/* Btree/bkey debug printing */
5 5
6#define KEYHACK_SIZE 80 6int bch_bkey_to_text(char *buf, size_t size, const struct bkey *k);
7struct keyprint_hack { 7int bch_btree_to_text(char *buf, size_t size, const struct btree *b);
8 char s[KEYHACK_SIZE];
9};
10
11struct keyprint_hack bch_pkey(const struct bkey *k);
12struct keyprint_hack bch_pbtree(const struct btree *b);
13#define pkey(k) (&bch_pkey(k).s[0])
14#define pbtree(b) (&bch_pbtree(b).s[0])
15 8
16#ifdef CONFIG_BCACHE_EDEBUG 9#ifdef CONFIG_BCACHE_EDEBUG
17 10
diff --git a/drivers/md/bcache/io.c b/drivers/md/bcache/io.c
index 48efd4dea645..9056632995b1 100644
--- a/drivers/md/bcache/io.c
+++ b/drivers/md/bcache/io.c
@@ -9,6 +9,8 @@
9#include "bset.h" 9#include "bset.h"
10#include "debug.h" 10#include "debug.h"
11 11
12#include <linux/blkdev.h>
13
12static void bch_bi_idx_hack_endio(struct bio *bio, int error) 14static void bch_bi_idx_hack_endio(struct bio *bio, int error)
13{ 15{
14 struct bio *p = bio->bi_private; 16 struct bio *p = bio->bi_private;
@@ -66,13 +68,6 @@ static void bch_generic_make_request_hack(struct bio *bio)
66 * The newly allocated bio will point to @bio's bi_io_vec, if the split was on a 68 * The newly allocated bio will point to @bio's bi_io_vec, if the split was on a
67 * bvec boundry; it is the caller's responsibility to ensure that @bio is not 69 * bvec boundry; it is the caller's responsibility to ensure that @bio is not
68 * freed before the split. 70 * freed before the split.
69 *
70 * If bch_bio_split() is running under generic_make_request(), it's not safe to
71 * allocate more than one bio from the same bio set. Therefore, if it is running
72 * under generic_make_request() it masks out __GFP_WAIT when doing the
73 * allocation. The caller must check for failure if there's any possibility of
74 * it being called from under generic_make_request(); it is then the caller's
75 * responsibility to retry from a safe context (by e.g. punting to workqueue).
76 */ 71 */
77struct bio *bch_bio_split(struct bio *bio, int sectors, 72struct bio *bch_bio_split(struct bio *bio, int sectors,
78 gfp_t gfp, struct bio_set *bs) 73 gfp_t gfp, struct bio_set *bs)
@@ -83,20 +78,13 @@ struct bio *bch_bio_split(struct bio *bio, int sectors,
83 78
84 BUG_ON(sectors <= 0); 79 BUG_ON(sectors <= 0);
85 80
86 /*
87 * If we're being called from underneath generic_make_request() and we
88 * already allocated any bios from this bio set, we risk deadlock if we
89 * use the mempool. So instead, we possibly fail and let the caller punt
90 * to workqueue or somesuch and retry in a safe context.
91 */
92 if (current->bio_list)
93 gfp &= ~__GFP_WAIT;
94
95 if (sectors >= bio_sectors(bio)) 81 if (sectors >= bio_sectors(bio))
96 return bio; 82 return bio;
97 83
98 if (bio->bi_rw & REQ_DISCARD) { 84 if (bio->bi_rw & REQ_DISCARD) {
99 ret = bio_alloc_bioset(gfp, 1, bs); 85 ret = bio_alloc_bioset(gfp, 1, bs);
86 if (!ret)
87 return NULL;
100 idx = 0; 88 idx = 0;
101 goto out; 89 goto out;
102 } 90 }
@@ -160,17 +148,18 @@ static unsigned bch_bio_max_sectors(struct bio *bio)
160 struct request_queue *q = bdev_get_queue(bio->bi_bdev); 148 struct request_queue *q = bdev_get_queue(bio->bi_bdev);
161 unsigned max_segments = min_t(unsigned, BIO_MAX_PAGES, 149 unsigned max_segments = min_t(unsigned, BIO_MAX_PAGES,
162 queue_max_segments(q)); 150 queue_max_segments(q));
163 struct bio_vec *bv, *end = bio_iovec(bio) +
164 min_t(int, bio_segments(bio), max_segments);
165 151
166 if (bio->bi_rw & REQ_DISCARD) 152 if (bio->bi_rw & REQ_DISCARD)
167 return min(ret, q->limits.max_discard_sectors); 153 return min(ret, q->limits.max_discard_sectors);
168 154
169 if (bio_segments(bio) > max_segments || 155 if (bio_segments(bio) > max_segments ||
170 q->merge_bvec_fn) { 156 q->merge_bvec_fn) {
157 struct bio_vec *bv;
158 int i, seg = 0;
159
171 ret = 0; 160 ret = 0;
172 161
173 for (bv = bio_iovec(bio); bv < end; bv++) { 162 bio_for_each_segment(bv, bio, i) {
174 struct bvec_merge_data bvm = { 163 struct bvec_merge_data bvm = {
175 .bi_bdev = bio->bi_bdev, 164 .bi_bdev = bio->bi_bdev,
176 .bi_sector = bio->bi_sector, 165 .bi_sector = bio->bi_sector,
@@ -178,10 +167,14 @@ static unsigned bch_bio_max_sectors(struct bio *bio)
178 .bi_rw = bio->bi_rw, 167 .bi_rw = bio->bi_rw,
179 }; 168 };
180 169
170 if (seg == max_segments)
171 break;
172
181 if (q->merge_bvec_fn && 173 if (q->merge_bvec_fn &&
182 q->merge_bvec_fn(q, &bvm, bv) < (int) bv->bv_len) 174 q->merge_bvec_fn(q, &bvm, bv) < (int) bv->bv_len)
183 break; 175 break;
184 176
177 seg++;
185 ret += bv->bv_len >> 9; 178 ret += bv->bv_len >> 9;
186 } 179 }
187 } 180 }
@@ -218,30 +211,10 @@ static void bch_bio_submit_split_endio(struct bio *bio, int error)
218 closure_put(cl); 211 closure_put(cl);
219} 212}
220 213
221static void __bch_bio_submit_split(struct closure *cl)
222{
223 struct bio_split_hook *s = container_of(cl, struct bio_split_hook, cl);
224 struct bio *bio = s->bio, *n;
225
226 do {
227 n = bch_bio_split(bio, bch_bio_max_sectors(bio),
228 GFP_NOIO, s->p->bio_split);
229 if (!n)
230 continue_at(cl, __bch_bio_submit_split, system_wq);
231
232 n->bi_end_io = bch_bio_submit_split_endio;
233 n->bi_private = cl;
234
235 closure_get(cl);
236 bch_generic_make_request_hack(n);
237 } while (n != bio);
238
239 continue_at(cl, bch_bio_submit_split_done, NULL);
240}
241
242void bch_generic_make_request(struct bio *bio, struct bio_split_pool *p) 214void bch_generic_make_request(struct bio *bio, struct bio_split_pool *p)
243{ 215{
244 struct bio_split_hook *s; 216 struct bio_split_hook *s;
217 struct bio *n;
245 218
246 if (!bio_has_data(bio) && !(bio->bi_rw & REQ_DISCARD)) 219 if (!bio_has_data(bio) && !(bio->bi_rw & REQ_DISCARD))
247 goto submit; 220 goto submit;
@@ -250,6 +223,7 @@ void bch_generic_make_request(struct bio *bio, struct bio_split_pool *p)
250 goto submit; 223 goto submit;
251 224
252 s = mempool_alloc(p->bio_split_hook, GFP_NOIO); 225 s = mempool_alloc(p->bio_split_hook, GFP_NOIO);
226 closure_init(&s->cl, NULL);
253 227
254 s->bio = bio; 228 s->bio = bio;
255 s->p = p; 229 s->p = p;
@@ -257,8 +231,18 @@ void bch_generic_make_request(struct bio *bio, struct bio_split_pool *p)
257 s->bi_private = bio->bi_private; 231 s->bi_private = bio->bi_private;
258 bio_get(bio); 232 bio_get(bio);
259 233
260 closure_call(&s->cl, __bch_bio_submit_split, NULL, NULL); 234 do {
261 return; 235 n = bch_bio_split(bio, bch_bio_max_sectors(bio),
236 GFP_NOIO, s->p->bio_split);
237
238 n->bi_end_io = bch_bio_submit_split_endio;
239 n->bi_private = &s->cl;
240
241 closure_get(&s->cl);
242 bch_generic_make_request_hack(n);
243 } while (n != bio);
244
245 continue_at(&s->cl, bch_bio_submit_split_done, NULL);
262submit: 246submit:
263 bch_generic_make_request_hack(bio); 247 bch_generic_make_request_hack(bio);
264} 248}
diff --git a/drivers/md/bcache/journal.c b/drivers/md/bcache/journal.c
index 8c8dfdcd9d4c..ba95ab84b2be 100644
--- a/drivers/md/bcache/journal.c
+++ b/drivers/md/bcache/journal.c
@@ -9,6 +9,8 @@
9#include "debug.h" 9#include "debug.h"
10#include "request.h" 10#include "request.h"
11 11
12#include <trace/events/bcache.h>
13
12/* 14/*
13 * Journal replay/recovery: 15 * Journal replay/recovery:
14 * 16 *
@@ -182,9 +184,14 @@ bsearch:
182 pr_debug("starting binary search, l %u r %u", l, r); 184 pr_debug("starting binary search, l %u r %u", l, r);
183 185
184 while (l + 1 < r) { 186 while (l + 1 < r) {
187 seq = list_entry(list->prev, struct journal_replay,
188 list)->j.seq;
189
185 m = (l + r) >> 1; 190 m = (l + r) >> 1;
191 read_bucket(m);
186 192
187 if (read_bucket(m)) 193 if (seq != list_entry(list->prev, struct journal_replay,
194 list)->j.seq)
188 l = m; 195 l = m;
189 else 196 else
190 r = m; 197 r = m;
@@ -300,7 +307,8 @@ int bch_journal_replay(struct cache_set *s, struct list_head *list,
300 for (k = i->j.start; 307 for (k = i->j.start;
301 k < end(&i->j); 308 k < end(&i->j);
302 k = bkey_next(k)) { 309 k = bkey_next(k)) {
303 pr_debug("%s", pkey(k)); 310 trace_bcache_journal_replay_key(k);
311
304 bkey_copy(op->keys.top, k); 312 bkey_copy(op->keys.top, k);
305 bch_keylist_push(&op->keys); 313 bch_keylist_push(&op->keys);
306 314
@@ -384,7 +392,7 @@ out:
384 return; 392 return;
385found: 393found:
386 if (btree_node_dirty(best)) 394 if (btree_node_dirty(best))
387 bch_btree_write(best, true, NULL); 395 bch_btree_node_write(best, NULL);
388 rw_unlock(true, best); 396 rw_unlock(true, best);
389} 397}
390 398
@@ -617,7 +625,7 @@ static void journal_write_unlocked(struct closure *cl)
617 bio_reset(bio); 625 bio_reset(bio);
618 bio->bi_sector = PTR_OFFSET(k, i); 626 bio->bi_sector = PTR_OFFSET(k, i);
619 bio->bi_bdev = ca->bdev; 627 bio->bi_bdev = ca->bdev;
620 bio->bi_rw = REQ_WRITE|REQ_SYNC|REQ_META|REQ_FLUSH; 628 bio->bi_rw = REQ_WRITE|REQ_SYNC|REQ_META|REQ_FLUSH|REQ_FUA;
621 bio->bi_size = sectors << 9; 629 bio->bi_size = sectors << 9;
622 630
623 bio->bi_end_io = journal_write_endio; 631 bio->bi_end_io = journal_write_endio;
@@ -712,7 +720,8 @@ void bch_journal(struct closure *cl)
712 spin_lock(&c->journal.lock); 720 spin_lock(&c->journal.lock);
713 721
714 if (journal_full(&c->journal)) { 722 if (journal_full(&c->journal)) {
715 /* XXX: tracepoint */ 723 trace_bcache_journal_full(c);
724
716 closure_wait(&c->journal.wait, cl); 725 closure_wait(&c->journal.wait, cl);
717 726
718 journal_reclaim(c); 727 journal_reclaim(c);
@@ -728,13 +737,15 @@ void bch_journal(struct closure *cl)
728 737
729 if (b * c->sb.block_size > PAGE_SECTORS << JSET_BITS || 738 if (b * c->sb.block_size > PAGE_SECTORS << JSET_BITS ||
730 b > c->journal.blocks_free) { 739 b > c->journal.blocks_free) {
731 /* XXX: If we were inserting so many keys that they won't fit in 740 trace_bcache_journal_entry_full(c);
741
742 /*
743 * XXX: If we were inserting so many keys that they won't fit in
732 * an _empty_ journal write, we'll deadlock. For now, handle 744 * an _empty_ journal write, we'll deadlock. For now, handle
733 * this in bch_keylist_realloc() - but something to think about. 745 * this in bch_keylist_realloc() - but something to think about.
734 */ 746 */
735 BUG_ON(!w->data->keys); 747 BUG_ON(!w->data->keys);
736 748
737 /* XXX: tracepoint */
738 BUG_ON(!closure_wait(&w->wait, cl)); 749 BUG_ON(!closure_wait(&w->wait, cl));
739 750
740 closure_flush(&c->journal.io); 751 closure_flush(&c->journal.io);
diff --git a/drivers/md/bcache/movinggc.c b/drivers/md/bcache/movinggc.c
index 8589512c972e..1a3b4f4786c3 100644
--- a/drivers/md/bcache/movinggc.c
+++ b/drivers/md/bcache/movinggc.c
@@ -9,6 +9,8 @@
9#include "debug.h" 9#include "debug.h"
10#include "request.h" 10#include "request.h"
11 11
12#include <trace/events/bcache.h>
13
12struct moving_io { 14struct moving_io {
13 struct keybuf_key *w; 15 struct keybuf_key *w;
14 struct search s; 16 struct search s;
@@ -44,14 +46,14 @@ static void write_moving_finish(struct closure *cl)
44{ 46{
45 struct moving_io *io = container_of(cl, struct moving_io, s.cl); 47 struct moving_io *io = container_of(cl, struct moving_io, s.cl);
46 struct bio *bio = &io->bio.bio; 48 struct bio *bio = &io->bio.bio;
47 struct bio_vec *bv = bio_iovec_idx(bio, bio->bi_vcnt); 49 struct bio_vec *bv;
50 int i;
48 51
49 while (bv-- != bio->bi_io_vec) 52 bio_for_each_segment_all(bv, bio, i)
50 __free_page(bv->bv_page); 53 __free_page(bv->bv_page);
51 54
52 pr_debug("%s %s", io->s.op.insert_collision 55 if (io->s.op.insert_collision)
53 ? "collision moving" : "moved", 56 trace_bcache_gc_copy_collision(&io->w->key);
54 pkey(&io->w->key));
55 57
56 bch_keybuf_del(&io->s.op.c->moving_gc_keys, io->w); 58 bch_keybuf_del(&io->s.op.c->moving_gc_keys, io->w);
57 59
@@ -94,8 +96,6 @@ static void write_moving(struct closure *cl)
94 struct moving_io *io = container_of(s, struct moving_io, s); 96 struct moving_io *io = container_of(s, struct moving_io, s);
95 97
96 if (!s->error) { 98 if (!s->error) {
97 trace_bcache_write_moving(&io->bio.bio);
98
99 moving_init(io); 99 moving_init(io);
100 100
101 io->bio.bio.bi_sector = KEY_START(&io->w->key); 101 io->bio.bio.bi_sector = KEY_START(&io->w->key);
@@ -122,7 +122,6 @@ static void read_moving_submit(struct closure *cl)
122 struct moving_io *io = container_of(s, struct moving_io, s); 122 struct moving_io *io = container_of(s, struct moving_io, s);
123 struct bio *bio = &io->bio.bio; 123 struct bio *bio = &io->bio.bio;
124 124
125 trace_bcache_read_moving(bio);
126 bch_submit_bbio(bio, s->op.c, &io->w->key, 0); 125 bch_submit_bbio(bio, s->op.c, &io->w->key, 0);
127 126
128 continue_at(cl, write_moving, bch_gc_wq); 127 continue_at(cl, write_moving, bch_gc_wq);
@@ -138,7 +137,8 @@ static void read_moving(struct closure *cl)
138 /* XXX: if we error, background writeback could stall indefinitely */ 137 /* XXX: if we error, background writeback could stall indefinitely */
139 138
140 while (!test_bit(CACHE_SET_STOPPING, &c->flags)) { 139 while (!test_bit(CACHE_SET_STOPPING, &c->flags)) {
141 w = bch_keybuf_next_rescan(c, &c->moving_gc_keys, &MAX_KEY); 140 w = bch_keybuf_next_rescan(c, &c->moving_gc_keys,
141 &MAX_KEY, moving_pred);
142 if (!w) 142 if (!w)
143 break; 143 break;
144 144
@@ -159,10 +159,10 @@ static void read_moving(struct closure *cl)
159 bio->bi_rw = READ; 159 bio->bi_rw = READ;
160 bio->bi_end_io = read_moving_endio; 160 bio->bi_end_io = read_moving_endio;
161 161
162 if (bch_bio_alloc_pages(bio, GFP_KERNEL)) 162 if (bio_alloc_pages(bio, GFP_KERNEL))
163 goto err; 163 goto err;
164 164
165 pr_debug("%s", pkey(&w->key)); 165 trace_bcache_gc_copy(&w->key);
166 166
167 closure_call(&io->s.cl, read_moving_submit, NULL, &c->gc.cl); 167 closure_call(&io->s.cl, read_moving_submit, NULL, &c->gc.cl);
168 168
@@ -250,5 +250,5 @@ void bch_moving_gc(struct closure *cl)
250 250
251void bch_moving_init_cache_set(struct cache_set *c) 251void bch_moving_init_cache_set(struct cache_set *c)
252{ 252{
253 bch_keybuf_init(&c->moving_gc_keys, moving_pred); 253 bch_keybuf_init(&c->moving_gc_keys);
254} 254}
diff --git a/drivers/md/bcache/request.c b/drivers/md/bcache/request.c
index e5ff12e52d5b..786a1a4f74d8 100644
--- a/drivers/md/bcache/request.c
+++ b/drivers/md/bcache/request.c
@@ -10,6 +10,7 @@
10#include "btree.h" 10#include "btree.h"
11#include "debug.h" 11#include "debug.h"
12#include "request.h" 12#include "request.h"
13#include "writeback.h"
13 14
14#include <linux/cgroup.h> 15#include <linux/cgroup.h>
15#include <linux/module.h> 16#include <linux/module.h>
@@ -21,8 +22,6 @@
21 22
22#define CUTOFF_CACHE_ADD 95 23#define CUTOFF_CACHE_ADD 95
23#define CUTOFF_CACHE_READA 90 24#define CUTOFF_CACHE_READA 90
24#define CUTOFF_WRITEBACK 50
25#define CUTOFF_WRITEBACK_SYNC 75
26 25
27struct kmem_cache *bch_search_cache; 26struct kmem_cache *bch_search_cache;
28 27
@@ -489,6 +488,12 @@ static void bch_insert_data_loop(struct closure *cl)
489 bch_queue_gc(op->c); 488 bch_queue_gc(op->c);
490 } 489 }
491 490
491 /*
492 * Journal writes are marked REQ_FLUSH; if the original write was a
493 * flush, it'll wait on the journal write.
494 */
495 bio->bi_rw &= ~(REQ_FLUSH|REQ_FUA);
496
492 do { 497 do {
493 unsigned i; 498 unsigned i;
494 struct bkey *k; 499 struct bkey *k;
@@ -510,10 +515,6 @@ static void bch_insert_data_loop(struct closure *cl)
510 goto err; 515 goto err;
511 516
512 n = bch_bio_split(bio, KEY_SIZE(k), GFP_NOIO, split); 517 n = bch_bio_split(bio, KEY_SIZE(k), GFP_NOIO, split);
513 if (!n) {
514 __bkey_put(op->c, k);
515 continue_at(cl, bch_insert_data_loop, bcache_wq);
516 }
517 518
518 n->bi_end_io = bch_insert_data_endio; 519 n->bi_end_io = bch_insert_data_endio;
519 n->bi_private = cl; 520 n->bi_private = cl;
@@ -530,10 +531,9 @@ static void bch_insert_data_loop(struct closure *cl)
530 if (KEY_CSUM(k)) 531 if (KEY_CSUM(k))
531 bio_csum(n, k); 532 bio_csum(n, k);
532 533
533 pr_debug("%s", pkey(k)); 534 trace_bcache_cache_insert(k);
534 bch_keylist_push(&op->keys); 535 bch_keylist_push(&op->keys);
535 536
536 trace_bcache_cache_insert(n, n->bi_sector, n->bi_bdev);
537 n->bi_rw |= REQ_WRITE; 537 n->bi_rw |= REQ_WRITE;
538 bch_submit_bbio(n, op->c, k, 0); 538 bch_submit_bbio(n, op->c, k, 0);
539 } while (n != bio); 539 } while (n != bio);
@@ -716,7 +716,7 @@ static struct search *search_alloc(struct bio *bio, struct bcache_device *d)
716 s->task = current; 716 s->task = current;
717 s->orig_bio = bio; 717 s->orig_bio = bio;
718 s->write = (bio->bi_rw & REQ_WRITE) != 0; 718 s->write = (bio->bi_rw & REQ_WRITE) != 0;
719 s->op.flush_journal = (bio->bi_rw & REQ_FLUSH) != 0; 719 s->op.flush_journal = (bio->bi_rw & (REQ_FLUSH|REQ_FUA)) != 0;
720 s->op.skip = (bio->bi_rw & REQ_DISCARD) != 0; 720 s->op.skip = (bio->bi_rw & REQ_DISCARD) != 0;
721 s->recoverable = 1; 721 s->recoverable = 1;
722 s->start_time = jiffies; 722 s->start_time = jiffies;
@@ -784,11 +784,8 @@ static void request_read_error(struct closure *cl)
784 int i; 784 int i;
785 785
786 if (s->recoverable) { 786 if (s->recoverable) {
787 /* The cache read failed, but we can retry from the backing 787 /* Retry from the backing device: */
788 * device. 788 trace_bcache_read_retry(s->orig_bio);
789 */
790 pr_debug("recovering at sector %llu",
791 (uint64_t) s->orig_bio->bi_sector);
792 789
793 s->error = 0; 790 s->error = 0;
794 bv = s->bio.bio.bi_io_vec; 791 bv = s->bio.bio.bi_io_vec;
@@ -806,7 +803,6 @@ static void request_read_error(struct closure *cl)
806 803
807 /* XXX: invalidate cache */ 804 /* XXX: invalidate cache */
808 805
809 trace_bcache_read_retry(&s->bio.bio);
810 closure_bio_submit(&s->bio.bio, &s->cl, s->d); 806 closure_bio_submit(&s->bio.bio, &s->cl, s->d);
811 } 807 }
812 808
@@ -827,53 +823,13 @@ static void request_read_done(struct closure *cl)
827 */ 823 */
828 824
829 if (s->op.cache_bio) { 825 if (s->op.cache_bio) {
830 struct bio_vec *src, *dst;
831 unsigned src_offset, dst_offset, bytes;
832 void *dst_ptr;
833
834 bio_reset(s->op.cache_bio); 826 bio_reset(s->op.cache_bio);
835 s->op.cache_bio->bi_sector = s->cache_miss->bi_sector; 827 s->op.cache_bio->bi_sector = s->cache_miss->bi_sector;
836 s->op.cache_bio->bi_bdev = s->cache_miss->bi_bdev; 828 s->op.cache_bio->bi_bdev = s->cache_miss->bi_bdev;
837 s->op.cache_bio->bi_size = s->cache_bio_sectors << 9; 829 s->op.cache_bio->bi_size = s->cache_bio_sectors << 9;
838 bch_bio_map(s->op.cache_bio, NULL); 830 bch_bio_map(s->op.cache_bio, NULL);
839 831
840 src = bio_iovec(s->op.cache_bio); 832 bio_copy_data(s->cache_miss, s->op.cache_bio);
841 dst = bio_iovec(s->cache_miss);
842 src_offset = src->bv_offset;
843 dst_offset = dst->bv_offset;
844 dst_ptr = kmap(dst->bv_page);
845
846 while (1) {
847 if (dst_offset == dst->bv_offset + dst->bv_len) {
848 kunmap(dst->bv_page);
849 dst++;
850 if (dst == bio_iovec_idx(s->cache_miss,
851 s->cache_miss->bi_vcnt))
852 break;
853
854 dst_offset = dst->bv_offset;
855 dst_ptr = kmap(dst->bv_page);
856 }
857
858 if (src_offset == src->bv_offset + src->bv_len) {
859 src++;
860 if (src == bio_iovec_idx(s->op.cache_bio,
861 s->op.cache_bio->bi_vcnt))
862 BUG();
863
864 src_offset = src->bv_offset;
865 }
866
867 bytes = min(dst->bv_offset + dst->bv_len - dst_offset,
868 src->bv_offset + src->bv_len - src_offset);
869
870 memcpy(dst_ptr + dst_offset,
871 page_address(src->bv_page) + src_offset,
872 bytes);
873
874 src_offset += bytes;
875 dst_offset += bytes;
876 }
877 833
878 bio_put(s->cache_miss); 834 bio_put(s->cache_miss);
879 s->cache_miss = NULL; 835 s->cache_miss = NULL;
@@ -899,6 +855,7 @@ static void request_read_done_bh(struct closure *cl)
899 struct cached_dev *dc = container_of(s->d, struct cached_dev, disk); 855 struct cached_dev *dc = container_of(s->d, struct cached_dev, disk);
900 856
901 bch_mark_cache_accounting(s, !s->cache_miss, s->op.skip); 857 bch_mark_cache_accounting(s, !s->cache_miss, s->op.skip);
858 trace_bcache_read(s->orig_bio, !s->cache_miss, s->op.skip);
902 859
903 if (s->error) 860 if (s->error)
904 continue_at_nobarrier(cl, request_read_error, bcache_wq); 861 continue_at_nobarrier(cl, request_read_error, bcache_wq);
@@ -917,9 +874,6 @@ static int cached_dev_cache_miss(struct btree *b, struct search *s,
917 struct bio *miss; 874 struct bio *miss;
918 875
919 miss = bch_bio_split(bio, sectors, GFP_NOIO, s->d->bio_split); 876 miss = bch_bio_split(bio, sectors, GFP_NOIO, s->d->bio_split);
920 if (!miss)
921 return -EAGAIN;
922
923 if (miss == bio) 877 if (miss == bio)
924 s->op.lookup_done = true; 878 s->op.lookup_done = true;
925 879
@@ -938,8 +892,9 @@ static int cached_dev_cache_miss(struct btree *b, struct search *s,
938 reada = min(dc->readahead >> 9, 892 reada = min(dc->readahead >> 9,
939 sectors - bio_sectors(miss)); 893 sectors - bio_sectors(miss));
940 894
941 if (bio_end(miss) + reada > bdev_sectors(miss->bi_bdev)) 895 if (bio_end_sector(miss) + reada > bdev_sectors(miss->bi_bdev))
942 reada = bdev_sectors(miss->bi_bdev) - bio_end(miss); 896 reada = bdev_sectors(miss->bi_bdev) -
897 bio_end_sector(miss);
943 } 898 }
944 899
945 s->cache_bio_sectors = bio_sectors(miss) + reada; 900 s->cache_bio_sectors = bio_sectors(miss) + reada;
@@ -963,13 +918,12 @@ static int cached_dev_cache_miss(struct btree *b, struct search *s,
963 goto out_put; 918 goto out_put;
964 919
965 bch_bio_map(s->op.cache_bio, NULL); 920 bch_bio_map(s->op.cache_bio, NULL);
966 if (bch_bio_alloc_pages(s->op.cache_bio, __GFP_NOWARN|GFP_NOIO)) 921 if (bio_alloc_pages(s->op.cache_bio, __GFP_NOWARN|GFP_NOIO))
967 goto out_put; 922 goto out_put;
968 923
969 s->cache_miss = miss; 924 s->cache_miss = miss;
970 bio_get(s->op.cache_bio); 925 bio_get(s->op.cache_bio);
971 926
972 trace_bcache_cache_miss(s->orig_bio);
973 closure_bio_submit(s->op.cache_bio, &s->cl, s->d); 927 closure_bio_submit(s->op.cache_bio, &s->cl, s->d);
974 928
975 return ret; 929 return ret;
@@ -1002,24 +956,13 @@ static void cached_dev_write_complete(struct closure *cl)
1002 cached_dev_bio_complete(cl); 956 cached_dev_bio_complete(cl);
1003} 957}
1004 958
1005static bool should_writeback(struct cached_dev *dc, struct bio *bio)
1006{
1007 unsigned threshold = (bio->bi_rw & REQ_SYNC)
1008 ? CUTOFF_WRITEBACK_SYNC
1009 : CUTOFF_WRITEBACK;
1010
1011 return !atomic_read(&dc->disk.detaching) &&
1012 cache_mode(dc, bio) == CACHE_MODE_WRITEBACK &&
1013 dc->disk.c->gc_stats.in_use < threshold;
1014}
1015
1016static void request_write(struct cached_dev *dc, struct search *s) 959static void request_write(struct cached_dev *dc, struct search *s)
1017{ 960{
1018 struct closure *cl = &s->cl; 961 struct closure *cl = &s->cl;
1019 struct bio *bio = &s->bio.bio; 962 struct bio *bio = &s->bio.bio;
1020 struct bkey start, end; 963 struct bkey start, end;
1021 start = KEY(dc->disk.id, bio->bi_sector, 0); 964 start = KEY(dc->disk.id, bio->bi_sector, 0);
1022 end = KEY(dc->disk.id, bio_end(bio), 0); 965 end = KEY(dc->disk.id, bio_end_sector(bio), 0);
1023 966
1024 bch_keybuf_check_overlapping(&s->op.c->moving_gc_keys, &start, &end); 967 bch_keybuf_check_overlapping(&s->op.c->moving_gc_keys, &start, &end);
1025 968
@@ -1034,22 +977,37 @@ static void request_write(struct cached_dev *dc, struct search *s)
1034 if (bio->bi_rw & REQ_DISCARD) 977 if (bio->bi_rw & REQ_DISCARD)
1035 goto skip; 978 goto skip;
1036 979
980 if (should_writeback(dc, s->orig_bio,
981 cache_mode(dc, bio),
982 s->op.skip)) {
983 s->op.skip = false;
984 s->writeback = true;
985 }
986
1037 if (s->op.skip) 987 if (s->op.skip)
1038 goto skip; 988 goto skip;
1039 989
1040 if (should_writeback(dc, s->orig_bio)) 990 trace_bcache_write(s->orig_bio, s->writeback, s->op.skip);
1041 s->writeback = true;
1042 991
1043 if (!s->writeback) { 992 if (!s->writeback) {
1044 s->op.cache_bio = bio_clone_bioset(bio, GFP_NOIO, 993 s->op.cache_bio = bio_clone_bioset(bio, GFP_NOIO,
1045 dc->disk.bio_split); 994 dc->disk.bio_split);
1046 995
1047 trace_bcache_writethrough(s->orig_bio);
1048 closure_bio_submit(bio, cl, s->d); 996 closure_bio_submit(bio, cl, s->d);
1049 } else { 997 } else {
1050 s->op.cache_bio = bio; 998 bch_writeback_add(dc);
1051 trace_bcache_writeback(s->orig_bio); 999
1052 bch_writeback_add(dc, bio_sectors(bio)); 1000 if (s->op.flush_journal) {
1001 /* Also need to send a flush to the backing device */
1002 s->op.cache_bio = bio_clone_bioset(bio, GFP_NOIO,
1003 dc->disk.bio_split);
1004
1005 bio->bi_size = 0;
1006 bio->bi_vcnt = 0;
1007 closure_bio_submit(bio, cl, s->d);
1008 } else {
1009 s->op.cache_bio = bio;
1010 }
1053 } 1011 }
1054out: 1012out:
1055 closure_call(&s->op.cl, bch_insert_data, NULL, cl); 1013 closure_call(&s->op.cl, bch_insert_data, NULL, cl);
@@ -1058,7 +1016,6 @@ skip:
1058 s->op.skip = true; 1016 s->op.skip = true;
1059 s->op.cache_bio = s->orig_bio; 1017 s->op.cache_bio = s->orig_bio;
1060 bio_get(s->op.cache_bio); 1018 bio_get(s->op.cache_bio);
1061 trace_bcache_write_skip(s->orig_bio);
1062 1019
1063 if ((bio->bi_rw & REQ_DISCARD) && 1020 if ((bio->bi_rw & REQ_DISCARD) &&
1064 !blk_queue_discard(bdev_get_queue(dc->bdev))) 1021 !blk_queue_discard(bdev_get_queue(dc->bdev)))
@@ -1088,9 +1045,10 @@ static void request_nodata(struct cached_dev *dc, struct search *s)
1088 1045
1089/* Cached devices - read & write stuff */ 1046/* Cached devices - read & write stuff */
1090 1047
1091int bch_get_congested(struct cache_set *c) 1048unsigned bch_get_congested(struct cache_set *c)
1092{ 1049{
1093 int i; 1050 int i;
1051 long rand;
1094 1052
1095 if (!c->congested_read_threshold_us && 1053 if (!c->congested_read_threshold_us &&
1096 !c->congested_write_threshold_us) 1054 !c->congested_write_threshold_us)
@@ -1106,7 +1064,13 @@ int bch_get_congested(struct cache_set *c)
1106 1064
1107 i += CONGESTED_MAX; 1065 i += CONGESTED_MAX;
1108 1066
1109 return i <= 0 ? 1 : fract_exp_two(i, 6); 1067 if (i > 0)
1068 i = fract_exp_two(i, 6);
1069
1070 rand = get_random_int();
1071 i -= bitmap_weight(&rand, BITS_PER_LONG);
1072
1073 return i > 0 ? i : 1;
1110} 1074}
1111 1075
1112static void add_sequential(struct task_struct *t) 1076static void add_sequential(struct task_struct *t)
@@ -1126,10 +1090,8 @@ static void check_should_skip(struct cached_dev *dc, struct search *s)
1126{ 1090{
1127 struct cache_set *c = s->op.c; 1091 struct cache_set *c = s->op.c;
1128 struct bio *bio = &s->bio.bio; 1092 struct bio *bio = &s->bio.bio;
1129
1130 long rand;
1131 int cutoff = bch_get_congested(c);
1132 unsigned mode = cache_mode(dc, bio); 1093 unsigned mode = cache_mode(dc, bio);
1094 unsigned sectors, congested = bch_get_congested(c);
1133 1095
1134 if (atomic_read(&dc->disk.detaching) || 1096 if (atomic_read(&dc->disk.detaching) ||
1135 c->gc_stats.in_use > CUTOFF_CACHE_ADD || 1097 c->gc_stats.in_use > CUTOFF_CACHE_ADD ||
@@ -1147,17 +1109,14 @@ static void check_should_skip(struct cached_dev *dc, struct search *s)
1147 goto skip; 1109 goto skip;
1148 } 1110 }
1149 1111
1150 if (!cutoff) { 1112 if (!congested && !dc->sequential_cutoff)
1151 cutoff = dc->sequential_cutoff >> 9; 1113 goto rescale;
1152 1114
1153 if (!cutoff) 1115 if (!congested &&
1154 goto rescale; 1116 mode == CACHE_MODE_WRITEBACK &&
1155 1117 (bio->bi_rw & REQ_WRITE) &&
1156 if (mode == CACHE_MODE_WRITEBACK && 1118 (bio->bi_rw & REQ_SYNC))
1157 (bio->bi_rw & REQ_WRITE) && 1119 goto rescale;
1158 (bio->bi_rw & REQ_SYNC))
1159 goto rescale;
1160 }
1161 1120
1162 if (dc->sequential_merge) { 1121 if (dc->sequential_merge) {
1163 struct io *i; 1122 struct io *i;
@@ -1177,7 +1136,7 @@ found:
1177 if (i->sequential + bio->bi_size > i->sequential) 1136 if (i->sequential + bio->bi_size > i->sequential)
1178 i->sequential += bio->bi_size; 1137 i->sequential += bio->bi_size;
1179 1138
1180 i->last = bio_end(bio); 1139 i->last = bio_end_sector(bio);
1181 i->jiffies = jiffies + msecs_to_jiffies(5000); 1140 i->jiffies = jiffies + msecs_to_jiffies(5000);
1182 s->task->sequential_io = i->sequential; 1141 s->task->sequential_io = i->sequential;
1183 1142
@@ -1192,12 +1151,19 @@ found:
1192 add_sequential(s->task); 1151 add_sequential(s->task);
1193 } 1152 }
1194 1153
1195 rand = get_random_int(); 1154 sectors = max(s->task->sequential_io,
1196 cutoff -= bitmap_weight(&rand, BITS_PER_LONG); 1155 s->task->sequential_io_avg) >> 9;
1197 1156
1198 if (cutoff <= (int) (max(s->task->sequential_io, 1157 if (dc->sequential_cutoff &&
1199 s->task->sequential_io_avg) >> 9)) 1158 sectors >= dc->sequential_cutoff >> 9) {
1159 trace_bcache_bypass_sequential(s->orig_bio);
1200 goto skip; 1160 goto skip;
1161 }
1162
1163 if (congested && sectors >= congested) {
1164 trace_bcache_bypass_congested(s->orig_bio);
1165 goto skip;
1166 }
1201 1167
1202rescale: 1168rescale:
1203 bch_rescale_priorities(c, bio_sectors(bio)); 1169 bch_rescale_priorities(c, bio_sectors(bio));
@@ -1288,30 +1254,25 @@ void bch_cached_dev_request_init(struct cached_dev *dc)
1288static int flash_dev_cache_miss(struct btree *b, struct search *s, 1254static int flash_dev_cache_miss(struct btree *b, struct search *s,
1289 struct bio *bio, unsigned sectors) 1255 struct bio *bio, unsigned sectors)
1290{ 1256{
1257 struct bio_vec *bv;
1258 int i;
1259
1291 /* Zero fill bio */ 1260 /* Zero fill bio */
1292 1261
1293 while (bio->bi_idx != bio->bi_vcnt) { 1262 bio_for_each_segment(bv, bio, i) {
1294 struct bio_vec *bv = bio_iovec(bio);
1295 unsigned j = min(bv->bv_len >> 9, sectors); 1263 unsigned j = min(bv->bv_len >> 9, sectors);
1296 1264
1297 void *p = kmap(bv->bv_page); 1265 void *p = kmap(bv->bv_page);
1298 memset(p + bv->bv_offset, 0, j << 9); 1266 memset(p + bv->bv_offset, 0, j << 9);
1299 kunmap(bv->bv_page); 1267 kunmap(bv->bv_page);
1300 1268
1301 bv->bv_len -= j << 9; 1269 sectors -= j;
1302 bv->bv_offset += j << 9;
1303
1304 if (bv->bv_len)
1305 return 0;
1306
1307 bio->bi_sector += j;
1308 bio->bi_size -= j << 9;
1309
1310 bio->bi_idx++;
1311 sectors -= j;
1312 } 1270 }
1313 1271
1314 s->op.lookup_done = true; 1272 bio_advance(bio, min(sectors << 9, bio->bi_size));
1273
1274 if (!bio->bi_size)
1275 s->op.lookup_done = true;
1315 1276
1316 return 0; 1277 return 0;
1317} 1278}
@@ -1338,8 +1299,8 @@ static void flash_dev_make_request(struct request_queue *q, struct bio *bio)
1338 closure_call(&s->op.cl, btree_read_async, NULL, cl); 1299 closure_call(&s->op.cl, btree_read_async, NULL, cl);
1339 } else if (bio_has_data(bio) || s->op.skip) { 1300 } else if (bio_has_data(bio) || s->op.skip) {
1340 bch_keybuf_check_overlapping(&s->op.c->moving_gc_keys, 1301 bch_keybuf_check_overlapping(&s->op.c->moving_gc_keys,
1341 &KEY(d->id, bio->bi_sector, 0), 1302 &KEY(d->id, bio->bi_sector, 0),
1342 &KEY(d->id, bio_end(bio), 0)); 1303 &KEY(d->id, bio_end_sector(bio), 0));
1343 1304
1344 s->writeback = true; 1305 s->writeback = true;
1345 s->op.cache_bio = bio; 1306 s->op.cache_bio = bio;
diff --git a/drivers/md/bcache/request.h b/drivers/md/bcache/request.h
index 254d9ab5707c..57dc4784f4f4 100644
--- a/drivers/md/bcache/request.h
+++ b/drivers/md/bcache/request.h
@@ -30,7 +30,7 @@ struct search {
30}; 30};
31 31
32void bch_cache_read_endio(struct bio *, int); 32void bch_cache_read_endio(struct bio *, int);
33int bch_get_congested(struct cache_set *); 33unsigned bch_get_congested(struct cache_set *);
34void bch_insert_data(struct closure *cl); 34void bch_insert_data(struct closure *cl);
35void bch_btree_insert_async(struct closure *); 35void bch_btree_insert_async(struct closure *);
36void bch_cache_read_endio(struct bio *, int); 36void bch_cache_read_endio(struct bio *, int);
diff --git a/drivers/md/bcache/super.c b/drivers/md/bcache/super.c
index f88e2b653a3f..547c4c57b052 100644
--- a/drivers/md/bcache/super.c
+++ b/drivers/md/bcache/super.c
@@ -10,10 +10,13 @@
10#include "btree.h" 10#include "btree.h"
11#include "debug.h" 11#include "debug.h"
12#include "request.h" 12#include "request.h"
13#include "writeback.h"
13 14
15#include <linux/blkdev.h>
14#include <linux/buffer_head.h> 16#include <linux/buffer_head.h>
15#include <linux/debugfs.h> 17#include <linux/debugfs.h>
16#include <linux/genhd.h> 18#include <linux/genhd.h>
19#include <linux/kthread.h>
17#include <linux/module.h> 20#include <linux/module.h>
18#include <linux/random.h> 21#include <linux/random.h>
19#include <linux/reboot.h> 22#include <linux/reboot.h>
@@ -342,6 +345,7 @@ static void uuid_io(struct cache_set *c, unsigned long rw,
342 struct closure *cl = &c->uuid_write.cl; 345 struct closure *cl = &c->uuid_write.cl;
343 struct uuid_entry *u; 346 struct uuid_entry *u;
344 unsigned i; 347 unsigned i;
348 char buf[80];
345 349
346 BUG_ON(!parent); 350 BUG_ON(!parent);
347 closure_lock(&c->uuid_write, parent); 351 closure_lock(&c->uuid_write, parent);
@@ -362,8 +366,8 @@ static void uuid_io(struct cache_set *c, unsigned long rw,
362 break; 366 break;
363 } 367 }
364 368
365 pr_debug("%s UUIDs at %s", rw & REQ_WRITE ? "wrote" : "read", 369 bch_bkey_to_text(buf, sizeof(buf), k);
366 pkey(&c->uuid_bucket)); 370 pr_debug("%s UUIDs at %s", rw & REQ_WRITE ? "wrote" : "read", buf);
367 371
368 for (u = c->uuids; u < c->uuids + c->nr_uuids; u++) 372 for (u = c->uuids; u < c->uuids + c->nr_uuids; u++)
369 if (!bch_is_zero(u->uuid, 16)) 373 if (!bch_is_zero(u->uuid, 16))
@@ -543,7 +547,6 @@ void bch_prio_write(struct cache *ca)
543 547
544 pr_debug("free %zu, free_inc %zu, unused %zu", fifo_used(&ca->free), 548 pr_debug("free %zu, free_inc %zu, unused %zu", fifo_used(&ca->free),
545 fifo_used(&ca->free_inc), fifo_used(&ca->unused)); 549 fifo_used(&ca->free_inc), fifo_used(&ca->unused));
546 blktrace_msg(ca, "Starting priorities: " buckets_free(ca));
547 550
548 for (i = prio_buckets(ca) - 1; i >= 0; --i) { 551 for (i = prio_buckets(ca) - 1; i >= 0; --i) {
549 long bucket; 552 long bucket;
@@ -704,7 +707,8 @@ static void bcache_device_detach(struct bcache_device *d)
704 atomic_set(&d->detaching, 0); 707 atomic_set(&d->detaching, 0);
705 } 708 }
706 709
707 bcache_device_unlink(d); 710 if (!d->flush_done)
711 bcache_device_unlink(d);
708 712
709 d->c->devices[d->id] = NULL; 713 d->c->devices[d->id] = NULL;
710 closure_put(&d->c->caching); 714 closure_put(&d->c->caching);
@@ -743,13 +747,35 @@ static void bcache_device_free(struct bcache_device *d)
743 mempool_destroy(d->unaligned_bvec); 747 mempool_destroy(d->unaligned_bvec);
744 if (d->bio_split) 748 if (d->bio_split)
745 bioset_free(d->bio_split); 749 bioset_free(d->bio_split);
750 if (is_vmalloc_addr(d->stripe_sectors_dirty))
751 vfree(d->stripe_sectors_dirty);
752 else
753 kfree(d->stripe_sectors_dirty);
746 754
747 closure_debug_destroy(&d->cl); 755 closure_debug_destroy(&d->cl);
748} 756}
749 757
750static int bcache_device_init(struct bcache_device *d, unsigned block_size) 758static int bcache_device_init(struct bcache_device *d, unsigned block_size,
759 sector_t sectors)
751{ 760{
752 struct request_queue *q; 761 struct request_queue *q;
762 size_t n;
763
764 if (!d->stripe_size_bits)
765 d->stripe_size_bits = 31;
766
767 d->nr_stripes = round_up(sectors, 1 << d->stripe_size_bits) >>
768 d->stripe_size_bits;
769
770 if (!d->nr_stripes || d->nr_stripes > SIZE_MAX / sizeof(atomic_t))
771 return -ENOMEM;
772
773 n = d->nr_stripes * sizeof(atomic_t);
774 d->stripe_sectors_dirty = n < PAGE_SIZE << 6
775 ? kzalloc(n, GFP_KERNEL)
776 : vzalloc(n);
777 if (!d->stripe_sectors_dirty)
778 return -ENOMEM;
753 779
754 if (!(d->bio_split = bioset_create(4, offsetof(struct bbio, bio))) || 780 if (!(d->bio_split = bioset_create(4, offsetof(struct bbio, bio))) ||
755 !(d->unaligned_bvec = mempool_create_kmalloc_pool(1, 781 !(d->unaligned_bvec = mempool_create_kmalloc_pool(1,
@@ -759,6 +785,7 @@ static int bcache_device_init(struct bcache_device *d, unsigned block_size)
759 !(q = blk_alloc_queue(GFP_KERNEL))) 785 !(q = blk_alloc_queue(GFP_KERNEL)))
760 return -ENOMEM; 786 return -ENOMEM;
761 787
788 set_capacity(d->disk, sectors);
762 snprintf(d->disk->disk_name, DISK_NAME_LEN, "bcache%i", bcache_minor); 789 snprintf(d->disk->disk_name, DISK_NAME_LEN, "bcache%i", bcache_minor);
763 790
764 d->disk->major = bcache_major; 791 d->disk->major = bcache_major;
@@ -781,6 +808,8 @@ static int bcache_device_init(struct bcache_device *d, unsigned block_size)
781 set_bit(QUEUE_FLAG_NONROT, &d->disk->queue->queue_flags); 808 set_bit(QUEUE_FLAG_NONROT, &d->disk->queue->queue_flags);
782 set_bit(QUEUE_FLAG_DISCARD, &d->disk->queue->queue_flags); 809 set_bit(QUEUE_FLAG_DISCARD, &d->disk->queue->queue_flags);
783 810
811 blk_queue_flush(q, REQ_FLUSH|REQ_FUA);
812
784 return 0; 813 return 0;
785} 814}
786 815
@@ -800,6 +829,17 @@ static void calc_cached_dev_sectors(struct cache_set *c)
800void bch_cached_dev_run(struct cached_dev *dc) 829void bch_cached_dev_run(struct cached_dev *dc)
801{ 830{
802 struct bcache_device *d = &dc->disk; 831 struct bcache_device *d = &dc->disk;
832 char buf[SB_LABEL_SIZE + 1];
833 char *env[] = {
834 "DRIVER=bcache",
835 kasprintf(GFP_KERNEL, "CACHED_UUID=%pU", dc->sb.uuid),
836 NULL,
837 NULL,
838 };
839
840 memcpy(buf, dc->sb.label, SB_LABEL_SIZE);
841 buf[SB_LABEL_SIZE] = '\0';
842 env[2] = kasprintf(GFP_KERNEL, "CACHED_LABEL=%s", buf);
803 843
804 if (atomic_xchg(&dc->running, 1)) 844 if (atomic_xchg(&dc->running, 1))
805 return; 845 return;
@@ -816,10 +856,12 @@ void bch_cached_dev_run(struct cached_dev *dc)
816 856
817 add_disk(d->disk); 857 add_disk(d->disk);
818 bd_link_disk_holder(dc->bdev, dc->disk.disk); 858 bd_link_disk_holder(dc->bdev, dc->disk.disk);
819#if 0 859 /* won't show up in the uevent file, use udevadm monitor -e instead
820 char *env[] = { "SYMLINK=label" , NULL }; 860 * only class / kset properties are persistent */
821 kobject_uevent_env(&disk_to_dev(d->disk)->kobj, KOBJ_CHANGE, env); 861 kobject_uevent_env(&disk_to_dev(d->disk)->kobj, KOBJ_CHANGE, env);
822#endif 862 kfree(env[1]);
863 kfree(env[2]);
864
823 if (sysfs_create_link(&d->kobj, &disk_to_dev(d->disk)->kobj, "dev") || 865 if (sysfs_create_link(&d->kobj, &disk_to_dev(d->disk)->kobj, "dev") ||
824 sysfs_create_link(&disk_to_dev(d->disk)->kobj, &d->kobj, "bcache")) 866 sysfs_create_link(&disk_to_dev(d->disk)->kobj, &d->kobj, "bcache"))
825 pr_debug("error creating sysfs link"); 867 pr_debug("error creating sysfs link");
@@ -960,6 +1002,7 @@ int bch_cached_dev_attach(struct cached_dev *dc, struct cache_set *c)
960 atomic_set(&dc->count, 1); 1002 atomic_set(&dc->count, 1);
961 1003
962 if (BDEV_STATE(&dc->sb) == BDEV_STATE_DIRTY) { 1004 if (BDEV_STATE(&dc->sb) == BDEV_STATE_DIRTY) {
1005 bch_sectors_dirty_init(dc);
963 atomic_set(&dc->has_dirty, 1); 1006 atomic_set(&dc->has_dirty, 1);
964 atomic_inc(&dc->count); 1007 atomic_inc(&dc->count);
965 bch_writeback_queue(dc); 1008 bch_writeback_queue(dc);
@@ -1014,6 +1057,14 @@ static void cached_dev_flush(struct closure *cl)
1014 struct cached_dev *dc = container_of(cl, struct cached_dev, disk.cl); 1057 struct cached_dev *dc = container_of(cl, struct cached_dev, disk.cl);
1015 struct bcache_device *d = &dc->disk; 1058 struct bcache_device *d = &dc->disk;
1016 1059
1060 mutex_lock(&bch_register_lock);
1061 d->flush_done = 1;
1062
1063 if (d->c)
1064 bcache_device_unlink(d);
1065
1066 mutex_unlock(&bch_register_lock);
1067
1017 bch_cache_accounting_destroy(&dc->accounting); 1068 bch_cache_accounting_destroy(&dc->accounting);
1018 kobject_del(&d->kobj); 1069 kobject_del(&d->kobj);
1019 1070
@@ -1045,7 +1096,8 @@ static int cached_dev_init(struct cached_dev *dc, unsigned block_size)
1045 hlist_add_head(&io->hash, dc->io_hash + RECENT_IO); 1096 hlist_add_head(&io->hash, dc->io_hash + RECENT_IO);
1046 } 1097 }
1047 1098
1048 ret = bcache_device_init(&dc->disk, block_size); 1099 ret = bcache_device_init(&dc->disk, block_size,
1100 dc->bdev->bd_part->nr_sects - dc->sb.data_offset);
1049 if (ret) 1101 if (ret)
1050 return ret; 1102 return ret;
1051 1103
@@ -1144,11 +1196,10 @@ static int flash_dev_run(struct cache_set *c, struct uuid_entry *u)
1144 1196
1145 kobject_init(&d->kobj, &bch_flash_dev_ktype); 1197 kobject_init(&d->kobj, &bch_flash_dev_ktype);
1146 1198
1147 if (bcache_device_init(d, block_bytes(c))) 1199 if (bcache_device_init(d, block_bytes(c), u->sectors))
1148 goto err; 1200 goto err;
1149 1201
1150 bcache_device_attach(d, c, u - c->uuids); 1202 bcache_device_attach(d, c, u - c->uuids);
1151 set_capacity(d->disk, u->sectors);
1152 bch_flash_dev_request_init(d); 1203 bch_flash_dev_request_init(d);
1153 add_disk(d->disk); 1204 add_disk(d->disk);
1154 1205
@@ -1255,9 +1306,10 @@ static void cache_set_free(struct closure *cl)
1255 free_pages((unsigned long) c->uuids, ilog2(bucket_pages(c))); 1306 free_pages((unsigned long) c->uuids, ilog2(bucket_pages(c)));
1256 free_pages((unsigned long) c->sort, ilog2(bucket_pages(c))); 1307 free_pages((unsigned long) c->sort, ilog2(bucket_pages(c)));
1257 1308
1258 kfree(c->fill_iter);
1259 if (c->bio_split) 1309 if (c->bio_split)
1260 bioset_free(c->bio_split); 1310 bioset_free(c->bio_split);
1311 if (c->fill_iter)
1312 mempool_destroy(c->fill_iter);
1261 if (c->bio_meta) 1313 if (c->bio_meta)
1262 mempool_destroy(c->bio_meta); 1314 mempool_destroy(c->bio_meta);
1263 if (c->search) 1315 if (c->search)
@@ -1278,11 +1330,9 @@ static void cache_set_free(struct closure *cl)
1278static void cache_set_flush(struct closure *cl) 1330static void cache_set_flush(struct closure *cl)
1279{ 1331{
1280 struct cache_set *c = container_of(cl, struct cache_set, caching); 1332 struct cache_set *c = container_of(cl, struct cache_set, caching);
1333 struct cache *ca;
1281 struct btree *b; 1334 struct btree *b;
1282 1335 unsigned i;
1283 /* Shut down allocator threads */
1284 set_bit(CACHE_SET_STOPPING_2, &c->flags);
1285 wake_up(&c->alloc_wait);
1286 1336
1287 bch_cache_accounting_destroy(&c->accounting); 1337 bch_cache_accounting_destroy(&c->accounting);
1288 1338
@@ -1295,7 +1345,11 @@ static void cache_set_flush(struct closure *cl)
1295 /* Should skip this if we're unregistering because of an error */ 1345 /* Should skip this if we're unregistering because of an error */
1296 list_for_each_entry(b, &c->btree_cache, list) 1346 list_for_each_entry(b, &c->btree_cache, list)
1297 if (btree_node_dirty(b)) 1347 if (btree_node_dirty(b))
1298 bch_btree_write(b, true, NULL); 1348 bch_btree_node_write(b, NULL);
1349
1350 for_each_cache(ca, c, i)
1351 if (ca->alloc_thread)
1352 kthread_stop(ca->alloc_thread);
1299 1353
1300 closure_return(cl); 1354 closure_return(cl);
1301} 1355}
@@ -1303,18 +1357,22 @@ static void cache_set_flush(struct closure *cl)
1303static void __cache_set_unregister(struct closure *cl) 1357static void __cache_set_unregister(struct closure *cl)
1304{ 1358{
1305 struct cache_set *c = container_of(cl, struct cache_set, caching); 1359 struct cache_set *c = container_of(cl, struct cache_set, caching);
1306 struct cached_dev *dc, *t; 1360 struct cached_dev *dc;
1307 size_t i; 1361 size_t i;
1308 1362
1309 mutex_lock(&bch_register_lock); 1363 mutex_lock(&bch_register_lock);
1310 1364
1311 if (test_bit(CACHE_SET_UNREGISTERING, &c->flags))
1312 list_for_each_entry_safe(dc, t, &c->cached_devs, list)
1313 bch_cached_dev_detach(dc);
1314
1315 for (i = 0; i < c->nr_uuids; i++) 1365 for (i = 0; i < c->nr_uuids; i++)
1316 if (c->devices[i] && UUID_FLASH_ONLY(&c->uuids[i])) 1366 if (c->devices[i]) {
1317 bcache_device_stop(c->devices[i]); 1367 if (!UUID_FLASH_ONLY(&c->uuids[i]) &&
1368 test_bit(CACHE_SET_UNREGISTERING, &c->flags)) {
1369 dc = container_of(c->devices[i],
1370 struct cached_dev, disk);
1371 bch_cached_dev_detach(dc);
1372 } else {
1373 bcache_device_stop(c->devices[i]);
1374 }
1375 }
1318 1376
1319 mutex_unlock(&bch_register_lock); 1377 mutex_unlock(&bch_register_lock);
1320 1378
@@ -1373,9 +1431,9 @@ struct cache_set *bch_cache_set_alloc(struct cache_sb *sb)
1373 c->btree_pages = max_t(int, c->btree_pages / 4, 1431 c->btree_pages = max_t(int, c->btree_pages / 4,
1374 BTREE_MAX_PAGES); 1432 BTREE_MAX_PAGES);
1375 1433
1376 init_waitqueue_head(&c->alloc_wait); 1434 c->sort_crit_factor = int_sqrt(c->btree_pages);
1435
1377 mutex_init(&c->bucket_lock); 1436 mutex_init(&c->bucket_lock);
1378 mutex_init(&c->fill_lock);
1379 mutex_init(&c->sort_lock); 1437 mutex_init(&c->sort_lock);
1380 spin_lock_init(&c->sort_time_lock); 1438 spin_lock_init(&c->sort_time_lock);
1381 closure_init_unlocked(&c->sb_write); 1439 closure_init_unlocked(&c->sb_write);
@@ -1401,8 +1459,8 @@ struct cache_set *bch_cache_set_alloc(struct cache_sb *sb)
1401 !(c->bio_meta = mempool_create_kmalloc_pool(2, 1459 !(c->bio_meta = mempool_create_kmalloc_pool(2,
1402 sizeof(struct bbio) + sizeof(struct bio_vec) * 1460 sizeof(struct bbio) + sizeof(struct bio_vec) *
1403 bucket_pages(c))) || 1461 bucket_pages(c))) ||
1462 !(c->fill_iter = mempool_create_kmalloc_pool(1, iter_size)) ||
1404 !(c->bio_split = bioset_create(4, offsetof(struct bbio, bio))) || 1463 !(c->bio_split = bioset_create(4, offsetof(struct bbio, bio))) ||
1405 !(c->fill_iter = kmalloc(iter_size, GFP_KERNEL)) ||
1406 !(c->sort = alloc_bucket_pages(GFP_KERNEL, c)) || 1464 !(c->sort = alloc_bucket_pages(GFP_KERNEL, c)) ||
1407 !(c->uuids = alloc_bucket_pages(GFP_KERNEL, c)) || 1465 !(c->uuids = alloc_bucket_pages(GFP_KERNEL, c)) ||
1408 bch_journal_alloc(c) || 1466 bch_journal_alloc(c) ||
@@ -1410,8 +1468,6 @@ struct cache_set *bch_cache_set_alloc(struct cache_sb *sb)
1410 bch_open_buckets_alloc(c)) 1468 bch_open_buckets_alloc(c))
1411 goto err; 1469 goto err;
1412 1470
1413 c->fill_iter->size = sb->bucket_size / sb->block_size;
1414
1415 c->congested_read_threshold_us = 2000; 1471 c->congested_read_threshold_us = 2000;
1416 c->congested_write_threshold_us = 20000; 1472 c->congested_write_threshold_us = 20000;
1417 c->error_limit = 8 << IO_ERROR_SHIFT; 1473 c->error_limit = 8 << IO_ERROR_SHIFT;
@@ -1496,9 +1552,10 @@ static void run_cache_set(struct cache_set *c)
1496 */ 1552 */
1497 bch_journal_next(&c->journal); 1553 bch_journal_next(&c->journal);
1498 1554
1555 err = "error starting allocator thread";
1499 for_each_cache(ca, c, i) 1556 for_each_cache(ca, c, i)
1500 closure_call(&ca->alloc, bch_allocator_thread, 1557 if (bch_cache_allocator_start(ca))
1501 system_wq, &c->cl); 1558 goto err;
1502 1559
1503 /* 1560 /*
1504 * First place it's safe to allocate: btree_check() and 1561 * First place it's safe to allocate: btree_check() and
@@ -1531,17 +1588,16 @@ static void run_cache_set(struct cache_set *c)
1531 1588
1532 bch_btree_gc_finish(c); 1589 bch_btree_gc_finish(c);
1533 1590
1591 err = "error starting allocator thread";
1534 for_each_cache(ca, c, i) 1592 for_each_cache(ca, c, i)
1535 closure_call(&ca->alloc, bch_allocator_thread, 1593 if (bch_cache_allocator_start(ca))
1536 ca->alloc_workqueue, &c->cl); 1594 goto err;
1537 1595
1538 mutex_lock(&c->bucket_lock); 1596 mutex_lock(&c->bucket_lock);
1539 for_each_cache(ca, c, i) 1597 for_each_cache(ca, c, i)
1540 bch_prio_write(ca); 1598 bch_prio_write(ca);
1541 mutex_unlock(&c->bucket_lock); 1599 mutex_unlock(&c->bucket_lock);
1542 1600
1543 wake_up(&c->alloc_wait);
1544
1545 err = "cannot allocate new UUID bucket"; 1601 err = "cannot allocate new UUID bucket";
1546 if (__uuid_write(c)) 1602 if (__uuid_write(c))
1547 goto err_unlock_gc; 1603 goto err_unlock_gc;
@@ -1552,7 +1608,7 @@ static void run_cache_set(struct cache_set *c)
1552 goto err_unlock_gc; 1608 goto err_unlock_gc;
1553 1609
1554 bkey_copy_key(&c->root->key, &MAX_KEY); 1610 bkey_copy_key(&c->root->key, &MAX_KEY);
1555 bch_btree_write(c->root, true, &op); 1611 bch_btree_node_write(c->root, &op.cl);
1556 1612
1557 bch_btree_set_root(c->root); 1613 bch_btree_set_root(c->root);
1558 rw_unlock(true, c->root); 1614 rw_unlock(true, c->root);
@@ -1673,9 +1729,6 @@ void bch_cache_release(struct kobject *kobj)
1673 1729
1674 bio_split_pool_free(&ca->bio_split_hook); 1730 bio_split_pool_free(&ca->bio_split_hook);
1675 1731
1676 if (ca->alloc_workqueue)
1677 destroy_workqueue(ca->alloc_workqueue);
1678
1679 free_pages((unsigned long) ca->disk_buckets, ilog2(bucket_pages(ca))); 1732 free_pages((unsigned long) ca->disk_buckets, ilog2(bucket_pages(ca)));
1680 kfree(ca->prio_buckets); 1733 kfree(ca->prio_buckets);
1681 vfree(ca->buckets); 1734 vfree(ca->buckets);
@@ -1723,7 +1776,6 @@ static int cache_alloc(struct cache_sb *sb, struct cache *ca)
1723 !(ca->prio_buckets = kzalloc(sizeof(uint64_t) * prio_buckets(ca) * 1776 !(ca->prio_buckets = kzalloc(sizeof(uint64_t) * prio_buckets(ca) *
1724 2, GFP_KERNEL)) || 1777 2, GFP_KERNEL)) ||
1725 !(ca->disk_buckets = alloc_bucket_pages(GFP_KERNEL, ca)) || 1778 !(ca->disk_buckets = alloc_bucket_pages(GFP_KERNEL, ca)) ||
1726 !(ca->alloc_workqueue = alloc_workqueue("bch_allocator", 0, 1)) ||
1727 bio_split_pool_init(&ca->bio_split_hook)) 1779 bio_split_pool_init(&ca->bio_split_hook))
1728 return -ENOMEM; 1780 return -ENOMEM;
1729 1781
@@ -1786,6 +1838,36 @@ static ssize_t register_bcache(struct kobject *, struct kobj_attribute *,
1786kobj_attribute_write(register, register_bcache); 1838kobj_attribute_write(register, register_bcache);
1787kobj_attribute_write(register_quiet, register_bcache); 1839kobj_attribute_write(register_quiet, register_bcache);
1788 1840
1841static bool bch_is_open_backing(struct block_device *bdev) {
1842 struct cache_set *c, *tc;
1843 struct cached_dev *dc, *t;
1844
1845 list_for_each_entry_safe(c, tc, &bch_cache_sets, list)
1846 list_for_each_entry_safe(dc, t, &c->cached_devs, list)
1847 if (dc->bdev == bdev)
1848 return true;
1849 list_for_each_entry_safe(dc, t, &uncached_devices, list)
1850 if (dc->bdev == bdev)
1851 return true;
1852 return false;
1853}
1854
1855static bool bch_is_open_cache(struct block_device *bdev) {
1856 struct cache_set *c, *tc;
1857 struct cache *ca;
1858 unsigned i;
1859
1860 list_for_each_entry_safe(c, tc, &bch_cache_sets, list)
1861 for_each_cache(ca, c, i)
1862 if (ca->bdev == bdev)
1863 return true;
1864 return false;
1865}
1866
1867static bool bch_is_open(struct block_device *bdev) {
1868 return bch_is_open_cache(bdev) || bch_is_open_backing(bdev);
1869}
1870
1789static ssize_t register_bcache(struct kobject *k, struct kobj_attribute *attr, 1871static ssize_t register_bcache(struct kobject *k, struct kobj_attribute *attr,
1790 const char *buffer, size_t size) 1872 const char *buffer, size_t size)
1791{ 1873{
@@ -1810,8 +1892,13 @@ static ssize_t register_bcache(struct kobject *k, struct kobj_attribute *attr,
1810 FMODE_READ|FMODE_WRITE|FMODE_EXCL, 1892 FMODE_READ|FMODE_WRITE|FMODE_EXCL,
1811 sb); 1893 sb);
1812 if (IS_ERR(bdev)) { 1894 if (IS_ERR(bdev)) {
1813 if (bdev == ERR_PTR(-EBUSY)) 1895 if (bdev == ERR_PTR(-EBUSY)) {
1814 err = "device busy"; 1896 bdev = lookup_bdev(strim(path));
1897 if (!IS_ERR(bdev) && bch_is_open(bdev))
1898 err = "device already registered";
1899 else
1900 err = "device busy";
1901 }
1815 goto err; 1902 goto err;
1816 } 1903 }
1817 1904
diff --git a/drivers/md/bcache/sysfs.c b/drivers/md/bcache/sysfs.c
index 4d9cca47e4c6..12a2c2846f99 100644
--- a/drivers/md/bcache/sysfs.c
+++ b/drivers/md/bcache/sysfs.c
@@ -9,7 +9,9 @@
9#include "sysfs.h" 9#include "sysfs.h"
10#include "btree.h" 10#include "btree.h"
11#include "request.h" 11#include "request.h"
12#include "writeback.h"
12 13
14#include <linux/blkdev.h>
13#include <linux/sort.h> 15#include <linux/sort.h>
14 16
15static const char * const cache_replacement_policies[] = { 17static const char * const cache_replacement_policies[] = {
@@ -79,6 +81,9 @@ rw_attribute(writeback_rate_p_term_inverse);
79rw_attribute(writeback_rate_d_smooth); 81rw_attribute(writeback_rate_d_smooth);
80read_attribute(writeback_rate_debug); 82read_attribute(writeback_rate_debug);
81 83
84read_attribute(stripe_size);
85read_attribute(partial_stripes_expensive);
86
82rw_attribute(synchronous); 87rw_attribute(synchronous);
83rw_attribute(journal_delay_ms); 88rw_attribute(journal_delay_ms);
84rw_attribute(discard); 89rw_attribute(discard);
@@ -127,7 +132,7 @@ SHOW(__bch_cached_dev)
127 char derivative[20]; 132 char derivative[20];
128 char target[20]; 133 char target[20];
129 bch_hprint(dirty, 134 bch_hprint(dirty,
130 atomic_long_read(&dc->disk.sectors_dirty) << 9); 135 bcache_dev_sectors_dirty(&dc->disk) << 9);
131 bch_hprint(derivative, dc->writeback_rate_derivative << 9); 136 bch_hprint(derivative, dc->writeback_rate_derivative << 9);
132 bch_hprint(target, dc->writeback_rate_target << 9); 137 bch_hprint(target, dc->writeback_rate_target << 9);
133 138
@@ -143,7 +148,10 @@ SHOW(__bch_cached_dev)
143 } 148 }
144 149
145 sysfs_hprint(dirty_data, 150 sysfs_hprint(dirty_data,
146 atomic_long_read(&dc->disk.sectors_dirty) << 9); 151 bcache_dev_sectors_dirty(&dc->disk) << 9);
152
153 sysfs_hprint(stripe_size, (1 << dc->disk.stripe_size_bits) << 9);
154 var_printf(partial_stripes_expensive, "%u");
147 155
148 var_printf(sequential_merge, "%i"); 156 var_printf(sequential_merge, "%i");
149 var_hprint(sequential_cutoff); 157 var_hprint(sequential_cutoff);
@@ -170,6 +178,7 @@ STORE(__cached_dev)
170 disk.kobj); 178 disk.kobj);
171 unsigned v = size; 179 unsigned v = size;
172 struct cache_set *c; 180 struct cache_set *c;
181 struct kobj_uevent_env *env;
173 182
174#define d_strtoul(var) sysfs_strtoul(var, dc->var) 183#define d_strtoul(var) sysfs_strtoul(var, dc->var)
175#define d_strtoi_h(var) sysfs_hatoi(var, dc->var) 184#define d_strtoi_h(var) sysfs_hatoi(var, dc->var)
@@ -214,6 +223,7 @@ STORE(__cached_dev)
214 } 223 }
215 224
216 if (attr == &sysfs_label) { 225 if (attr == &sysfs_label) {
226 /* note: endlines are preserved */
217 memcpy(dc->sb.label, buf, SB_LABEL_SIZE); 227 memcpy(dc->sb.label, buf, SB_LABEL_SIZE);
218 bch_write_bdev_super(dc, NULL); 228 bch_write_bdev_super(dc, NULL);
219 if (dc->disk.c) { 229 if (dc->disk.c) {
@@ -221,6 +231,15 @@ STORE(__cached_dev)
221 buf, SB_LABEL_SIZE); 231 buf, SB_LABEL_SIZE);
222 bch_uuid_write(dc->disk.c); 232 bch_uuid_write(dc->disk.c);
223 } 233 }
234 env = kzalloc(sizeof(struct kobj_uevent_env), GFP_KERNEL);
235 if (!env)
236 return -ENOMEM;
237 add_uevent_var(env, "DRIVER=bcache");
238 add_uevent_var(env, "CACHED_UUID=%pU", dc->sb.uuid),
239 add_uevent_var(env, "CACHED_LABEL=%s", buf);
240 kobject_uevent_env(
241 &disk_to_dev(dc->disk.disk)->kobj, KOBJ_CHANGE, env->envp);
242 kfree(env);
224 } 243 }
225 244
226 if (attr == &sysfs_attach) { 245 if (attr == &sysfs_attach) {
@@ -284,6 +303,8 @@ static struct attribute *bch_cached_dev_files[] = {
284 &sysfs_writeback_rate_d_smooth, 303 &sysfs_writeback_rate_d_smooth,
285 &sysfs_writeback_rate_debug, 304 &sysfs_writeback_rate_debug,
286 &sysfs_dirty_data, 305 &sysfs_dirty_data,
306 &sysfs_stripe_size,
307 &sysfs_partial_stripes_expensive,
287 &sysfs_sequential_cutoff, 308 &sysfs_sequential_cutoff,
288 &sysfs_sequential_merge, 309 &sysfs_sequential_merge,
289 &sysfs_clear_stats, 310 &sysfs_clear_stats,
@@ -665,12 +686,10 @@ SHOW(__bch_cache)
665 int cmp(const void *l, const void *r) 686 int cmp(const void *l, const void *r)
666 { return *((uint16_t *) r) - *((uint16_t *) l); } 687 { return *((uint16_t *) r) - *((uint16_t *) l); }
667 688
668 /* Number of quantiles we compute */
669 const unsigned nq = 31;
670
671 size_t n = ca->sb.nbuckets, i, unused, btree; 689 size_t n = ca->sb.nbuckets, i, unused, btree;
672 uint64_t sum = 0; 690 uint64_t sum = 0;
673 uint16_t q[nq], *p, *cached; 691 /* Compute 31 quantiles */
692 uint16_t q[31], *p, *cached;
674 ssize_t ret; 693 ssize_t ret;
675 694
676 cached = p = vmalloc(ca->sb.nbuckets * sizeof(uint16_t)); 695 cached = p = vmalloc(ca->sb.nbuckets * sizeof(uint16_t));
@@ -703,26 +722,29 @@ SHOW(__bch_cache)
703 if (n) 722 if (n)
704 do_div(sum, n); 723 do_div(sum, n);
705 724
706 for (i = 0; i < nq; i++) 725 for (i = 0; i < ARRAY_SIZE(q); i++)
707 q[i] = INITIAL_PRIO - cached[n * (i + 1) / (nq + 1)]; 726 q[i] = INITIAL_PRIO - cached[n * (i + 1) /
727 (ARRAY_SIZE(q) + 1)];
708 728
709 vfree(p); 729 vfree(p);
710 730
711 ret = snprintf(buf, PAGE_SIZE, 731 ret = scnprintf(buf, PAGE_SIZE,
712 "Unused: %zu%%\n" 732 "Unused: %zu%%\n"
713 "Metadata: %zu%%\n" 733 "Metadata: %zu%%\n"
714 "Average: %llu\n" 734 "Average: %llu\n"
715 "Sectors per Q: %zu\n" 735 "Sectors per Q: %zu\n"
716 "Quantiles: [", 736 "Quantiles: [",
717 unused * 100 / (size_t) ca->sb.nbuckets, 737 unused * 100 / (size_t) ca->sb.nbuckets,
718 btree * 100 / (size_t) ca->sb.nbuckets, sum, 738 btree * 100 / (size_t) ca->sb.nbuckets, sum,
719 n * ca->sb.bucket_size / (nq + 1)); 739 n * ca->sb.bucket_size / (ARRAY_SIZE(q) + 1));
720 740
721 for (i = 0; i < nq && ret < (ssize_t) PAGE_SIZE; i++) 741 for (i = 0; i < ARRAY_SIZE(q); i++)
722 ret += snprintf(buf + ret, PAGE_SIZE - ret, 742 ret += scnprintf(buf + ret, PAGE_SIZE - ret,
723 i < nq - 1 ? "%u " : "%u]\n", q[i]); 743 "%u ", q[i]);
724 744 ret--;
725 buf[PAGE_SIZE - 1] = '\0'; 745
746 ret += scnprintf(buf + ret, PAGE_SIZE - ret, "]\n");
747
726 return ret; 748 return ret;
727 } 749 }
728 750
diff --git a/drivers/md/bcache/trace.c b/drivers/md/bcache/trace.c
index 983f9bb411bc..f7b6c197f90f 100644
--- a/drivers/md/bcache/trace.c
+++ b/drivers/md/bcache/trace.c
@@ -2,6 +2,7 @@
2#include "btree.h" 2#include "btree.h"
3#include "request.h" 3#include "request.h"
4 4
5#include <linux/blktrace_api.h>
5#include <linux/module.h> 6#include <linux/module.h>
6 7
7#define CREATE_TRACE_POINTS 8#define CREATE_TRACE_POINTS
@@ -9,18 +10,44 @@
9 10
10EXPORT_TRACEPOINT_SYMBOL_GPL(bcache_request_start); 11EXPORT_TRACEPOINT_SYMBOL_GPL(bcache_request_start);
11EXPORT_TRACEPOINT_SYMBOL_GPL(bcache_request_end); 12EXPORT_TRACEPOINT_SYMBOL_GPL(bcache_request_end);
12EXPORT_TRACEPOINT_SYMBOL_GPL(bcache_passthrough); 13
13EXPORT_TRACEPOINT_SYMBOL_GPL(bcache_cache_hit); 14EXPORT_TRACEPOINT_SYMBOL_GPL(bcache_bypass_sequential);
14EXPORT_TRACEPOINT_SYMBOL_GPL(bcache_cache_miss); 15EXPORT_TRACEPOINT_SYMBOL_GPL(bcache_bypass_congested);
16
17EXPORT_TRACEPOINT_SYMBOL_GPL(bcache_read);
18EXPORT_TRACEPOINT_SYMBOL_GPL(bcache_write);
15EXPORT_TRACEPOINT_SYMBOL_GPL(bcache_read_retry); 19EXPORT_TRACEPOINT_SYMBOL_GPL(bcache_read_retry);
16EXPORT_TRACEPOINT_SYMBOL_GPL(bcache_writethrough); 20
17EXPORT_TRACEPOINT_SYMBOL_GPL(bcache_writeback); 21EXPORT_TRACEPOINT_SYMBOL_GPL(bcache_cache_insert);
18EXPORT_TRACEPOINT_SYMBOL_GPL(bcache_write_skip); 22
23EXPORT_TRACEPOINT_SYMBOL_GPL(bcache_journal_replay_key);
24EXPORT_TRACEPOINT_SYMBOL_GPL(bcache_journal_write);
25EXPORT_TRACEPOINT_SYMBOL_GPL(bcache_journal_full);
26EXPORT_TRACEPOINT_SYMBOL_GPL(bcache_journal_entry_full);
27
28EXPORT_TRACEPOINT_SYMBOL_GPL(bcache_btree_cache_cannibalize);
29
19EXPORT_TRACEPOINT_SYMBOL_GPL(bcache_btree_read); 30EXPORT_TRACEPOINT_SYMBOL_GPL(bcache_btree_read);
20EXPORT_TRACEPOINT_SYMBOL_GPL(bcache_btree_write); 31EXPORT_TRACEPOINT_SYMBOL_GPL(bcache_btree_write);
21EXPORT_TRACEPOINT_SYMBOL_GPL(bcache_write_dirty); 32
22EXPORT_TRACEPOINT_SYMBOL_GPL(bcache_read_dirty); 33EXPORT_TRACEPOINT_SYMBOL_GPL(bcache_btree_node_alloc);
23EXPORT_TRACEPOINT_SYMBOL_GPL(bcache_journal_write); 34EXPORT_TRACEPOINT_SYMBOL_GPL(bcache_btree_node_alloc_fail);
24EXPORT_TRACEPOINT_SYMBOL_GPL(bcache_cache_insert); 35EXPORT_TRACEPOINT_SYMBOL_GPL(bcache_btree_node_free);
36
37EXPORT_TRACEPOINT_SYMBOL_GPL(bcache_btree_gc_coalesce);
25EXPORT_TRACEPOINT_SYMBOL_GPL(bcache_gc_start); 38EXPORT_TRACEPOINT_SYMBOL_GPL(bcache_gc_start);
26EXPORT_TRACEPOINT_SYMBOL_GPL(bcache_gc_end); 39EXPORT_TRACEPOINT_SYMBOL_GPL(bcache_gc_end);
40EXPORT_TRACEPOINT_SYMBOL_GPL(bcache_gc_copy);
41EXPORT_TRACEPOINT_SYMBOL_GPL(bcache_gc_copy_collision);
42
43EXPORT_TRACEPOINT_SYMBOL_GPL(bcache_btree_insert_key);
44
45EXPORT_TRACEPOINT_SYMBOL_GPL(bcache_btree_node_split);
46EXPORT_TRACEPOINT_SYMBOL_GPL(bcache_btree_node_compact);
47EXPORT_TRACEPOINT_SYMBOL_GPL(bcache_btree_set_root);
48
49EXPORT_TRACEPOINT_SYMBOL_GPL(bcache_alloc_invalidate);
50EXPORT_TRACEPOINT_SYMBOL_GPL(bcache_alloc_fail);
51
52EXPORT_TRACEPOINT_SYMBOL_GPL(bcache_writeback);
53EXPORT_TRACEPOINT_SYMBOL_GPL(bcache_writeback_collision);
diff --git a/drivers/md/bcache/util.c b/drivers/md/bcache/util.c
index da3a99e85b1e..98eb81159a22 100644
--- a/drivers/md/bcache/util.c
+++ b/drivers/md/bcache/util.c
@@ -228,23 +228,6 @@ start: bv->bv_len = min_t(size_t, PAGE_SIZE - bv->bv_offset,
228 } 228 }
229} 229}
230 230
231int bch_bio_alloc_pages(struct bio *bio, gfp_t gfp)
232{
233 int i;
234 struct bio_vec *bv;
235
236 bio_for_each_segment(bv, bio, i) {
237 bv->bv_page = alloc_page(gfp);
238 if (!bv->bv_page) {
239 while (bv-- != bio->bi_io_vec + bio->bi_idx)
240 __free_page(bv->bv_page);
241 return -ENOMEM;
242 }
243 }
244
245 return 0;
246}
247
248/* 231/*
249 * Portions Copyright (c) 1996-2001, PostgreSQL Global Development Group (Any 232 * Portions Copyright (c) 1996-2001, PostgreSQL Global Development Group (Any
250 * use permitted, subject to terms of PostgreSQL license; see.) 233 * use permitted, subject to terms of PostgreSQL license; see.)
diff --git a/drivers/md/bcache/util.h b/drivers/md/bcache/util.h
index 577393e38c3a..1ae2a73ad85f 100644
--- a/drivers/md/bcache/util.h
+++ b/drivers/md/bcache/util.h
@@ -15,8 +15,6 @@
15 15
16struct closure; 16struct closure;
17 17
18#include <trace/events/bcache.h>
19
20#ifdef CONFIG_BCACHE_EDEBUG 18#ifdef CONFIG_BCACHE_EDEBUG
21 19
22#define atomic_dec_bug(v) BUG_ON(atomic_dec_return(v) < 0) 20#define atomic_dec_bug(v) BUG_ON(atomic_dec_return(v) < 0)
@@ -566,12 +564,8 @@ static inline unsigned fract_exp_two(unsigned x, unsigned fract_bits)
566 return x; 564 return x;
567} 565}
568 566
569#define bio_end(bio) ((bio)->bi_sector + bio_sectors(bio))
570
571void bch_bio_map(struct bio *bio, void *base); 567void bch_bio_map(struct bio *bio, void *base);
572 568
573int bch_bio_alloc_pages(struct bio *bio, gfp_t gfp);
574
575static inline sector_t bdev_sectors(struct block_device *bdev) 569static inline sector_t bdev_sectors(struct block_device *bdev)
576{ 570{
577 return bdev->bd_inode->i_size >> 9; 571 return bdev->bd_inode->i_size >> 9;
diff --git a/drivers/md/bcache/writeback.c b/drivers/md/bcache/writeback.c
index 2714ed3991d1..22cbff551628 100644
--- a/drivers/md/bcache/writeback.c
+++ b/drivers/md/bcache/writeback.c
@@ -9,6 +9,9 @@
9#include "bcache.h" 9#include "bcache.h"
10#include "btree.h" 10#include "btree.h"
11#include "debug.h" 11#include "debug.h"
12#include "writeback.h"
13
14#include <trace/events/bcache.h>
12 15
13static struct workqueue_struct *dirty_wq; 16static struct workqueue_struct *dirty_wq;
14 17
@@ -36,7 +39,7 @@ static void __update_writeback_rate(struct cached_dev *dc)
36 39
37 int change = 0; 40 int change = 0;
38 int64_t error; 41 int64_t error;
39 int64_t dirty = atomic_long_read(&dc->disk.sectors_dirty); 42 int64_t dirty = bcache_dev_sectors_dirty(&dc->disk);
40 int64_t derivative = dirty - dc->disk.sectors_dirty_last; 43 int64_t derivative = dirty - dc->disk.sectors_dirty_last;
41 44
42 dc->disk.sectors_dirty_last = dirty; 45 dc->disk.sectors_dirty_last = dirty;
@@ -105,6 +108,31 @@ static bool dirty_pred(struct keybuf *buf, struct bkey *k)
105 return KEY_DIRTY(k); 108 return KEY_DIRTY(k);
106} 109}
107 110
111static bool dirty_full_stripe_pred(struct keybuf *buf, struct bkey *k)
112{
113 uint64_t stripe;
114 unsigned nr_sectors = KEY_SIZE(k);
115 struct cached_dev *dc = container_of(buf, struct cached_dev,
116 writeback_keys);
117 unsigned stripe_size = 1 << dc->disk.stripe_size_bits;
118
119 if (!KEY_DIRTY(k))
120 return false;
121
122 stripe = KEY_START(k) >> dc->disk.stripe_size_bits;
123 while (1) {
124 if (atomic_read(dc->disk.stripe_sectors_dirty + stripe) !=
125 stripe_size)
126 return false;
127
128 if (nr_sectors <= stripe_size)
129 return true;
130
131 nr_sectors -= stripe_size;
132 stripe++;
133 }
134}
135
108static void dirty_init(struct keybuf_key *w) 136static void dirty_init(struct keybuf_key *w)
109{ 137{
110 struct dirty_io *io = w->private; 138 struct dirty_io *io = w->private;
@@ -149,7 +177,22 @@ static void refill_dirty(struct closure *cl)
149 searched_from_start = true; 177 searched_from_start = true;
150 } 178 }
151 179
152 bch_refill_keybuf(dc->disk.c, buf, &end); 180 if (dc->partial_stripes_expensive) {
181 uint64_t i;
182
183 for (i = 0; i < dc->disk.nr_stripes; i++)
184 if (atomic_read(dc->disk.stripe_sectors_dirty + i) ==
185 1 << dc->disk.stripe_size_bits)
186 goto full_stripes;
187
188 goto normal_refill;
189full_stripes:
190 bch_refill_keybuf(dc->disk.c, buf, &end,
191 dirty_full_stripe_pred);
192 } else {
193normal_refill:
194 bch_refill_keybuf(dc->disk.c, buf, &end, dirty_pred);
195 }
153 196
154 if (bkey_cmp(&buf->last_scanned, &end) >= 0 && searched_from_start) { 197 if (bkey_cmp(&buf->last_scanned, &end) >= 0 && searched_from_start) {
155 /* Searched the entire btree - delay awhile */ 198 /* Searched the entire btree - delay awhile */
@@ -181,10 +224,8 @@ void bch_writeback_queue(struct cached_dev *dc)
181 } 224 }
182} 225}
183 226
184void bch_writeback_add(struct cached_dev *dc, unsigned sectors) 227void bch_writeback_add(struct cached_dev *dc)
185{ 228{
186 atomic_long_add(sectors, &dc->disk.sectors_dirty);
187
188 if (!atomic_read(&dc->has_dirty) && 229 if (!atomic_read(&dc->has_dirty) &&
189 !atomic_xchg(&dc->has_dirty, 1)) { 230 !atomic_xchg(&dc->has_dirty, 1)) {
190 atomic_inc(&dc->count); 231 atomic_inc(&dc->count);
@@ -203,6 +244,34 @@ void bch_writeback_add(struct cached_dev *dc, unsigned sectors)
203 } 244 }
204} 245}
205 246
247void bcache_dev_sectors_dirty_add(struct cache_set *c, unsigned inode,
248 uint64_t offset, int nr_sectors)
249{
250 struct bcache_device *d = c->devices[inode];
251 unsigned stripe_size, stripe_offset;
252 uint64_t stripe;
253
254 if (!d)
255 return;
256
257 stripe_size = 1 << d->stripe_size_bits;
258 stripe = offset >> d->stripe_size_bits;
259 stripe_offset = offset & (stripe_size - 1);
260
261 while (nr_sectors) {
262 int s = min_t(unsigned, abs(nr_sectors),
263 stripe_size - stripe_offset);
264
265 if (nr_sectors < 0)
266 s = -s;
267
268 atomic_add(s, d->stripe_sectors_dirty + stripe);
269 nr_sectors -= s;
270 stripe_offset = 0;
271 stripe++;
272 }
273}
274
206/* Background writeback - IO loop */ 275/* Background writeback - IO loop */
207 276
208static void dirty_io_destructor(struct closure *cl) 277static void dirty_io_destructor(struct closure *cl)
@@ -216,9 +285,10 @@ static void write_dirty_finish(struct closure *cl)
216 struct dirty_io *io = container_of(cl, struct dirty_io, cl); 285 struct dirty_io *io = container_of(cl, struct dirty_io, cl);
217 struct keybuf_key *w = io->bio.bi_private; 286 struct keybuf_key *w = io->bio.bi_private;
218 struct cached_dev *dc = io->dc; 287 struct cached_dev *dc = io->dc;
219 struct bio_vec *bv = bio_iovec_idx(&io->bio, io->bio.bi_vcnt); 288 struct bio_vec *bv;
289 int i;
220 290
221 while (bv-- != io->bio.bi_io_vec) 291 bio_for_each_segment_all(bv, &io->bio, i)
222 __free_page(bv->bv_page); 292 __free_page(bv->bv_page);
223 293
224 /* This is kind of a dumb way of signalling errors. */ 294 /* This is kind of a dumb way of signalling errors. */
@@ -236,10 +306,12 @@ static void write_dirty_finish(struct closure *cl)
236 for (i = 0; i < KEY_PTRS(&w->key); i++) 306 for (i = 0; i < KEY_PTRS(&w->key); i++)
237 atomic_inc(&PTR_BUCKET(dc->disk.c, &w->key, i)->pin); 307 atomic_inc(&PTR_BUCKET(dc->disk.c, &w->key, i)->pin);
238 308
239 pr_debug("clearing %s", pkey(&w->key));
240 bch_btree_insert(&op, dc->disk.c); 309 bch_btree_insert(&op, dc->disk.c);
241 closure_sync(&op.cl); 310 closure_sync(&op.cl);
242 311
312 if (op.insert_collision)
313 trace_bcache_writeback_collision(&w->key);
314
243 atomic_long_inc(op.insert_collision 315 atomic_long_inc(op.insert_collision
244 ? &dc->disk.c->writeback_keys_failed 316 ? &dc->disk.c->writeback_keys_failed
245 : &dc->disk.c->writeback_keys_done); 317 : &dc->disk.c->writeback_keys_done);
@@ -275,7 +347,6 @@ static void write_dirty(struct closure *cl)
275 io->bio.bi_bdev = io->dc->bdev; 347 io->bio.bi_bdev = io->dc->bdev;
276 io->bio.bi_end_io = dirty_endio; 348 io->bio.bi_end_io = dirty_endio;
277 349
278 trace_bcache_write_dirty(&io->bio);
279 closure_bio_submit(&io->bio, cl, &io->dc->disk); 350 closure_bio_submit(&io->bio, cl, &io->dc->disk);
280 351
281 continue_at(cl, write_dirty_finish, dirty_wq); 352 continue_at(cl, write_dirty_finish, dirty_wq);
@@ -296,7 +367,6 @@ static void read_dirty_submit(struct closure *cl)
296{ 367{
297 struct dirty_io *io = container_of(cl, struct dirty_io, cl); 368 struct dirty_io *io = container_of(cl, struct dirty_io, cl);
298 369
299 trace_bcache_read_dirty(&io->bio);
300 closure_bio_submit(&io->bio, cl, &io->dc->disk); 370 closure_bio_submit(&io->bio, cl, &io->dc->disk);
301 371
302 continue_at(cl, write_dirty, dirty_wq); 372 continue_at(cl, write_dirty, dirty_wq);
@@ -349,10 +419,10 @@ static void read_dirty(struct closure *cl)
349 io->bio.bi_rw = READ; 419 io->bio.bi_rw = READ;
350 io->bio.bi_end_io = read_dirty_endio; 420 io->bio.bi_end_io = read_dirty_endio;
351 421
352 if (bch_bio_alloc_pages(&io->bio, GFP_KERNEL)) 422 if (bio_alloc_pages(&io->bio, GFP_KERNEL))
353 goto err_free; 423 goto err_free;
354 424
355 pr_debug("%s", pkey(&w->key)); 425 trace_bcache_writeback(&w->key);
356 426
357 closure_call(&io->cl, read_dirty_submit, NULL, &dc->disk.cl); 427 closure_call(&io->cl, read_dirty_submit, NULL, &dc->disk.cl);
358 428
@@ -375,12 +445,49 @@ err:
375 refill_dirty(cl); 445 refill_dirty(cl);
376} 446}
377 447
448/* Init */
449
450static int bch_btree_sectors_dirty_init(struct btree *b, struct btree_op *op,
451 struct cached_dev *dc)
452{
453 struct bkey *k;
454 struct btree_iter iter;
455
456 bch_btree_iter_init(b, &iter, &KEY(dc->disk.id, 0, 0));
457 while ((k = bch_btree_iter_next_filter(&iter, b, bch_ptr_bad)))
458 if (!b->level) {
459 if (KEY_INODE(k) > dc->disk.id)
460 break;
461
462 if (KEY_DIRTY(k))
463 bcache_dev_sectors_dirty_add(b->c, dc->disk.id,
464 KEY_START(k),
465 KEY_SIZE(k));
466 } else {
467 btree(sectors_dirty_init, k, b, op, dc);
468 if (KEY_INODE(k) > dc->disk.id)
469 break;
470
471 cond_resched();
472 }
473
474 return 0;
475}
476
477void bch_sectors_dirty_init(struct cached_dev *dc)
478{
479 struct btree_op op;
480
481 bch_btree_op_init_stack(&op);
482 btree_root(sectors_dirty_init, dc->disk.c, &op, dc);
483}
484
378void bch_cached_dev_writeback_init(struct cached_dev *dc) 485void bch_cached_dev_writeback_init(struct cached_dev *dc)
379{ 486{
380 closure_init_unlocked(&dc->writeback); 487 closure_init_unlocked(&dc->writeback);
381 init_rwsem(&dc->writeback_lock); 488 init_rwsem(&dc->writeback_lock);
382 489
383 bch_keybuf_init(&dc->writeback_keys, dirty_pred); 490 bch_keybuf_init(&dc->writeback_keys);
384 491
385 dc->writeback_metadata = true; 492 dc->writeback_metadata = true;
386 dc->writeback_running = true; 493 dc->writeback_running = true;
diff --git a/drivers/md/bcache/writeback.h b/drivers/md/bcache/writeback.h
new file mode 100644
index 000000000000..c91f61bb95b6
--- /dev/null
+++ b/drivers/md/bcache/writeback.h
@@ -0,0 +1,64 @@
1#ifndef _BCACHE_WRITEBACK_H
2#define _BCACHE_WRITEBACK_H
3
4#define CUTOFF_WRITEBACK 40
5#define CUTOFF_WRITEBACK_SYNC 70
6
7static inline uint64_t bcache_dev_sectors_dirty(struct bcache_device *d)
8{
9 uint64_t i, ret = 0;
10
11 for (i = 0; i < d->nr_stripes; i++)
12 ret += atomic_read(d->stripe_sectors_dirty + i);
13
14 return ret;
15}
16
17static inline bool bcache_dev_stripe_dirty(struct bcache_device *d,
18 uint64_t offset,
19 unsigned nr_sectors)
20{
21 uint64_t stripe = offset >> d->stripe_size_bits;
22
23 while (1) {
24 if (atomic_read(d->stripe_sectors_dirty + stripe))
25 return true;
26
27 if (nr_sectors <= 1 << d->stripe_size_bits)
28 return false;
29
30 nr_sectors -= 1 << d->stripe_size_bits;
31 stripe++;
32 }
33}
34
35static inline bool should_writeback(struct cached_dev *dc, struct bio *bio,
36 unsigned cache_mode, bool would_skip)
37{
38 unsigned in_use = dc->disk.c->gc_stats.in_use;
39
40 if (cache_mode != CACHE_MODE_WRITEBACK ||
41 atomic_read(&dc->disk.detaching) ||
42 in_use > CUTOFF_WRITEBACK_SYNC)
43 return false;
44
45 if (dc->partial_stripes_expensive &&
46 bcache_dev_stripe_dirty(&dc->disk, bio->bi_sector,
47 bio_sectors(bio)))
48 return true;
49
50 if (would_skip)
51 return false;
52
53 return bio->bi_rw & REQ_SYNC ||
54 in_use <= CUTOFF_WRITEBACK;
55}
56
57void bcache_dev_sectors_dirty_add(struct cache_set *, unsigned, uint64_t, int);
58void bch_writeback_queue(struct cached_dev *);
59void bch_writeback_add(struct cached_dev *);
60
61void bch_sectors_dirty_init(struct cached_dev *dc);
62void bch_cached_dev_writeback_init(struct cached_dev *);
63
64#endif
diff --git a/drivers/md/md.c b/drivers/md/md.c
index dddc87bcf64a..9f13e13506ef 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -7716,20 +7716,6 @@ static int remove_and_add_spares(struct mddev *mddev,
7716 continue; 7716 continue;
7717 7717
7718 rdev->recovery_offset = 0; 7718 rdev->recovery_offset = 0;
7719 if (rdev->saved_raid_disk >= 0 && mddev->in_sync) {
7720 spin_lock_irq(&mddev->write_lock);
7721 if (mddev->in_sync)
7722 /* OK, this device, which is in_sync,
7723 * will definitely be noticed before
7724 * the next write, so recovery isn't
7725 * needed.
7726 */
7727 rdev->recovery_offset = mddev->recovery_cp;
7728 spin_unlock_irq(&mddev->write_lock);
7729 }
7730 if (mddev->ro && rdev->recovery_offset != MaxSector)
7731 /* not safe to add this disk now */
7732 continue;
7733 if (mddev->pers-> 7719 if (mddev->pers->
7734 hot_add_disk(mddev, rdev) == 0) { 7720 hot_add_disk(mddev, rdev) == 0) {
7735 if (sysfs_link_rdev(mddev, rdev)) 7721 if (sysfs_link_rdev(mddev, rdev))
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
index ec734588a1c6..d60412c7f995 100644
--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
@@ -1849,6 +1849,36 @@ static int process_checks(struct r1bio *r1_bio)
1849 int i; 1849 int i;
1850 int vcnt; 1850 int vcnt;
1851 1851
1852 /* Fix variable parts of all bios */
1853 vcnt = (r1_bio->sectors + PAGE_SIZE / 512 - 1) >> (PAGE_SHIFT - 9);
1854 for (i = 0; i < conf->raid_disks * 2; i++) {
1855 int j;
1856 int size;
1857 struct bio *b = r1_bio->bios[i];
1858 if (b->bi_end_io != end_sync_read)
1859 continue;
1860 /* fixup the bio for reuse */
1861 bio_reset(b);
1862 b->bi_vcnt = vcnt;
1863 b->bi_size = r1_bio->sectors << 9;
1864 b->bi_sector = r1_bio->sector +
1865 conf->mirrors[i].rdev->data_offset;
1866 b->bi_bdev = conf->mirrors[i].rdev->bdev;
1867 b->bi_end_io = end_sync_read;
1868 b->bi_private = r1_bio;
1869
1870 size = b->bi_size;
1871 for (j = 0; j < vcnt ; j++) {
1872 struct bio_vec *bi;
1873 bi = &b->bi_io_vec[j];
1874 bi->bv_offset = 0;
1875 if (size > PAGE_SIZE)
1876 bi->bv_len = PAGE_SIZE;
1877 else
1878 bi->bv_len = size;
1879 size -= PAGE_SIZE;
1880 }
1881 }
1852 for (primary = 0; primary < conf->raid_disks * 2; primary++) 1882 for (primary = 0; primary < conf->raid_disks * 2; primary++)
1853 if (r1_bio->bios[primary]->bi_end_io == end_sync_read && 1883 if (r1_bio->bios[primary]->bi_end_io == end_sync_read &&
1854 test_bit(BIO_UPTODATE, &r1_bio->bios[primary]->bi_flags)) { 1884 test_bit(BIO_UPTODATE, &r1_bio->bios[primary]->bi_flags)) {
@@ -1857,12 +1887,10 @@ static int process_checks(struct r1bio *r1_bio)
1857 break; 1887 break;
1858 } 1888 }
1859 r1_bio->read_disk = primary; 1889 r1_bio->read_disk = primary;
1860 vcnt = (r1_bio->sectors + PAGE_SIZE / 512 - 1) >> (PAGE_SHIFT - 9);
1861 for (i = 0; i < conf->raid_disks * 2; i++) { 1890 for (i = 0; i < conf->raid_disks * 2; i++) {
1862 int j; 1891 int j;
1863 struct bio *pbio = r1_bio->bios[primary]; 1892 struct bio *pbio = r1_bio->bios[primary];
1864 struct bio *sbio = r1_bio->bios[i]; 1893 struct bio *sbio = r1_bio->bios[i];
1865 int size;
1866 1894
1867 if (sbio->bi_end_io != end_sync_read) 1895 if (sbio->bi_end_io != end_sync_read)
1868 continue; 1896 continue;
@@ -1888,27 +1916,6 @@ static int process_checks(struct r1bio *r1_bio)
1888 rdev_dec_pending(conf->mirrors[i].rdev, mddev); 1916 rdev_dec_pending(conf->mirrors[i].rdev, mddev);
1889 continue; 1917 continue;
1890 } 1918 }
1891 /* fixup the bio for reuse */
1892 bio_reset(sbio);
1893 sbio->bi_vcnt = vcnt;
1894 sbio->bi_size = r1_bio->sectors << 9;
1895 sbio->bi_sector = r1_bio->sector +
1896 conf->mirrors[i].rdev->data_offset;
1897 sbio->bi_bdev = conf->mirrors[i].rdev->bdev;
1898 sbio->bi_end_io = end_sync_read;
1899 sbio->bi_private = r1_bio;
1900
1901 size = sbio->bi_size;
1902 for (j = 0; j < vcnt ; j++) {
1903 struct bio_vec *bi;
1904 bi = &sbio->bi_io_vec[j];
1905 bi->bv_offset = 0;
1906 if (size > PAGE_SIZE)
1907 bi->bv_len = PAGE_SIZE;
1908 else
1909 bi->bv_len = size;
1910 size -= PAGE_SIZE;
1911 }
1912 1919
1913 bio_copy_data(sbio, pbio); 1920 bio_copy_data(sbio, pbio);
1914 } 1921 }
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
index cd066b63bdaf..df7b0a06b0ea 100644
--- a/drivers/md/raid10.c
+++ b/drivers/md/raid10.c
@@ -2097,11 +2097,17 @@ static void sync_request_write(struct mddev *mddev, struct r10bio *r10_bio)
2097 * both 'first' and 'i', so we just compare them. 2097 * both 'first' and 'i', so we just compare them.
2098 * All vec entries are PAGE_SIZE; 2098 * All vec entries are PAGE_SIZE;
2099 */ 2099 */
2100 for (j = 0; j < vcnt; j++) 2100 int sectors = r10_bio->sectors;
2101 for (j = 0; j < vcnt; j++) {
2102 int len = PAGE_SIZE;
2103 if (sectors < (len / 512))
2104 len = sectors * 512;
2101 if (memcmp(page_address(fbio->bi_io_vec[j].bv_page), 2105 if (memcmp(page_address(fbio->bi_io_vec[j].bv_page),
2102 page_address(tbio->bi_io_vec[j].bv_page), 2106 page_address(tbio->bi_io_vec[j].bv_page),
2103 fbio->bi_io_vec[j].bv_len)) 2107 len))
2104 break; 2108 break;
2109 sectors -= len/512;
2110 }
2105 if (j == vcnt) 2111 if (j == vcnt)
2106 continue; 2112 continue;
2107 atomic64_add(r10_bio->sectors, &mddev->resync_mismatches); 2113 atomic64_add(r10_bio->sectors, &mddev->resync_mismatches);
@@ -2284,12 +2290,18 @@ static void recovery_request_write(struct mddev *mddev, struct r10bio *r10_bio)
2284 d = r10_bio->devs[1].devnum; 2290 d = r10_bio->devs[1].devnum;
2285 wbio = r10_bio->devs[1].bio; 2291 wbio = r10_bio->devs[1].bio;
2286 wbio2 = r10_bio->devs[1].repl_bio; 2292 wbio2 = r10_bio->devs[1].repl_bio;
2293 /* Need to test wbio2->bi_end_io before we call
2294 * generic_make_request as if the former is NULL,
2295 * the latter is free to free wbio2.
2296 */
2297 if (wbio2 && !wbio2->bi_end_io)
2298 wbio2 = NULL;
2287 if (wbio->bi_end_io) { 2299 if (wbio->bi_end_io) {
2288 atomic_inc(&conf->mirrors[d].rdev->nr_pending); 2300 atomic_inc(&conf->mirrors[d].rdev->nr_pending);
2289 md_sync_acct(conf->mirrors[d].rdev->bdev, bio_sectors(wbio)); 2301 md_sync_acct(conf->mirrors[d].rdev->bdev, bio_sectors(wbio));
2290 generic_make_request(wbio); 2302 generic_make_request(wbio);
2291 } 2303 }
2292 if (wbio2 && wbio2->bi_end_io) { 2304 if (wbio2) {
2293 atomic_inc(&conf->mirrors[d].replacement->nr_pending); 2305 atomic_inc(&conf->mirrors[d].replacement->nr_pending);
2294 md_sync_acct(conf->mirrors[d].replacement->bdev, 2306 md_sync_acct(conf->mirrors[d].replacement->bdev,
2295 bio_sectors(wbio2)); 2307 bio_sectors(wbio2));
@@ -3407,6 +3419,7 @@ static sector_t sync_request(struct mddev *mddev, sector_t sector_nr,
3407 3419
3408 if (bio->bi_end_io == end_sync_read) { 3420 if (bio->bi_end_io == end_sync_read) {
3409 md_sync_acct(bio->bi_bdev, nr_sectors); 3421 md_sync_acct(bio->bi_bdev, nr_sectors);
3422 set_bit(BIO_UPTODATE, &bio->bi_flags);
3410 generic_make_request(bio); 3423 generic_make_request(bio);
3411 } 3424 }
3412 } 3425 }
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index 2bf094a587cb..78ea44336e75 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -3462,6 +3462,7 @@ static void handle_stripe(struct stripe_head *sh)
3462 test_and_clear_bit(STRIPE_SYNC_REQUESTED, &sh->state)) { 3462 test_and_clear_bit(STRIPE_SYNC_REQUESTED, &sh->state)) {
3463 set_bit(STRIPE_SYNCING, &sh->state); 3463 set_bit(STRIPE_SYNCING, &sh->state);
3464 clear_bit(STRIPE_INSYNC, &sh->state); 3464 clear_bit(STRIPE_INSYNC, &sh->state);
3465 clear_bit(STRIPE_REPLACED, &sh->state);
3465 } 3466 }
3466 spin_unlock(&sh->stripe_lock); 3467 spin_unlock(&sh->stripe_lock);
3467 } 3468 }
@@ -3607,19 +3608,23 @@ static void handle_stripe(struct stripe_head *sh)
3607 handle_parity_checks5(conf, sh, &s, disks); 3608 handle_parity_checks5(conf, sh, &s, disks);
3608 } 3609 }
3609 3610
3610 if (s.replacing && s.locked == 0 3611 if ((s.replacing || s.syncing) && s.locked == 0
3611 && !test_bit(STRIPE_INSYNC, &sh->state)) { 3612 && !test_bit(STRIPE_COMPUTE_RUN, &sh->state)
3613 && !test_bit(STRIPE_REPLACED, &sh->state)) {
3612 /* Write out to replacement devices where possible */ 3614 /* Write out to replacement devices where possible */
3613 for (i = 0; i < conf->raid_disks; i++) 3615 for (i = 0; i < conf->raid_disks; i++)
3614 if (test_bit(R5_UPTODATE, &sh->dev[i].flags) && 3616 if (test_bit(R5_NeedReplace, &sh->dev[i].flags)) {
3615 test_bit(R5_NeedReplace, &sh->dev[i].flags)) { 3617 WARN_ON(!test_bit(R5_UPTODATE, &sh->dev[i].flags));
3616 set_bit(R5_WantReplace, &sh->dev[i].flags); 3618 set_bit(R5_WantReplace, &sh->dev[i].flags);
3617 set_bit(R5_LOCKED, &sh->dev[i].flags); 3619 set_bit(R5_LOCKED, &sh->dev[i].flags);
3618 s.locked++; 3620 s.locked++;
3619 } 3621 }
3620 set_bit(STRIPE_INSYNC, &sh->state); 3622 if (s.replacing)
3623 set_bit(STRIPE_INSYNC, &sh->state);
3624 set_bit(STRIPE_REPLACED, &sh->state);
3621 } 3625 }
3622 if ((s.syncing || s.replacing) && s.locked == 0 && 3626 if ((s.syncing || s.replacing) && s.locked == 0 &&
3627 !test_bit(STRIPE_COMPUTE_RUN, &sh->state) &&
3623 test_bit(STRIPE_INSYNC, &sh->state)) { 3628 test_bit(STRIPE_INSYNC, &sh->state)) {
3624 md_done_sync(conf->mddev, STRIPE_SECTORS, 1); 3629 md_done_sync(conf->mddev, STRIPE_SECTORS, 1);
3625 clear_bit(STRIPE_SYNCING, &sh->state); 3630 clear_bit(STRIPE_SYNCING, &sh->state);
diff --git a/drivers/md/raid5.h b/drivers/md/raid5.h
index b0b663b119a8..70c49329ca9a 100644
--- a/drivers/md/raid5.h
+++ b/drivers/md/raid5.h
@@ -306,6 +306,7 @@ enum {
306 STRIPE_SYNC_REQUESTED, 306 STRIPE_SYNC_REQUESTED,
307 STRIPE_SYNCING, 307 STRIPE_SYNCING,
308 STRIPE_INSYNC, 308 STRIPE_INSYNC,
309 STRIPE_REPLACED,
309 STRIPE_PREREAD_ACTIVE, 310 STRIPE_PREREAD_ACTIVE,
310 STRIPE_DELAYED, 311 STRIPE_DELAYED,
311 STRIPE_DEGRADED, 312 STRIPE_DEGRADED,
diff --git a/drivers/media/i2c/ml86v7667.c b/drivers/media/i2c/ml86v7667.c
index efdc873e58d1..a9857022f71d 100644
--- a/drivers/media/i2c/ml86v7667.c
+++ b/drivers/media/i2c/ml86v7667.c
@@ -117,7 +117,7 @@ static int ml86v7667_s_ctrl(struct v4l2_ctrl *ctrl)
117{ 117{
118 struct v4l2_subdev *sd = to_sd(ctrl); 118 struct v4l2_subdev *sd = to_sd(ctrl);
119 struct i2c_client *client = v4l2_get_subdevdata(sd); 119 struct i2c_client *client = v4l2_get_subdevdata(sd);
120 int ret; 120 int ret = -EINVAL;
121 121
122 switch (ctrl->id) { 122 switch (ctrl->id) {
123 case V4L2_CID_BRIGHTNESS: 123 case V4L2_CID_BRIGHTNESS:
@@ -157,7 +157,7 @@ static int ml86v7667_s_ctrl(struct v4l2_ctrl *ctrl)
157 break; 157 break;
158 } 158 }
159 159
160 return 0; 160 return ret;
161} 161}
162 162
163static int ml86v7667_querystd(struct v4l2_subdev *sd, v4l2_std_id *std) 163static int ml86v7667_querystd(struct v4l2_subdev *sd, v4l2_std_id *std)
diff --git a/drivers/media/pci/saa7134/saa7134-alsa.c b/drivers/media/pci/saa7134/saa7134-alsa.c
index 10460fd3ce39..dbcdfbf8aed0 100644
--- a/drivers/media/pci/saa7134/saa7134-alsa.c
+++ b/drivers/media/pci/saa7134/saa7134-alsa.c
@@ -172,7 +172,9 @@ static void saa7134_irq_alsa_done(struct saa7134_dev *dev,
172 dprintk("irq: overrun [full=%d/%d] - Blocks in %d\n",dev->dmasound.read_count, 172 dprintk("irq: overrun [full=%d/%d] - Blocks in %d\n",dev->dmasound.read_count,
173 dev->dmasound.bufsize, dev->dmasound.blocks); 173 dev->dmasound.bufsize, dev->dmasound.blocks);
174 spin_unlock(&dev->slock); 174 spin_unlock(&dev->slock);
175 snd_pcm_stream_lock(dev->dmasound.substream);
175 snd_pcm_stop(dev->dmasound.substream,SNDRV_PCM_STATE_XRUN); 176 snd_pcm_stop(dev->dmasound.substream,SNDRV_PCM_STATE_XRUN);
177 snd_pcm_stream_unlock(dev->dmasound.substream);
176 return; 178 return;
177 } 179 }
178 180
diff --git a/drivers/media/platform/coda.c b/drivers/media/platform/coda.c
index df4ada880e42..bd9405df1bd6 100644
--- a/drivers/media/platform/coda.c
+++ b/drivers/media/platform/coda.c
@@ -1987,7 +1987,7 @@ MODULE_DEVICE_TABLE(platform, coda_platform_ids);
1987 1987
1988#ifdef CONFIG_OF 1988#ifdef CONFIG_OF
1989static const struct of_device_id coda_dt_ids[] = { 1989static const struct of_device_id coda_dt_ids[] = {
1990 { .compatible = "fsl,imx27-vpu", .data = &coda_platform_ids[CODA_IMX27] }, 1990 { .compatible = "fsl,imx27-vpu", .data = &coda_devdata[CODA_IMX27] },
1991 { .compatible = "fsl,imx53-vpu", .data = &coda_devdata[CODA_IMX53] }, 1991 { .compatible = "fsl,imx53-vpu", .data = &coda_devdata[CODA_IMX53] },
1992 { /* sentinel */ } 1992 { /* sentinel */ }
1993}; 1993};
diff --git a/drivers/media/platform/s5p-g2d/g2d.c b/drivers/media/platform/s5p-g2d/g2d.c
index 553d87e5ceab..fd6289d60cde 100644
--- a/drivers/media/platform/s5p-g2d/g2d.c
+++ b/drivers/media/platform/s5p-g2d/g2d.c
@@ -784,6 +784,7 @@ static int g2d_probe(struct platform_device *pdev)
784 } 784 }
785 *vfd = g2d_videodev; 785 *vfd = g2d_videodev;
786 vfd->lock = &dev->mutex; 786 vfd->lock = &dev->mutex;
787 vfd->v4l2_dev = &dev->v4l2_dev;
787 ret = video_register_device(vfd, VFL_TYPE_GRABBER, 0); 788 ret = video_register_device(vfd, VFL_TYPE_GRABBER, 0);
788 if (ret) { 789 if (ret) {
789 v4l2_err(&dev->v4l2_dev, "Failed to register video device\n"); 790 v4l2_err(&dev->v4l2_dev, "Failed to register video device\n");
diff --git a/drivers/media/platform/s5p-mfc/s5p_mfc_dec.c b/drivers/media/platform/s5p-mfc/s5p_mfc_dec.c
index 5296385153d5..4f6dd42c9adb 100644
--- a/drivers/media/platform/s5p-mfc/s5p_mfc_dec.c
+++ b/drivers/media/platform/s5p-mfc/s5p_mfc_dec.c
@@ -344,7 +344,7 @@ static int vidioc_g_fmt(struct file *file, void *priv, struct v4l2_format *f)
344 pix_mp->num_planes = 2; 344 pix_mp->num_planes = 2;
345 /* Set pixelformat to the format in which MFC 345 /* Set pixelformat to the format in which MFC
346 outputs the decoded frame */ 346 outputs the decoded frame */
347 pix_mp->pixelformat = V4L2_PIX_FMT_NV12MT; 347 pix_mp->pixelformat = ctx->dst_fmt->fourcc;
348 pix_mp->plane_fmt[0].bytesperline = ctx->buf_width; 348 pix_mp->plane_fmt[0].bytesperline = ctx->buf_width;
349 pix_mp->plane_fmt[0].sizeimage = ctx->luma_size; 349 pix_mp->plane_fmt[0].sizeimage = ctx->luma_size;
350 pix_mp->plane_fmt[1].bytesperline = ctx->buf_width; 350 pix_mp->plane_fmt[1].bytesperline = ctx->buf_width;
@@ -382,10 +382,16 @@ static int vidioc_try_fmt(struct file *file, void *priv, struct v4l2_format *f)
382 mfc_err("Unsupported format for source.\n"); 382 mfc_err("Unsupported format for source.\n");
383 return -EINVAL; 383 return -EINVAL;
384 } 384 }
385 if (!IS_MFCV6(dev) && (fmt->fourcc == V4L2_PIX_FMT_VP8)) { 385 if (fmt->codec_mode == S5P_FIMV_CODEC_NONE) {
386 mfc_err("Not supported format.\n"); 386 mfc_err("Unknown codec\n");
387 return -EINVAL; 387 return -EINVAL;
388 } 388 }
389 if (!IS_MFCV6(dev)) {
390 if (fmt->fourcc == V4L2_PIX_FMT_VP8) {
391 mfc_err("Not supported format.\n");
392 return -EINVAL;
393 }
394 }
389 } else if (f->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) { 395 } else if (f->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) {
390 fmt = find_format(f, MFC_FMT_RAW); 396 fmt = find_format(f, MFC_FMT_RAW);
391 if (!fmt) { 397 if (!fmt) {
@@ -411,7 +417,6 @@ static int vidioc_s_fmt(struct file *file, void *priv, struct v4l2_format *f)
411 struct s5p_mfc_dev *dev = video_drvdata(file); 417 struct s5p_mfc_dev *dev = video_drvdata(file);
412 struct s5p_mfc_ctx *ctx = fh_to_ctx(priv); 418 struct s5p_mfc_ctx *ctx = fh_to_ctx(priv);
413 int ret = 0; 419 int ret = 0;
414 struct s5p_mfc_fmt *fmt;
415 struct v4l2_pix_format_mplane *pix_mp; 420 struct v4l2_pix_format_mplane *pix_mp;
416 421
417 mfc_debug_enter(); 422 mfc_debug_enter();
@@ -425,54 +430,32 @@ static int vidioc_s_fmt(struct file *file, void *priv, struct v4l2_format *f)
425 goto out; 430 goto out;
426 } 431 }
427 if (f->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) { 432 if (f->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) {
428 fmt = find_format(f, MFC_FMT_RAW); 433 /* dst_fmt is validated by call to vidioc_try_fmt */
429 if (!fmt) { 434 ctx->dst_fmt = find_format(f, MFC_FMT_RAW);
430 mfc_err("Unsupported format for source.\n"); 435 ret = 0;
431 return -EINVAL;
432 }
433 if (!IS_MFCV6(dev) && (fmt->fourcc != V4L2_PIX_FMT_NV12MT)) {
434 mfc_err("Not supported format.\n");
435 return -EINVAL;
436 } else if (IS_MFCV6(dev) &&
437 (fmt->fourcc == V4L2_PIX_FMT_NV12MT)) {
438 mfc_err("Not supported format.\n");
439 return -EINVAL;
440 }
441 ctx->dst_fmt = fmt;
442 mfc_debug_leave();
443 return ret;
444 } else if (f->type != V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) {
445 mfc_err("Wrong type error for S_FMT : %d", f->type);
446 return -EINVAL;
447 }
448 fmt = find_format(f, MFC_FMT_DEC);
449 if (!fmt || fmt->codec_mode == S5P_MFC_CODEC_NONE) {
450 mfc_err("Unknown codec\n");
451 ret = -EINVAL;
452 goto out; 436 goto out;
453 } 437 } else if (f->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) {
454 if (fmt->type != MFC_FMT_DEC) { 438 /* src_fmt is validated by call to vidioc_try_fmt */
455 mfc_err("Wrong format selected, you should choose " 439 ctx->src_fmt = find_format(f, MFC_FMT_DEC);
456 "format for decoding\n"); 440 ctx->codec_mode = ctx->src_fmt->codec_mode;
441 mfc_debug(2, "The codec number is: %d\n", ctx->codec_mode);
442 pix_mp->height = 0;
443 pix_mp->width = 0;
444 if (pix_mp->plane_fmt[0].sizeimage)
445 ctx->dec_src_buf_size = pix_mp->plane_fmt[0].sizeimage;
446 else
447 pix_mp->plane_fmt[0].sizeimage = ctx->dec_src_buf_size =
448 DEF_CPB_SIZE;
449 pix_mp->plane_fmt[0].bytesperline = 0;
450 ctx->state = MFCINST_INIT;
451 ret = 0;
452 goto out;
453 } else {
454 mfc_err("Wrong type error for S_FMT : %d", f->type);
457 ret = -EINVAL; 455 ret = -EINVAL;
458 goto out; 456 goto out;
459 } 457 }
460 if (!IS_MFCV6(dev) && (fmt->fourcc == V4L2_PIX_FMT_VP8)) { 458
461 mfc_err("Not supported format.\n");
462 return -EINVAL;
463 }
464 ctx->src_fmt = fmt;
465 ctx->codec_mode = fmt->codec_mode;
466 mfc_debug(2, "The codec number is: %d\n", ctx->codec_mode);
467 pix_mp->height = 0;
468 pix_mp->width = 0;
469 if (pix_mp->plane_fmt[0].sizeimage)
470 ctx->dec_src_buf_size = pix_mp->plane_fmt[0].sizeimage;
471 else
472 pix_mp->plane_fmt[0].sizeimage = ctx->dec_src_buf_size =
473 DEF_CPB_SIZE;
474 pix_mp->plane_fmt[0].bytesperline = 0;
475 ctx->state = MFCINST_INIT;
476out: 459out:
477 mfc_debug_leave(); 460 mfc_debug_leave();
478 return ret; 461 return ret;
diff --git a/drivers/media/platform/s5p-mfc/s5p_mfc_enc.c b/drivers/media/platform/s5p-mfc/s5p_mfc_enc.c
index 2549967b2f85..59e56f4c8ce3 100644
--- a/drivers/media/platform/s5p-mfc/s5p_mfc_enc.c
+++ b/drivers/media/platform/s5p-mfc/s5p_mfc_enc.c
@@ -906,6 +906,7 @@ static int vidioc_g_fmt(struct file *file, void *priv, struct v4l2_format *f)
906 906
907static int vidioc_try_fmt(struct file *file, void *priv, struct v4l2_format *f) 907static int vidioc_try_fmt(struct file *file, void *priv, struct v4l2_format *f)
908{ 908{
909 struct s5p_mfc_dev *dev = video_drvdata(file);
909 struct s5p_mfc_fmt *fmt; 910 struct s5p_mfc_fmt *fmt;
910 struct v4l2_pix_format_mplane *pix_fmt_mp = &f->fmt.pix_mp; 911 struct v4l2_pix_format_mplane *pix_fmt_mp = &f->fmt.pix_mp;
911 912
@@ -930,6 +931,18 @@ static int vidioc_try_fmt(struct file *file, void *priv, struct v4l2_format *f)
930 return -EINVAL; 931 return -EINVAL;
931 } 932 }
932 933
934 if (!IS_MFCV6(dev)) {
935 if (fmt->fourcc == V4L2_PIX_FMT_NV12MT_16X16) {
936 mfc_err("Not supported format.\n");
937 return -EINVAL;
938 }
939 } else if (IS_MFCV6(dev)) {
940 if (fmt->fourcc == V4L2_PIX_FMT_NV12MT) {
941 mfc_err("Not supported format.\n");
942 return -EINVAL;
943 }
944 }
945
933 if (fmt->num_planes != pix_fmt_mp->num_planes) { 946 if (fmt->num_planes != pix_fmt_mp->num_planes) {
934 mfc_err("failed to try output format\n"); 947 mfc_err("failed to try output format\n");
935 return -EINVAL; 948 return -EINVAL;
@@ -947,7 +960,6 @@ static int vidioc_s_fmt(struct file *file, void *priv, struct v4l2_format *f)
947{ 960{
948 struct s5p_mfc_dev *dev = video_drvdata(file); 961 struct s5p_mfc_dev *dev = video_drvdata(file);
949 struct s5p_mfc_ctx *ctx = fh_to_ctx(priv); 962 struct s5p_mfc_ctx *ctx = fh_to_ctx(priv);
950 struct s5p_mfc_fmt *fmt;
951 struct v4l2_pix_format_mplane *pix_fmt_mp = &f->fmt.pix_mp; 963 struct v4l2_pix_format_mplane *pix_fmt_mp = &f->fmt.pix_mp;
952 int ret = 0; 964 int ret = 0;
953 965
@@ -960,13 +972,9 @@ static int vidioc_s_fmt(struct file *file, void *priv, struct v4l2_format *f)
960 goto out; 972 goto out;
961 } 973 }
962 if (f->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) { 974 if (f->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) {
963 fmt = find_format(f, MFC_FMT_ENC); 975 /* dst_fmt is validated by call to vidioc_try_fmt */
964 if (!fmt) { 976 ctx->dst_fmt = find_format(f, MFC_FMT_ENC);
965 mfc_err("failed to set capture format\n");
966 return -EINVAL;
967 }
968 ctx->state = MFCINST_INIT; 977 ctx->state = MFCINST_INIT;
969 ctx->dst_fmt = fmt;
970 ctx->codec_mode = ctx->dst_fmt->codec_mode; 978 ctx->codec_mode = ctx->dst_fmt->codec_mode;
971 ctx->enc_dst_buf_size = pix_fmt_mp->plane_fmt[0].sizeimage; 979 ctx->enc_dst_buf_size = pix_fmt_mp->plane_fmt[0].sizeimage;
972 pix_fmt_mp->plane_fmt[0].bytesperline = 0; 980 pix_fmt_mp->plane_fmt[0].bytesperline = 0;
@@ -987,28 +995,8 @@ static int vidioc_s_fmt(struct file *file, void *priv, struct v4l2_format *f)
987 } 995 }
988 mfc_debug(2, "Got instance number: %d\n", ctx->inst_no); 996 mfc_debug(2, "Got instance number: %d\n", ctx->inst_no);
989 } else if (f->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) { 997 } else if (f->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) {
990 fmt = find_format(f, MFC_FMT_RAW); 998 /* src_fmt is validated by call to vidioc_try_fmt */
991 if (!fmt) { 999 ctx->src_fmt = find_format(f, MFC_FMT_RAW);
992 mfc_err("failed to set output format\n");
993 return -EINVAL;
994 }
995
996 if (!IS_MFCV6(dev) &&
997 (fmt->fourcc == V4L2_PIX_FMT_NV12MT_16X16)) {
998 mfc_err("Not supported format.\n");
999 return -EINVAL;
1000 } else if (IS_MFCV6(dev) &&
1001 (fmt->fourcc == V4L2_PIX_FMT_NV12MT)) {
1002 mfc_err("Not supported format.\n");
1003 return -EINVAL;
1004 }
1005
1006 if (fmt->num_planes != pix_fmt_mp->num_planes) {
1007 mfc_err("failed to set output format\n");
1008 ret = -EINVAL;
1009 goto out;
1010 }
1011 ctx->src_fmt = fmt;
1012 ctx->img_width = pix_fmt_mp->width; 1000 ctx->img_width = pix_fmt_mp->width;
1013 ctx->img_height = pix_fmt_mp->height; 1001 ctx->img_height = pix_fmt_mp->height;
1014 mfc_debug(2, "codec number: %d\n", ctx->src_fmt->codec_mode); 1002 mfc_debug(2, "codec number: %d\n", ctx->src_fmt->codec_mode);
diff --git a/drivers/media/usb/em28xx/em28xx-i2c.c b/drivers/media/usb/em28xx/em28xx-i2c.c
index 4851cc2e4a4d..c4ff9739a7ae 100644
--- a/drivers/media/usb/em28xx/em28xx-i2c.c
+++ b/drivers/media/usb/em28xx/em28xx-i2c.c
@@ -726,7 +726,7 @@ static int em28xx_i2c_eeprom(struct em28xx *dev, unsigned bus,
726 726
727 *eedata = data; 727 *eedata = data;
728 *eedata_len = len; 728 *eedata_len = len;
729 dev_config = (void *)eedata; 729 dev_config = (void *)*eedata;
730 730
731 switch (le16_to_cpu(dev_config->chip_conf) >> 4 & 0x3) { 731 switch (le16_to_cpu(dev_config->chip_conf) >> 4 & 0x3) {
732 case 0: 732 case 0:
diff --git a/drivers/media/usb/hdpvr/hdpvr-core.c b/drivers/media/usb/hdpvr/hdpvr-core.c
index cb694055ba7d..6e5070774dc2 100644
--- a/drivers/media/usb/hdpvr/hdpvr-core.c
+++ b/drivers/media/usb/hdpvr/hdpvr-core.c
@@ -303,6 +303,11 @@ static int hdpvr_probe(struct usb_interface *interface,
303 303
304 dev->workqueue = 0; 304 dev->workqueue = 0;
305 305
306 /* init video transfer queues first of all */
307 /* to prevent oops in hdpvr_delete() on error paths */
308 INIT_LIST_HEAD(&dev->free_buff_list);
309 INIT_LIST_HEAD(&dev->rec_buff_list);
310
306 /* register v4l2_device early so it can be used for printks */ 311 /* register v4l2_device early so it can be used for printks */
307 if (v4l2_device_register(&interface->dev, &dev->v4l2_dev)) { 312 if (v4l2_device_register(&interface->dev, &dev->v4l2_dev)) {
308 dev_err(&interface->dev, "v4l2_device_register failed\n"); 313 dev_err(&interface->dev, "v4l2_device_register failed\n");
@@ -325,10 +330,6 @@ static int hdpvr_probe(struct usb_interface *interface,
325 if (!dev->workqueue) 330 if (!dev->workqueue)
326 goto error; 331 goto error;
327 332
328 /* init video transfer queues */
329 INIT_LIST_HEAD(&dev->free_buff_list);
330 INIT_LIST_HEAD(&dev->rec_buff_list);
331
332 dev->options = hdpvr_default_options; 333 dev->options = hdpvr_default_options;
333 334
334 if (default_video_input < HDPVR_VIDEO_INPUTS) 335 if (default_video_input < HDPVR_VIDEO_INPUTS)
@@ -405,7 +406,7 @@ static int hdpvr_probe(struct usb_interface *interface,
405 video_nr[atomic_inc_return(&dev_nr)]); 406 video_nr[atomic_inc_return(&dev_nr)]);
406 if (retval < 0) { 407 if (retval < 0) {
407 v4l2_err(&dev->v4l2_dev, "registering videodev failed\n"); 408 v4l2_err(&dev->v4l2_dev, "registering videodev failed\n");
408 goto error; 409 goto reg_fail;
409 } 410 }
410 411
411 /* let the user know what node this device is now attached to */ 412 /* let the user know what node this device is now attached to */
diff --git a/drivers/media/usb/usbtv/Kconfig b/drivers/media/usb/usbtv/Kconfig
index 8864436464bf..7c5b86006ee6 100644
--- a/drivers/media/usb/usbtv/Kconfig
+++ b/drivers/media/usb/usbtv/Kconfig
@@ -1,6 +1,6 @@
1config VIDEO_USBTV 1config VIDEO_USBTV
2 tristate "USBTV007 video capture support" 2 tristate "USBTV007 video capture support"
3 depends on VIDEO_DEV 3 depends on VIDEO_V4L2
4 select VIDEOBUF2_VMALLOC 4 select VIDEOBUF2_VMALLOC
5 5
6 ---help--- 6 ---help---
diff --git a/drivers/media/usb/usbtv/usbtv.c b/drivers/media/usb/usbtv/usbtv.c
index bf43f874685e..91650173941a 100644
--- a/drivers/media/usb/usbtv/usbtv.c
+++ b/drivers/media/usb/usbtv/usbtv.c
@@ -57,7 +57,7 @@
57#define USBTV_CHUNK_SIZE 256 57#define USBTV_CHUNK_SIZE 256
58#define USBTV_CHUNK 240 58#define USBTV_CHUNK 240
59#define USBTV_CHUNKS (USBTV_WIDTH * USBTV_HEIGHT \ 59#define USBTV_CHUNKS (USBTV_WIDTH * USBTV_HEIGHT \
60 / 2 / USBTV_CHUNK) 60 / 4 / USBTV_CHUNK)
61 61
62/* Chunk header. */ 62/* Chunk header. */
63#define USBTV_MAGIC_OK(chunk) ((be32_to_cpu(chunk[0]) & 0xff000000) \ 63#define USBTV_MAGIC_OK(chunk) ((be32_to_cpu(chunk[0]) & 0xff000000) \
@@ -89,6 +89,7 @@ struct usbtv {
89 /* Number of currently processed frame, useful find 89 /* Number of currently processed frame, useful find
90 * out when a new one begins. */ 90 * out when a new one begins. */
91 u32 frame_id; 91 u32 frame_id;
92 int chunks_done;
92 93
93 int iso_size; 94 int iso_size;
94 unsigned int sequence; 95 unsigned int sequence;
@@ -202,6 +203,26 @@ static int usbtv_setup_capture(struct usbtv *usbtv)
202 return 0; 203 return 0;
203} 204}
204 205
206/* Copy data from chunk into a frame buffer, deinterlacing the data
207 * into every second line. Unfortunately, they don't align nicely into
208 * 720 pixel lines, as the chunk is 240 words long, which is 480 pixels.
209 * Therefore, we break down the chunk into two halves before copyting,
210 * so that we can interleave a line if needed. */
211static void usbtv_chunk_to_vbuf(u32 *frame, u32 *src, int chunk_no, int odd)
212{
213 int half;
214
215 for (half = 0; half < 2; half++) {
216 int part_no = chunk_no * 2 + half;
217 int line = part_no / 3;
218 int part_index = (line * 2 + !odd) * 3 + (part_no % 3);
219
220 u32 *dst = &frame[part_index * USBTV_CHUNK/2];
221 memcpy(dst, src, USBTV_CHUNK/2 * sizeof(*src));
222 src += USBTV_CHUNK/2;
223 }
224}
225
205/* Called for each 256-byte image chunk. 226/* Called for each 256-byte image chunk.
206 * First word identifies the chunk, followed by 240 words of image 227 * First word identifies the chunk, followed by 240 words of image
207 * data and padding. */ 228 * data and padding. */
@@ -218,17 +239,17 @@ static void usbtv_image_chunk(struct usbtv *usbtv, u32 *chunk)
218 frame_id = USBTV_FRAME_ID(chunk); 239 frame_id = USBTV_FRAME_ID(chunk);
219 odd = USBTV_ODD(chunk); 240 odd = USBTV_ODD(chunk);
220 chunk_no = USBTV_CHUNK_NO(chunk); 241 chunk_no = USBTV_CHUNK_NO(chunk);
221
222 /* Deinterlace. TODO: Use interlaced frame format. */
223 chunk_no = (chunk_no - chunk_no % 3) * 2 + chunk_no % 3;
224 chunk_no += !odd * 3;
225
226 if (chunk_no >= USBTV_CHUNKS) 242 if (chunk_no >= USBTV_CHUNKS)
227 return; 243 return;
228 244
229 /* Beginning of a frame. */ 245 /* Beginning of a frame. */
230 if (chunk_no == 0) 246 if (chunk_no == 0) {
231 usbtv->frame_id = frame_id; 247 usbtv->frame_id = frame_id;
248 usbtv->chunks_done = 0;
249 }
250
251 if (usbtv->frame_id != frame_id)
252 return;
232 253
233 spin_lock_irqsave(&usbtv->buflock, flags); 254 spin_lock_irqsave(&usbtv->buflock, flags);
234 if (list_empty(&usbtv->bufs)) { 255 if (list_empty(&usbtv->bufs)) {
@@ -241,19 +262,23 @@ static void usbtv_image_chunk(struct usbtv *usbtv, u32 *chunk)
241 buf = list_first_entry(&usbtv->bufs, struct usbtv_buf, list); 262 buf = list_first_entry(&usbtv->bufs, struct usbtv_buf, list);
242 frame = vb2_plane_vaddr(&buf->vb, 0); 263 frame = vb2_plane_vaddr(&buf->vb, 0);
243 264
244 /* Copy the chunk. */ 265 /* Copy the chunk data. */
245 memcpy(&frame[chunk_no * USBTV_CHUNK], &chunk[1], 266 usbtv_chunk_to_vbuf(frame, &chunk[1], chunk_no, odd);
246 USBTV_CHUNK * sizeof(chunk[1])); 267 usbtv->chunks_done++;
247 268
248 /* Last chunk in a frame, signalling an end */ 269 /* Last chunk in a frame, signalling an end */
249 if (usbtv->frame_id && chunk_no == USBTV_CHUNKS-1) { 270 if (odd && chunk_no == USBTV_CHUNKS-1) {
250 int size = vb2_plane_size(&buf->vb, 0); 271 int size = vb2_plane_size(&buf->vb, 0);
272 enum vb2_buffer_state state = usbtv->chunks_done ==
273 USBTV_CHUNKS ?
274 VB2_BUF_STATE_DONE :
275 VB2_BUF_STATE_ERROR;
251 276
252 buf->vb.v4l2_buf.field = V4L2_FIELD_INTERLACED; 277 buf->vb.v4l2_buf.field = V4L2_FIELD_INTERLACED;
253 buf->vb.v4l2_buf.sequence = usbtv->sequence++; 278 buf->vb.v4l2_buf.sequence = usbtv->sequence++;
254 v4l2_get_timestamp(&buf->vb.v4l2_buf.timestamp); 279 v4l2_get_timestamp(&buf->vb.v4l2_buf.timestamp);
255 vb2_set_plane_payload(&buf->vb, 0, size); 280 vb2_set_plane_payload(&buf->vb, 0, size);
256 vb2_buffer_done(&buf->vb, VB2_BUF_STATE_DONE); 281 vb2_buffer_done(&buf->vb, state);
257 list_del(&buf->list); 282 list_del(&buf->list);
258 } 283 }
259 284
@@ -518,7 +543,7 @@ static int usbtv_queue_setup(struct vb2_queue *vq,
518 if (*nbuffers < 2) 543 if (*nbuffers < 2)
519 *nbuffers = 2; 544 *nbuffers = 2;
520 *nplanes = 1; 545 *nplanes = 1;
521 sizes[0] = USBTV_CHUNK * USBTV_CHUNKS * sizeof(u32); 546 sizes[0] = USBTV_WIDTH * USBTV_HEIGHT / 2 * sizeof(u32);
522 547
523 return 0; 548 return 0;
524} 549}
diff --git a/drivers/misc/atmel-ssc.c b/drivers/misc/atmel-ssc.c
index f7b90661e321..e068a76a5f6f 100644
--- a/drivers/misc/atmel-ssc.c
+++ b/drivers/misc/atmel-ssc.c
@@ -66,14 +66,19 @@ EXPORT_SYMBOL(ssc_request);
66 66
67void ssc_free(struct ssc_device *ssc) 67void ssc_free(struct ssc_device *ssc)
68{ 68{
69 bool disable_clk = true;
70
69 spin_lock(&user_lock); 71 spin_lock(&user_lock);
70 if (ssc->user) { 72 if (ssc->user)
71 ssc->user--; 73 ssc->user--;
72 clk_disable_unprepare(ssc->clk); 74 else {
73 } else { 75 disable_clk = false;
74 dev_dbg(&ssc->pdev->dev, "device already free\n"); 76 dev_dbg(&ssc->pdev->dev, "device already free\n");
75 } 77 }
76 spin_unlock(&user_lock); 78 spin_unlock(&user_lock);
79
80 if (disable_clk)
81 clk_disable_unprepare(ssc->clk);
77} 82}
78EXPORT_SYMBOL(ssc_free); 83EXPORT_SYMBOL(ssc_free);
79 84
diff --git a/drivers/misc/mei/hbm.c b/drivers/misc/mei/hbm.c
index f9296abcf02a..6127ab64bb39 100644
--- a/drivers/misc/mei/hbm.c
+++ b/drivers/misc/mei/hbm.c
@@ -167,7 +167,7 @@ int mei_hbm_start_req(struct mei_device *dev)
167 167
168 dev->hbm_state = MEI_HBM_IDLE; 168 dev->hbm_state = MEI_HBM_IDLE;
169 if (mei_write_message(dev, mei_hdr, dev->wr_msg.data)) { 169 if (mei_write_message(dev, mei_hdr, dev->wr_msg.data)) {
170 dev_err(&dev->pdev->dev, "version message writet failed\n"); 170 dev_err(&dev->pdev->dev, "version message write failed\n");
171 dev->dev_state = MEI_DEV_RESETTING; 171 dev->dev_state = MEI_DEV_RESETTING;
172 mei_reset(dev, 1); 172 mei_reset(dev, 1);
173 return -ENODEV; 173 return -ENODEV;
diff --git a/drivers/misc/mei/hw-me.c b/drivers/misc/mei/hw-me.c
index e4f8dec4dc3c..b22c7e247225 100644
--- a/drivers/misc/mei/hw-me.c
+++ b/drivers/misc/mei/hw-me.c
@@ -239,14 +239,18 @@ static int mei_me_hw_ready_wait(struct mei_device *dev)
239 if (mei_me_hw_is_ready(dev)) 239 if (mei_me_hw_is_ready(dev))
240 return 0; 240 return 0;
241 241
242 dev->recvd_hw_ready = false;
242 mutex_unlock(&dev->device_lock); 243 mutex_unlock(&dev->device_lock);
243 err = wait_event_interruptible_timeout(dev->wait_hw_ready, 244 err = wait_event_interruptible_timeout(dev->wait_hw_ready,
244 dev->recvd_hw_ready, MEI_INTEROP_TIMEOUT); 245 dev->recvd_hw_ready,
246 mei_secs_to_jiffies(MEI_INTEROP_TIMEOUT));
245 mutex_lock(&dev->device_lock); 247 mutex_lock(&dev->device_lock);
246 if (!err && !dev->recvd_hw_ready) { 248 if (!err && !dev->recvd_hw_ready) {
249 if (!err)
250 err = -ETIMEDOUT;
247 dev_err(&dev->pdev->dev, 251 dev_err(&dev->pdev->dev,
248 "wait hw ready failed. status = 0x%x\n", err); 252 "wait hw ready failed. status = %d\n", err);
249 return -ETIMEDOUT; 253 return err;
250 } 254 }
251 255
252 dev->recvd_hw_ready = false; 256 dev->recvd_hw_ready = false;
@@ -483,7 +487,9 @@ irqreturn_t mei_me_irq_thread_handler(int irq, void *dev_id)
483 /* check if ME wants a reset */ 487 /* check if ME wants a reset */
484 if (!mei_hw_is_ready(dev) && 488 if (!mei_hw_is_ready(dev) &&
485 dev->dev_state != MEI_DEV_RESETTING && 489 dev->dev_state != MEI_DEV_RESETTING &&
486 dev->dev_state != MEI_DEV_INITIALIZING) { 490 dev->dev_state != MEI_DEV_INITIALIZING &&
491 dev->dev_state != MEI_DEV_POWER_DOWN &&
492 dev->dev_state != MEI_DEV_POWER_UP) {
487 dev_dbg(&dev->pdev->dev, "FW not ready.\n"); 493 dev_dbg(&dev->pdev->dev, "FW not ready.\n");
488 mei_reset(dev, 1); 494 mei_reset(dev, 1);
489 mutex_unlock(&dev->device_lock); 495 mutex_unlock(&dev->device_lock);
diff --git a/drivers/misc/mei/init.c b/drivers/misc/mei/init.c
index ed1d75203af6..e6f16f83ecde 100644
--- a/drivers/misc/mei/init.c
+++ b/drivers/misc/mei/init.c
@@ -148,7 +148,8 @@ void mei_reset(struct mei_device *dev, int interrupts_enabled)
148 148
149 dev->hbm_state = MEI_HBM_IDLE; 149 dev->hbm_state = MEI_HBM_IDLE;
150 150
151 if (dev->dev_state != MEI_DEV_INITIALIZING) { 151 if (dev->dev_state != MEI_DEV_INITIALIZING &&
152 dev->dev_state != MEI_DEV_POWER_UP) {
152 if (dev->dev_state != MEI_DEV_DISABLED && 153 if (dev->dev_state != MEI_DEV_DISABLED &&
153 dev->dev_state != MEI_DEV_POWER_DOWN) 154 dev->dev_state != MEI_DEV_POWER_DOWN)
154 dev->dev_state = MEI_DEV_RESETTING; 155 dev->dev_state = MEI_DEV_RESETTING;
diff --git a/drivers/mmc/host/pxamci.c b/drivers/mmc/host/pxamci.c
index 847b1996ce8e..2c5a91bb8ec3 100644
--- a/drivers/mmc/host/pxamci.c
+++ b/drivers/mmc/host/pxamci.c
@@ -128,7 +128,7 @@ static inline int pxamci_set_power(struct pxamci_host *host,
128 !!on ^ host->pdata->gpio_power_invert); 128 !!on ^ host->pdata->gpio_power_invert);
129 } 129 }
130 if (!host->vcc && host->pdata && host->pdata->setpower) 130 if (!host->vcc && host->pdata && host->pdata->setpower)
131 host->pdata->setpower(mmc_dev(host->mmc), vdd); 131 return host->pdata->setpower(mmc_dev(host->mmc), vdd);
132 132
133 return 0; 133 return 0;
134} 134}
diff --git a/drivers/net/arcnet/arcnet.c b/drivers/net/arcnet/arcnet.c
index a746ba272f04..a956053608f9 100644
--- a/drivers/net/arcnet/arcnet.c
+++ b/drivers/net/arcnet/arcnet.c
@@ -1007,7 +1007,7 @@ static void arcnet_rx(struct net_device *dev, int bufnum)
1007 1007
1008 soft = &pkt.soft.rfc1201; 1008 soft = &pkt.soft.rfc1201;
1009 1009
1010 lp->hw.copy_from_card(dev, bufnum, 0, &pkt, sizeof(ARC_HDR_SIZE)); 1010 lp->hw.copy_from_card(dev, bufnum, 0, &pkt, ARC_HDR_SIZE);
1011 if (pkt.hard.offset[0]) { 1011 if (pkt.hard.offset[0]) {
1012 ofs = pkt.hard.offset[0]; 1012 ofs = pkt.hard.offset[0];
1013 length = 256 - ofs; 1013 length = 256 - ofs;
diff --git a/drivers/net/can/usb/esd_usb2.c b/drivers/net/can/usb/esd_usb2.c
index 6aa7b3266c80..ac6177d3befc 100644
--- a/drivers/net/can/usb/esd_usb2.c
+++ b/drivers/net/can/usb/esd_usb2.c
@@ -412,10 +412,20 @@ static void esd_usb2_read_bulk_callback(struct urb *urb)
412 412
413 switch (msg->msg.hdr.cmd) { 413 switch (msg->msg.hdr.cmd) {
414 case CMD_CAN_RX: 414 case CMD_CAN_RX:
415 if (msg->msg.rx.net >= dev->net_count) {
416 dev_err(dev->udev->dev.parent, "format error\n");
417 break;
418 }
419
415 esd_usb2_rx_can_msg(dev->nets[msg->msg.rx.net], msg); 420 esd_usb2_rx_can_msg(dev->nets[msg->msg.rx.net], msg);
416 break; 421 break;
417 422
418 case CMD_CAN_TX: 423 case CMD_CAN_TX:
424 if (msg->msg.txdone.net >= dev->net_count) {
425 dev_err(dev->udev->dev.parent, "format error\n");
426 break;
427 }
428
419 esd_usb2_tx_done_msg(dev->nets[msg->msg.txdone.net], 429 esd_usb2_tx_done_msg(dev->nets[msg->msg.txdone.net],
420 msg); 430 msg);
421 break; 431 break;
diff --git a/drivers/net/can/usb/usb_8dev.c b/drivers/net/can/usb/usb_8dev.c
index cbd388eea682..8becd3d838b5 100644
--- a/drivers/net/can/usb/usb_8dev.c
+++ b/drivers/net/can/usb/usb_8dev.c
@@ -779,6 +779,7 @@ static int usb_8dev_start(struct usb_8dev_priv *priv)
779 usb_unanchor_urb(urb); 779 usb_unanchor_urb(urb);
780 usb_free_coherent(priv->udev, RX_BUFFER_SIZE, buf, 780 usb_free_coherent(priv->udev, RX_BUFFER_SIZE, buf,
781 urb->transfer_dma); 781 urb->transfer_dma);
782 usb_free_urb(urb);
782 break; 783 break;
783 } 784 }
784 785
diff --git a/drivers/net/ethernet/allwinner/Kconfig b/drivers/net/ethernet/allwinner/Kconfig
index 53ad213e865b..d8d95d4cd45a 100644
--- a/drivers/net/ethernet/allwinner/Kconfig
+++ b/drivers/net/ethernet/allwinner/Kconfig
@@ -3,19 +3,20 @@
3# 3#
4 4
5config NET_VENDOR_ALLWINNER 5config NET_VENDOR_ALLWINNER
6 bool "Allwinner devices" 6 bool "Allwinner devices"
7 default y 7 default y
8 depends on ARCH_SUNXI
9 ---help---
10 If you have a network (Ethernet) card belonging to this
11 class, say Y and read the Ethernet-HOWTO, available from
12 <http://www.tldp.org/docs.html#howto>.
13 8
14 Note that the answer to this question doesn't directly 9 depends on ARCH_SUNXI
15 affect the kernel: saying N will just cause the configurator 10 ---help---
16 to skip all the questions about Allwinner cards. If you say Y, 11 If you have a network (Ethernet) card belonging to this
17 you will be asked for your specific card in the following 12 class, say Y and read the Ethernet-HOWTO, available from
18 questions. 13 <http://www.tldp.org/docs.html#howto>.
14
15 Note that the answer to this question doesn't directly
16 affect the kernel: saying N will just cause the configurator
17 to skip all the questions about Allwinner cards. If you say Y,
18 you will be asked for your specific card in the following
19 questions.
19 20
20if NET_VENDOR_ALLWINNER 21if NET_VENDOR_ALLWINNER
21 22
@@ -26,6 +27,7 @@ config SUN4I_EMAC
26 select CRC32 27 select CRC32
27 select MII 28 select MII
28 select PHYLIB 29 select PHYLIB
30 select MDIO_SUN4I
29 ---help--- 31 ---help---
30 Support for Allwinner A10 EMAC ethernet driver. 32 Support for Allwinner A10 EMAC ethernet driver.
31 33
diff --git a/drivers/net/ethernet/atheros/atl1c/atl1c.h b/drivers/net/ethernet/atheros/atl1c/atl1c.h
index b2bf324631dc..0f0556526ba9 100644
--- a/drivers/net/ethernet/atheros/atl1c/atl1c.h
+++ b/drivers/net/ethernet/atheros/atl1c/atl1c.h
@@ -520,6 +520,9 @@ struct atl1c_adapter {
520 struct net_device *netdev; 520 struct net_device *netdev;
521 struct pci_dev *pdev; 521 struct pci_dev *pdev;
522 struct napi_struct napi; 522 struct napi_struct napi;
523 struct page *rx_page;
524 unsigned int rx_page_offset;
525 unsigned int rx_frag_size;
523 struct atl1c_hw hw; 526 struct atl1c_hw hw;
524 struct atl1c_hw_stats hw_stats; 527 struct atl1c_hw_stats hw_stats;
525 struct mii_if_info mii; /* MII interface info */ 528 struct mii_if_info mii; /* MII interface info */
diff --git a/drivers/net/ethernet/atheros/atl1c/atl1c_main.c b/drivers/net/ethernet/atheros/atl1c/atl1c_main.c
index 786a87483298..a36a760ada28 100644
--- a/drivers/net/ethernet/atheros/atl1c/atl1c_main.c
+++ b/drivers/net/ethernet/atheros/atl1c/atl1c_main.c
@@ -481,10 +481,15 @@ static int atl1c_set_mac_addr(struct net_device *netdev, void *p)
481static void atl1c_set_rxbufsize(struct atl1c_adapter *adapter, 481static void atl1c_set_rxbufsize(struct atl1c_adapter *adapter,
482 struct net_device *dev) 482 struct net_device *dev)
483{ 483{
484 unsigned int head_size;
484 int mtu = dev->mtu; 485 int mtu = dev->mtu;
485 486
486 adapter->rx_buffer_len = mtu > AT_RX_BUF_SIZE ? 487 adapter->rx_buffer_len = mtu > AT_RX_BUF_SIZE ?
487 roundup(mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN, 8) : AT_RX_BUF_SIZE; 488 roundup(mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN, 8) : AT_RX_BUF_SIZE;
489
490 head_size = SKB_DATA_ALIGN(adapter->rx_buffer_len + NET_SKB_PAD) +
491 SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
492 adapter->rx_frag_size = roundup_pow_of_two(head_size);
488} 493}
489 494
490static netdev_features_t atl1c_fix_features(struct net_device *netdev, 495static netdev_features_t atl1c_fix_features(struct net_device *netdev,
@@ -952,6 +957,10 @@ static void atl1c_free_ring_resources(struct atl1c_adapter *adapter)
952 kfree(adapter->tpd_ring[0].buffer_info); 957 kfree(adapter->tpd_ring[0].buffer_info);
953 adapter->tpd_ring[0].buffer_info = NULL; 958 adapter->tpd_ring[0].buffer_info = NULL;
954 } 959 }
960 if (adapter->rx_page) {
961 put_page(adapter->rx_page);
962 adapter->rx_page = NULL;
963 }
955} 964}
956 965
957/** 966/**
@@ -1639,6 +1648,35 @@ static inline void atl1c_rx_checksum(struct atl1c_adapter *adapter,
1639 skb_checksum_none_assert(skb); 1648 skb_checksum_none_assert(skb);
1640} 1649}
1641 1650
1651static struct sk_buff *atl1c_alloc_skb(struct atl1c_adapter *adapter)
1652{
1653 struct sk_buff *skb;
1654 struct page *page;
1655
1656 if (adapter->rx_frag_size > PAGE_SIZE)
1657 return netdev_alloc_skb(adapter->netdev,
1658 adapter->rx_buffer_len);
1659
1660 page = adapter->rx_page;
1661 if (!page) {
1662 adapter->rx_page = page = alloc_page(GFP_ATOMIC);
1663 if (unlikely(!page))
1664 return NULL;
1665 adapter->rx_page_offset = 0;
1666 }
1667
1668 skb = build_skb(page_address(page) + adapter->rx_page_offset,
1669 adapter->rx_frag_size);
1670 if (likely(skb)) {
1671 adapter->rx_page_offset += adapter->rx_frag_size;
1672 if (adapter->rx_page_offset >= PAGE_SIZE)
1673 adapter->rx_page = NULL;
1674 else
1675 get_page(page);
1676 }
1677 return skb;
1678}
1679
1642static int atl1c_alloc_rx_buffer(struct atl1c_adapter *adapter) 1680static int atl1c_alloc_rx_buffer(struct atl1c_adapter *adapter)
1643{ 1681{
1644 struct atl1c_rfd_ring *rfd_ring = &adapter->rfd_ring; 1682 struct atl1c_rfd_ring *rfd_ring = &adapter->rfd_ring;
@@ -1660,7 +1698,7 @@ static int atl1c_alloc_rx_buffer(struct atl1c_adapter *adapter)
1660 while (next_info->flags & ATL1C_BUFFER_FREE) { 1698 while (next_info->flags & ATL1C_BUFFER_FREE) {
1661 rfd_desc = ATL1C_RFD_DESC(rfd_ring, rfd_next_to_use); 1699 rfd_desc = ATL1C_RFD_DESC(rfd_ring, rfd_next_to_use);
1662 1700
1663 skb = netdev_alloc_skb(adapter->netdev, adapter->rx_buffer_len); 1701 skb = atl1c_alloc_skb(adapter);
1664 if (unlikely(!skb)) { 1702 if (unlikely(!skb)) {
1665 if (netif_msg_rx_err(adapter)) 1703 if (netif_msg_rx_err(adapter))
1666 dev_warn(&pdev->dev, "alloc rx buffer failed\n"); 1704 dev_warn(&pdev->dev, "alloc rx buffer failed\n");
diff --git a/drivers/net/ethernet/atheros/atl1e/atl1e_main.c b/drivers/net/ethernet/atheros/atl1e/atl1e_main.c
index 6d1a62a84c9d..1966444590f6 100644
--- a/drivers/net/ethernet/atheros/atl1e/atl1e_main.c
+++ b/drivers/net/ethernet/atheros/atl1e/atl1e_main.c
@@ -1678,6 +1678,7 @@ static int atl1e_tx_map(struct atl1e_adapter *adapter,
1678 u16 f; 1678 u16 f;
1679 int segment; 1679 int segment;
1680 int ring_start = adapter->tx_ring.next_to_use; 1680 int ring_start = adapter->tx_ring.next_to_use;
1681 int ring_end;
1681 1682
1682 nr_frags = skb_shinfo(skb)->nr_frags; 1683 nr_frags = skb_shinfo(skb)->nr_frags;
1683 segment = (tpd->word3 >> TPD_SEGMENT_EN_SHIFT) & TPD_SEGMENT_EN_MASK; 1684 segment = (tpd->word3 >> TPD_SEGMENT_EN_SHIFT) & TPD_SEGMENT_EN_MASK;
@@ -1721,6 +1722,15 @@ static int atl1e_tx_map(struct atl1e_adapter *adapter,
1721 map_len, PCI_DMA_TODEVICE); 1722 map_len, PCI_DMA_TODEVICE);
1722 1723
1723 if (dma_mapping_error(&adapter->pdev->dev, tx_buffer->dma)) { 1724 if (dma_mapping_error(&adapter->pdev->dev, tx_buffer->dma)) {
1725 /* We need to unwind the mappings we've done */
1726 ring_end = adapter->tx_ring.next_to_use;
1727 adapter->tx_ring.next_to_use = ring_start;
1728 while (adapter->tx_ring.next_to_use != ring_end) {
1729 tpd = atl1e_get_tpd(adapter);
1730 tx_buffer = atl1e_get_tx_buffer(adapter, tpd);
1731 pci_unmap_single(adapter->pdev, tx_buffer->dma,
1732 tx_buffer->length, PCI_DMA_TODEVICE);
1733 }
1724 /* Reset the tx rings next pointer */ 1734 /* Reset the tx rings next pointer */
1725 adapter->tx_ring.next_to_use = ring_start; 1735 adapter->tx_ring.next_to_use = ring_start;
1726 return -ENOSPC; 1736 return -ENOSPC;
@@ -1763,6 +1773,16 @@ static int atl1e_tx_map(struct atl1e_adapter *adapter,
1763 DMA_TO_DEVICE); 1773 DMA_TO_DEVICE);
1764 1774
1765 if (dma_mapping_error(&adapter->pdev->dev, tx_buffer->dma)) { 1775 if (dma_mapping_error(&adapter->pdev->dev, tx_buffer->dma)) {
1776 /* We need to unwind the mappings we've done */
1777 ring_end = adapter->tx_ring.next_to_use;
1778 adapter->tx_ring.next_to_use = ring_start;
1779 while (adapter->tx_ring.next_to_use != ring_end) {
1780 tpd = atl1e_get_tpd(adapter);
1781 tx_buffer = atl1e_get_tx_buffer(adapter, tpd);
1782 dma_unmap_page(&adapter->pdev->dev, tx_buffer->dma,
1783 tx_buffer->length, DMA_TO_DEVICE);
1784 }
1785
1766 /* Reset the ring next to use pointer */ 1786 /* Reset the ring next to use pointer */
1767 adapter->tx_ring.next_to_use = ring_start; 1787 adapter->tx_ring.next_to_use = ring_start;
1768 return -ENOSPC; 1788 return -ENOSPC;
@@ -1853,8 +1873,10 @@ static netdev_tx_t atl1e_xmit_frame(struct sk_buff *skb,
1853 return NETDEV_TX_OK; 1873 return NETDEV_TX_OK;
1854 } 1874 }
1855 1875
1856 if (atl1e_tx_map(adapter, skb, tpd)) 1876 if (atl1e_tx_map(adapter, skb, tpd)) {
1877 dev_kfree_skb_any(skb);
1857 goto out; 1878 goto out;
1879 }
1858 1880
1859 atl1e_tx_queue(adapter, tpd_req, tpd); 1881 atl1e_tx_queue(adapter, tpd_req, tpd);
1860 1882
diff --git a/drivers/net/ethernet/broadcom/Kconfig b/drivers/net/ethernet/broadcom/Kconfig
index 1d680baf43d6..52c96036dcc4 100644
--- a/drivers/net/ethernet/broadcom/Kconfig
+++ b/drivers/net/ethernet/broadcom/Kconfig
@@ -131,6 +131,7 @@ config BNX2X_SRIOV
131config BGMAC 131config BGMAC
132 tristate "BCMA bus GBit core support" 132 tristate "BCMA bus GBit core support"
133 depends on BCMA_HOST_SOC && HAS_DMA 133 depends on BCMA_HOST_SOC && HAS_DMA
134 select PHYLIB
134 ---help--- 135 ---help---
135 This driver supports GBit MAC and BCM4706 GBit MAC cores on BCMA bus. 136 This driver supports GBit MAC and BCM4706 GBit MAC cores on BCMA bus.
136 They can be found on BCM47xx SoCs and provide gigabit ethernet. 137 They can be found on BCM47xx SoCs and provide gigabit ethernet.
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
index dedbd76c033e..d80e34b8285f 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
@@ -486,7 +486,7 @@ struct bnx2x_fastpath {
486 486
487 struct napi_struct napi; 487 struct napi_struct napi;
488 488
489#ifdef CONFIG_NET_LL_RX_POLL 489#ifdef CONFIG_NET_RX_BUSY_POLL
490 unsigned int state; 490 unsigned int state;
491#define BNX2X_FP_STATE_IDLE 0 491#define BNX2X_FP_STATE_IDLE 0
492#define BNX2X_FP_STATE_NAPI (1 << 0) /* NAPI owns this FP */ 492#define BNX2X_FP_STATE_NAPI (1 << 0) /* NAPI owns this FP */
@@ -498,7 +498,7 @@ struct bnx2x_fastpath {
498#define BNX2X_FP_USER_PEND (BNX2X_FP_STATE_POLL | BNX2X_FP_STATE_POLL_YIELD) 498#define BNX2X_FP_USER_PEND (BNX2X_FP_STATE_POLL | BNX2X_FP_STATE_POLL_YIELD)
499 /* protect state */ 499 /* protect state */
500 spinlock_t lock; 500 spinlock_t lock;
501#endif /* CONFIG_NET_LL_RX_POLL */ 501#endif /* CONFIG_NET_RX_BUSY_POLL */
502 502
503 union host_hc_status_block status_blk; 503 union host_hc_status_block status_blk;
504 /* chip independent shortcuts into sb structure */ 504 /* chip independent shortcuts into sb structure */
@@ -572,7 +572,7 @@ struct bnx2x_fastpath {
572#define bnx2x_fp_stats(bp, fp) (&((bp)->fp_stats[(fp)->index])) 572#define bnx2x_fp_stats(bp, fp) (&((bp)->fp_stats[(fp)->index]))
573#define bnx2x_fp_qstats(bp, fp) (&((bp)->fp_stats[(fp)->index].eth_q_stats)) 573#define bnx2x_fp_qstats(bp, fp) (&((bp)->fp_stats[(fp)->index].eth_q_stats))
574 574
575#ifdef CONFIG_NET_LL_RX_POLL 575#ifdef CONFIG_NET_RX_BUSY_POLL
576static inline void bnx2x_fp_init_lock(struct bnx2x_fastpath *fp) 576static inline void bnx2x_fp_init_lock(struct bnx2x_fastpath *fp)
577{ 577{
578 spin_lock_init(&fp->lock); 578 spin_lock_init(&fp->lock);
@@ -680,7 +680,7 @@ static inline bool bnx2x_fp_ll_polling(struct bnx2x_fastpath *fp)
680{ 680{
681 return false; 681 return false;
682} 682}
683#endif /* CONFIG_NET_LL_RX_POLL */ 683#endif /* CONFIG_NET_RX_BUSY_POLL */
684 684
685/* Use 2500 as a mini-jumbo MTU for FCoE */ 685/* Use 2500 as a mini-jumbo MTU for FCoE */
686#define BNX2X_FCOE_MINI_JUMBO_MTU 2500 686#define BNX2X_FCOE_MINI_JUMBO_MTU 2500
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
index ee350bde1818..f2d1ff10054b 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
@@ -3117,7 +3117,7 @@ int bnx2x_poll(struct napi_struct *napi, int budget)
3117 return work_done; 3117 return work_done;
3118} 3118}
3119 3119
3120#ifdef CONFIG_NET_LL_RX_POLL 3120#ifdef CONFIG_NET_RX_BUSY_POLL
3121/* must be called with local_bh_disable()d */ 3121/* must be called with local_bh_disable()d */
3122int bnx2x_low_latency_recv(struct napi_struct *napi) 3122int bnx2x_low_latency_recv(struct napi_struct *napi)
3123{ 3123{
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
index e5da07858a2f..e06186c305d8 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
@@ -12026,7 +12026,7 @@ static const struct net_device_ops bnx2x_netdev_ops = {
12026 .ndo_fcoe_get_wwn = bnx2x_fcoe_get_wwn, 12026 .ndo_fcoe_get_wwn = bnx2x_fcoe_get_wwn,
12027#endif 12027#endif
12028 12028
12029#ifdef CONFIG_NET_LL_RX_POLL 12029#ifdef CONFIG_NET_RX_BUSY_POLL
12030 .ndo_busy_poll = bnx2x_low_latency_recv, 12030 .ndo_busy_poll = bnx2x_low_latency_recv,
12031#endif 12031#endif
12032}; 12032};
diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c
index d964f302ac94..ddebc7a5dda0 100644
--- a/drivers/net/ethernet/broadcom/tg3.c
+++ b/drivers/net/ethernet/broadcom/tg3.c
@@ -17625,7 +17625,8 @@ err_out_free_res:
17625 pci_release_regions(pdev); 17625 pci_release_regions(pdev);
17626 17626
17627err_out_disable_pdev: 17627err_out_disable_pdev:
17628 pci_disable_device(pdev); 17628 if (pci_is_enabled(pdev))
17629 pci_disable_device(pdev);
17629 pci_set_drvdata(pdev, NULL); 17630 pci_set_drvdata(pdev, NULL);
17630 return err; 17631 return err;
17631} 17632}
@@ -17773,7 +17774,8 @@ static pci_ers_result_t tg3_io_error_detected(struct pci_dev *pdev,
17773 17774
17774 rtnl_lock(); 17775 rtnl_lock();
17775 17776
17776 if (!netif_running(netdev)) 17777 /* We probably don't have netdev yet */
17778 if (!netdev || !netif_running(netdev))
17777 goto done; 17779 goto done;
17778 17780
17779 tg3_phy_stop(tp); 17781 tg3_phy_stop(tp);
diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c
index 2df48bb0f1ca..181edb522450 100644
--- a/drivers/net/ethernet/emulex/benet/be_main.c
+++ b/drivers/net/ethernet/emulex/benet/be_main.c
@@ -782,16 +782,22 @@ static struct sk_buff *be_insert_vlan_in_pkt(struct be_adapter *adapter,
782 782
783 if (vlan_tx_tag_present(skb)) 783 if (vlan_tx_tag_present(skb))
784 vlan_tag = be_get_tx_vlan_tag(adapter, skb); 784 vlan_tag = be_get_tx_vlan_tag(adapter, skb);
785 else if (qnq_async_evt_rcvd(adapter) && adapter->pvid) 785
786 vlan_tag = adapter->pvid; 786 if (qnq_async_evt_rcvd(adapter) && adapter->pvid) {
787 if (!vlan_tag)
788 vlan_tag = adapter->pvid;
789 /* f/w workaround to set skip_hw_vlan = 1, informs the F/W to
790 * skip VLAN insertion
791 */
792 if (skip_hw_vlan)
793 *skip_hw_vlan = true;
794 }
787 795
788 if (vlan_tag) { 796 if (vlan_tag) {
789 skb = __vlan_put_tag(skb, htons(ETH_P_8021Q), vlan_tag); 797 skb = __vlan_put_tag(skb, htons(ETH_P_8021Q), vlan_tag);
790 if (unlikely(!skb)) 798 if (unlikely(!skb))
791 return skb; 799 return skb;
792 skb->vlan_tci = 0; 800 skb->vlan_tci = 0;
793 if (skip_hw_vlan)
794 *skip_hw_vlan = true;
795 } 801 }
796 802
797 /* Insert the outer VLAN, if any */ 803 /* Insert the outer VLAN, if any */
diff --git a/drivers/net/ethernet/freescale/fec.h b/drivers/net/ethernet/freescale/fec.h
index 2b0a0ea4f8e7..ae236009f1a8 100644
--- a/drivers/net/ethernet/freescale/fec.h
+++ b/drivers/net/ethernet/freescale/fec.h
@@ -259,6 +259,7 @@ struct bufdesc_ex {
259struct fec_enet_delayed_work { 259struct fec_enet_delayed_work {
260 struct delayed_work delay_work; 260 struct delayed_work delay_work;
261 bool timeout; 261 bool timeout;
262 bool trig_tx;
262}; 263};
263 264
264/* The FEC buffer descriptors track the ring buffers. The rx_bd_base and 265/* The FEC buffer descriptors track the ring buffers. The rx_bd_base and
diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c
index d3ad5ea711d3..77ea0db0bbfc 100644
--- a/drivers/net/ethernet/freescale/fec_main.c
+++ b/drivers/net/ethernet/freescale/fec_main.c
@@ -93,6 +93,20 @@ static void set_multicast_list(struct net_device *ndev);
93#define FEC_QUIRK_HAS_CSUM (1 << 5) 93#define FEC_QUIRK_HAS_CSUM (1 << 5)
94/* Controller has hardware vlan support */ 94/* Controller has hardware vlan support */
95#define FEC_QUIRK_HAS_VLAN (1 << 6) 95#define FEC_QUIRK_HAS_VLAN (1 << 6)
96/* ENET IP errata ERR006358
97 *
98 * If the ready bit in the transmit buffer descriptor (TxBD[R]) is previously
99 * detected as not set during a prior frame transmission, then the
100 * ENET_TDAR[TDAR] bit is cleared at a later time, even if additional TxBDs
101 * were added to the ring and the ENET_TDAR[TDAR] bit is set. This results in
102 * If the ready bit in the transmit buffer descriptor (TxBD[R]) is previously
103 * detected as not set during a prior frame transmission, then the
104 * ENET_TDAR[TDAR] bit is cleared at a later time, even if additional TxBDs
105 * were added to the ring and the ENET_TDAR[TDAR] bit is set. This results in
106 * frames not being transmitted until there is a 0-to-1 transition on
107 * ENET_TDAR[TDAR].
108 */
109#define FEC_QUIRK_ERR006358 (1 << 7)
96 110
97static struct platform_device_id fec_devtype[] = { 111static struct platform_device_id fec_devtype[] = {
98 { 112 {
@@ -112,7 +126,7 @@ static struct platform_device_id fec_devtype[] = {
112 .name = "imx6q-fec", 126 .name = "imx6q-fec",
113 .driver_data = FEC_QUIRK_ENET_MAC | FEC_QUIRK_HAS_GBIT | 127 .driver_data = FEC_QUIRK_ENET_MAC | FEC_QUIRK_HAS_GBIT |
114 FEC_QUIRK_HAS_BUFDESC_EX | FEC_QUIRK_HAS_CSUM | 128 FEC_QUIRK_HAS_BUFDESC_EX | FEC_QUIRK_HAS_CSUM |
115 FEC_QUIRK_HAS_VLAN, 129 FEC_QUIRK_HAS_VLAN | FEC_QUIRK_ERR006358,
116 }, { 130 }, {
117 .name = "mvf600-fec", 131 .name = "mvf600-fec",
118 .driver_data = FEC_QUIRK_ENET_MAC, 132 .driver_data = FEC_QUIRK_ENET_MAC,
@@ -275,16 +289,11 @@ fec_enet_start_xmit(struct sk_buff *skb, struct net_device *ndev)
275 struct fec_enet_private *fep = netdev_priv(ndev); 289 struct fec_enet_private *fep = netdev_priv(ndev);
276 const struct platform_device_id *id_entry = 290 const struct platform_device_id *id_entry =
277 platform_get_device_id(fep->pdev); 291 platform_get_device_id(fep->pdev);
278 struct bufdesc *bdp; 292 struct bufdesc *bdp, *bdp_pre;
279 void *bufaddr; 293 void *bufaddr;
280 unsigned short status; 294 unsigned short status;
281 unsigned int index; 295 unsigned int index;
282 296
283 if (!fep->link) {
284 /* Link is down or auto-negotiation is in progress. */
285 return NETDEV_TX_BUSY;
286 }
287
288 /* Fill in a Tx ring entry */ 297 /* Fill in a Tx ring entry */
289 bdp = fep->cur_tx; 298 bdp = fep->cur_tx;
290 299
@@ -370,6 +379,15 @@ fec_enet_start_xmit(struct sk_buff *skb, struct net_device *ndev)
370 ebdp->cbd_esc |= BD_ENET_TX_PINS; 379 ebdp->cbd_esc |= BD_ENET_TX_PINS;
371 } 380 }
372 } 381 }
382
383 bdp_pre = fec_enet_get_prevdesc(bdp, fep->bufdesc_ex);
384 if ((id_entry->driver_data & FEC_QUIRK_ERR006358) &&
385 !(bdp_pre->cbd_sc & BD_ENET_TX_READY)) {
386 fep->delay_work.trig_tx = true;
387 schedule_delayed_work(&(fep->delay_work.delay_work),
388 msecs_to_jiffies(1));
389 }
390
373 /* If this was the last BD in the ring, start at the beginning again. */ 391 /* If this was the last BD in the ring, start at the beginning again. */
374 if (status & BD_ENET_TX_WRAP) 392 if (status & BD_ENET_TX_WRAP)
375 bdp = fep->tx_bd_base; 393 bdp = fep->tx_bd_base;
@@ -689,6 +707,11 @@ static void fec_enet_work(struct work_struct *work)
689 fec_restart(fep->netdev, fep->full_duplex); 707 fec_restart(fep->netdev, fep->full_duplex);
690 netif_wake_queue(fep->netdev); 708 netif_wake_queue(fep->netdev);
691 } 709 }
710
711 if (fep->delay_work.trig_tx) {
712 fep->delay_work.trig_tx = false;
713 writel(0, fep->hwp + FEC_X_DES_ACTIVE);
714 }
692} 715}
693 716
694static void 717static void
@@ -2279,4 +2302,5 @@ static struct platform_driver fec_driver = {
2279 2302
2280module_platform_driver(fec_driver); 2303module_platform_driver(fec_driver);
2281 2304
2305MODULE_ALIAS("platform:"DRIVER_NAME);
2282MODULE_LICENSE("GPL"); 2306MODULE_LICENSE("GPL");
diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c
index 6a0c1b66ce54..c1d72c03cb59 100644
--- a/drivers/net/ethernet/intel/igb/igb_main.c
+++ b/drivers/net/ethernet/intel/igb/igb_main.c
@@ -3739,9 +3739,8 @@ static void igb_set_rx_mode(struct net_device *netdev)
3739 rctl &= ~(E1000_RCTL_UPE | E1000_RCTL_MPE | E1000_RCTL_VFE); 3739 rctl &= ~(E1000_RCTL_UPE | E1000_RCTL_MPE | E1000_RCTL_VFE);
3740 3740
3741 if (netdev->flags & IFF_PROMISC) { 3741 if (netdev->flags & IFF_PROMISC) {
3742 u32 mrqc = rd32(E1000_MRQC);
3743 /* retain VLAN HW filtering if in VT mode */ 3742 /* retain VLAN HW filtering if in VT mode */
3744 if (mrqc & E1000_MRQC_ENABLE_VMDQ) 3743 if (adapter->vfs_allocated_count)
3745 rctl |= E1000_RCTL_VFE; 3744 rctl |= E1000_RCTL_VFE;
3746 rctl |= (E1000_RCTL_UPE | E1000_RCTL_MPE); 3745 rctl |= (E1000_RCTL_UPE | E1000_RCTL_MPE);
3747 vmolr |= (E1000_VMOLR_ROPE | E1000_VMOLR_MPME); 3746 vmolr |= (E1000_VMOLR_ROPE | E1000_VMOLR_MPME);
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe.h b/drivers/net/ethernet/intel/ixgbe/ixgbe.h
index 7be725cdfea8..a6494e5daffe 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe.h
@@ -54,7 +54,7 @@
54 54
55#include <net/busy_poll.h> 55#include <net/busy_poll.h>
56 56
57#ifdef CONFIG_NET_LL_RX_POLL 57#ifdef CONFIG_NET_RX_BUSY_POLL
58#define LL_EXTENDED_STATS 58#define LL_EXTENDED_STATS
59#endif 59#endif
60/* common prefix used by pr_<> macros */ 60/* common prefix used by pr_<> macros */
@@ -366,7 +366,7 @@ struct ixgbe_q_vector {
366 struct rcu_head rcu; /* to avoid race with update stats on free */ 366 struct rcu_head rcu; /* to avoid race with update stats on free */
367 char name[IFNAMSIZ + 9]; 367 char name[IFNAMSIZ + 9];
368 368
369#ifdef CONFIG_NET_LL_RX_POLL 369#ifdef CONFIG_NET_RX_BUSY_POLL
370 unsigned int state; 370 unsigned int state;
371#define IXGBE_QV_STATE_IDLE 0 371#define IXGBE_QV_STATE_IDLE 0
372#define IXGBE_QV_STATE_NAPI 1 /* NAPI owns this QV */ 372#define IXGBE_QV_STATE_NAPI 1 /* NAPI owns this QV */
@@ -377,12 +377,12 @@ struct ixgbe_q_vector {
377#define IXGBE_QV_YIELD (IXGBE_QV_STATE_NAPI_YIELD | IXGBE_QV_STATE_POLL_YIELD) 377#define IXGBE_QV_YIELD (IXGBE_QV_STATE_NAPI_YIELD | IXGBE_QV_STATE_POLL_YIELD)
378#define IXGBE_QV_USER_PEND (IXGBE_QV_STATE_POLL | IXGBE_QV_STATE_POLL_YIELD) 378#define IXGBE_QV_USER_PEND (IXGBE_QV_STATE_POLL | IXGBE_QV_STATE_POLL_YIELD)
379 spinlock_t lock; 379 spinlock_t lock;
380#endif /* CONFIG_NET_LL_RX_POLL */ 380#endif /* CONFIG_NET_RX_BUSY_POLL */
381 381
382 /* for dynamic allocation of rings associated with this q_vector */ 382 /* for dynamic allocation of rings associated with this q_vector */
383 struct ixgbe_ring ring[0] ____cacheline_internodealigned_in_smp; 383 struct ixgbe_ring ring[0] ____cacheline_internodealigned_in_smp;
384}; 384};
385#ifdef CONFIG_NET_LL_RX_POLL 385#ifdef CONFIG_NET_RX_BUSY_POLL
386static inline void ixgbe_qv_init_lock(struct ixgbe_q_vector *q_vector) 386static inline void ixgbe_qv_init_lock(struct ixgbe_q_vector *q_vector)
387{ 387{
388 388
@@ -462,7 +462,7 @@ static inline bool ixgbe_qv_ll_polling(struct ixgbe_q_vector *q_vector)
462 WARN_ON(!(q_vector->state & IXGBE_QV_LOCKED)); 462 WARN_ON(!(q_vector->state & IXGBE_QV_LOCKED));
463 return q_vector->state & IXGBE_QV_USER_PEND; 463 return q_vector->state & IXGBE_QV_USER_PEND;
464} 464}
465#else /* CONFIG_NET_LL_RX_POLL */ 465#else /* CONFIG_NET_RX_BUSY_POLL */
466static inline void ixgbe_qv_init_lock(struct ixgbe_q_vector *q_vector) 466static inline void ixgbe_qv_init_lock(struct ixgbe_q_vector *q_vector)
467{ 467{
468} 468}
@@ -491,7 +491,7 @@ static inline bool ixgbe_qv_ll_polling(struct ixgbe_q_vector *q_vector)
491{ 491{
492 return false; 492 return false;
493} 493}
494#endif /* CONFIG_NET_LL_RX_POLL */ 494#endif /* CONFIG_NET_RX_BUSY_POLL */
495 495
496#ifdef CONFIG_IXGBE_HWMON 496#ifdef CONFIG_IXGBE_HWMON
497 497
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82598.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82598.c
index ac780770863d..7a77f37a7cbc 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82598.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82598.c
@@ -108,9 +108,8 @@ s32 ixgbe_dcb_config_tx_desc_arbiter_82598(struct ixgbe_hw *hw,
108 108
109 /* Enable arbiter */ 109 /* Enable arbiter */
110 reg &= ~IXGBE_DPMCS_ARBDIS; 110 reg &= ~IXGBE_DPMCS_ARBDIS;
111 /* Enable DFP and Recycle mode */
112 reg |= (IXGBE_DPMCS_TDPAC | IXGBE_DPMCS_TRM);
113 reg |= IXGBE_DPMCS_TSOEF; 111 reg |= IXGBE_DPMCS_TSOEF;
112
114 /* Configure Max TSO packet size 34KB including payload and headers */ 113 /* Configure Max TSO packet size 34KB including payload and headers */
115 reg |= (0x4 << IXGBE_DPMCS_MTSOS_SHIFT); 114 reg |= (0x4 << IXGBE_DPMCS_MTSOS_SHIFT);
116 115
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
index bad8f14b1941..be4b1fb3d0d2 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
@@ -1998,7 +1998,7 @@ static int ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
1998 return total_rx_packets; 1998 return total_rx_packets;
1999} 1999}
2000 2000
2001#ifdef CONFIG_NET_LL_RX_POLL 2001#ifdef CONFIG_NET_RX_BUSY_POLL
2002/* must be called with local_bh_disable()d */ 2002/* must be called with local_bh_disable()d */
2003static int ixgbe_low_latency_recv(struct napi_struct *napi) 2003static int ixgbe_low_latency_recv(struct napi_struct *napi)
2004{ 2004{
@@ -2030,7 +2030,7 @@ static int ixgbe_low_latency_recv(struct napi_struct *napi)
2030 2030
2031 return found; 2031 return found;
2032} 2032}
2033#endif /* CONFIG_NET_LL_RX_POLL */ 2033#endif /* CONFIG_NET_RX_BUSY_POLL */
2034 2034
2035/** 2035/**
2036 * ixgbe_configure_msix - Configure MSI-X hardware 2036 * ixgbe_configure_msix - Configure MSI-X hardware
@@ -7227,7 +7227,7 @@ static const struct net_device_ops ixgbe_netdev_ops = {
7227#ifdef CONFIG_NET_POLL_CONTROLLER 7227#ifdef CONFIG_NET_POLL_CONTROLLER
7228 .ndo_poll_controller = ixgbe_netpoll, 7228 .ndo_poll_controller = ixgbe_netpoll,
7229#endif 7229#endif
7230#ifdef CONFIG_NET_LL_RX_POLL 7230#ifdef CONFIG_NET_RX_BUSY_POLL
7231 .ndo_busy_poll = ixgbe_low_latency_recv, 7231 .ndo_busy_poll = ixgbe_low_latency_recv,
7232#endif 7232#endif
7233#ifdef IXGBE_FCOE 7233#ifdef IXGBE_FCOE
diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c
index 712779fb12b7..b017818bccae 100644
--- a/drivers/net/ethernet/marvell/mvneta.c
+++ b/drivers/net/ethernet/marvell/mvneta.c
@@ -88,6 +88,8 @@
88#define MVNETA_TX_IN_PRGRS BIT(1) 88#define MVNETA_TX_IN_PRGRS BIT(1)
89#define MVNETA_TX_FIFO_EMPTY BIT(8) 89#define MVNETA_TX_FIFO_EMPTY BIT(8)
90#define MVNETA_RX_MIN_FRAME_SIZE 0x247c 90#define MVNETA_RX_MIN_FRAME_SIZE 0x247c
91#define MVNETA_SGMII_SERDES_CFG 0x24A0
92#define MVNETA_SGMII_SERDES_PROTO 0x0cc7
91#define MVNETA_TYPE_PRIO 0x24bc 93#define MVNETA_TYPE_PRIO 0x24bc
92#define MVNETA_FORCE_UNI BIT(21) 94#define MVNETA_FORCE_UNI BIT(21)
93#define MVNETA_TXQ_CMD_1 0x24e4 95#define MVNETA_TXQ_CMD_1 0x24e4
@@ -655,6 +657,8 @@ static void mvneta_port_sgmii_config(struct mvneta_port *pp)
655 val = mvreg_read(pp, MVNETA_GMAC_CTRL_2); 657 val = mvreg_read(pp, MVNETA_GMAC_CTRL_2);
656 val |= MVNETA_GMAC2_PSC_ENABLE; 658 val |= MVNETA_GMAC2_PSC_ENABLE;
657 mvreg_write(pp, MVNETA_GMAC_CTRL_2, val); 659 mvreg_write(pp, MVNETA_GMAC_CTRL_2, val);
660
661 mvreg_write(pp, MVNETA_SGMII_SERDES_CFG, MVNETA_SGMII_SERDES_PROTO);
658} 662}
659 663
660/* Start the Ethernet port RX and TX activity */ 664/* Start the Ethernet port RX and TX activity */
@@ -2728,28 +2732,24 @@ static int mvneta_probe(struct platform_device *pdev)
2728 2732
2729 pp = netdev_priv(dev); 2733 pp = netdev_priv(dev);
2730 2734
2731 pp->tx_done_timer.function = mvneta_tx_done_timer_callback;
2732 init_timer(&pp->tx_done_timer);
2733 clear_bit(MVNETA_F_TX_DONE_TIMER_BIT, &pp->flags);
2734
2735 pp->weight = MVNETA_RX_POLL_WEIGHT; 2735 pp->weight = MVNETA_RX_POLL_WEIGHT;
2736 pp->phy_node = phy_node; 2736 pp->phy_node = phy_node;
2737 pp->phy_interface = phy_mode; 2737 pp->phy_interface = phy_mode;
2738 2738
2739 pp->base = of_iomap(dn, 0);
2740 if (pp->base == NULL) {
2741 err = -ENOMEM;
2742 goto err_free_irq;
2743 }
2744
2745 pp->clk = devm_clk_get(&pdev->dev, NULL); 2739 pp->clk = devm_clk_get(&pdev->dev, NULL);
2746 if (IS_ERR(pp->clk)) { 2740 if (IS_ERR(pp->clk)) {
2747 err = PTR_ERR(pp->clk); 2741 err = PTR_ERR(pp->clk);
2748 goto err_unmap; 2742 goto err_free_irq;
2749 } 2743 }
2750 2744
2751 clk_prepare_enable(pp->clk); 2745 clk_prepare_enable(pp->clk);
2752 2746
2747 pp->base = of_iomap(dn, 0);
2748 if (pp->base == NULL) {
2749 err = -ENOMEM;
2750 goto err_clk;
2751 }
2752
2753 dt_mac_addr = of_get_mac_address(dn); 2753 dt_mac_addr = of_get_mac_address(dn);
2754 if (dt_mac_addr && is_valid_ether_addr(dt_mac_addr)) { 2754 if (dt_mac_addr && is_valid_ether_addr(dt_mac_addr)) {
2755 mac_from = "device tree"; 2755 mac_from = "device tree";
@@ -2766,6 +2766,9 @@ static int mvneta_probe(struct platform_device *pdev)
2766 } 2766 }
2767 2767
2768 pp->tx_done_timer.data = (unsigned long)dev; 2768 pp->tx_done_timer.data = (unsigned long)dev;
2769 pp->tx_done_timer.function = mvneta_tx_done_timer_callback;
2770 init_timer(&pp->tx_done_timer);
2771 clear_bit(MVNETA_F_TX_DONE_TIMER_BIT, &pp->flags);
2769 2772
2770 pp->tx_ring_size = MVNETA_MAX_TXD; 2773 pp->tx_ring_size = MVNETA_MAX_TXD;
2771 pp->rx_ring_size = MVNETA_MAX_RXD; 2774 pp->rx_ring_size = MVNETA_MAX_RXD;
@@ -2776,7 +2779,7 @@ static int mvneta_probe(struct platform_device *pdev)
2776 err = mvneta_init(pp, phy_addr); 2779 err = mvneta_init(pp, phy_addr);
2777 if (err < 0) { 2780 if (err < 0) {
2778 dev_err(&pdev->dev, "can't init eth hal\n"); 2781 dev_err(&pdev->dev, "can't init eth hal\n");
2779 goto err_clk; 2782 goto err_unmap;
2780 } 2783 }
2781 mvneta_port_power_up(pp, phy_mode); 2784 mvneta_port_power_up(pp, phy_mode);
2782 2785
@@ -2806,10 +2809,10 @@ static int mvneta_probe(struct platform_device *pdev)
2806 2809
2807err_deinit: 2810err_deinit:
2808 mvneta_deinit(pp); 2811 mvneta_deinit(pp);
2809err_clk:
2810 clk_disable_unprepare(pp->clk);
2811err_unmap: 2812err_unmap:
2812 iounmap(pp->base); 2813 iounmap(pp->base);
2814err_clk:
2815 clk_disable_unprepare(pp->clk);
2813err_free_irq: 2816err_free_irq:
2814 irq_dispose_mapping(dev->irq); 2817 irq_dispose_mapping(dev->irq);
2815err_free_netdev: 2818err_free_netdev:
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
index 727874f575ce..a28cd801a236 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
@@ -223,7 +223,7 @@ static int mlx4_en_get_sset_count(struct net_device *dev, int sset)
223 case ETH_SS_STATS: 223 case ETH_SS_STATS:
224 return (priv->stats_bitmap ? bit_count : NUM_ALL_STATS) + 224 return (priv->stats_bitmap ? bit_count : NUM_ALL_STATS) +
225 (priv->tx_ring_num * 2) + 225 (priv->tx_ring_num * 2) +
226#ifdef CONFIG_NET_LL_RX_POLL 226#ifdef CONFIG_NET_RX_BUSY_POLL
227 (priv->rx_ring_num * 5); 227 (priv->rx_ring_num * 5);
228#else 228#else
229 (priv->rx_ring_num * 2); 229 (priv->rx_ring_num * 2);
@@ -276,7 +276,7 @@ static void mlx4_en_get_ethtool_stats(struct net_device *dev,
276 for (i = 0; i < priv->rx_ring_num; i++) { 276 for (i = 0; i < priv->rx_ring_num; i++) {
277 data[index++] = priv->rx_ring[i].packets; 277 data[index++] = priv->rx_ring[i].packets;
278 data[index++] = priv->rx_ring[i].bytes; 278 data[index++] = priv->rx_ring[i].bytes;
279#ifdef CONFIG_NET_LL_RX_POLL 279#ifdef CONFIG_NET_RX_BUSY_POLL
280 data[index++] = priv->rx_ring[i].yields; 280 data[index++] = priv->rx_ring[i].yields;
281 data[index++] = priv->rx_ring[i].misses; 281 data[index++] = priv->rx_ring[i].misses;
282 data[index++] = priv->rx_ring[i].cleaned; 282 data[index++] = priv->rx_ring[i].cleaned;
@@ -344,7 +344,7 @@ static void mlx4_en_get_strings(struct net_device *dev,
344 "rx%d_packets", i); 344 "rx%d_packets", i);
345 sprintf(data + (index++) * ETH_GSTRING_LEN, 345 sprintf(data + (index++) * ETH_GSTRING_LEN,
346 "rx%d_bytes", i); 346 "rx%d_bytes", i);
347#ifdef CONFIG_NET_LL_RX_POLL 347#ifdef CONFIG_NET_RX_BUSY_POLL
348 sprintf(data + (index++) * ETH_GSTRING_LEN, 348 sprintf(data + (index++) * ETH_GSTRING_LEN,
349 "rx%d_napi_yield", i); 349 "rx%d_napi_yield", i);
350 sprintf(data + (index++) * ETH_GSTRING_LEN, 350 sprintf(data + (index++) * ETH_GSTRING_LEN,
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
index 5eac871399d8..fa37b7a61213 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
@@ -68,7 +68,7 @@ int mlx4_en_setup_tc(struct net_device *dev, u8 up)
68 return 0; 68 return 0;
69} 69}
70 70
71#ifdef CONFIG_NET_LL_RX_POLL 71#ifdef CONFIG_NET_RX_BUSY_POLL
72/* must be called with local_bh_disable()d */ 72/* must be called with local_bh_disable()d */
73static int mlx4_en_low_latency_recv(struct napi_struct *napi) 73static int mlx4_en_low_latency_recv(struct napi_struct *napi)
74{ 74{
@@ -94,7 +94,7 @@ static int mlx4_en_low_latency_recv(struct napi_struct *napi)
94 94
95 return done; 95 return done;
96} 96}
97#endif /* CONFIG_NET_LL_RX_POLL */ 97#endif /* CONFIG_NET_RX_BUSY_POLL */
98 98
99#ifdef CONFIG_RFS_ACCEL 99#ifdef CONFIG_RFS_ACCEL
100 100
@@ -2140,7 +2140,7 @@ static const struct net_device_ops mlx4_netdev_ops = {
2140#ifdef CONFIG_RFS_ACCEL 2140#ifdef CONFIG_RFS_ACCEL
2141 .ndo_rx_flow_steer = mlx4_en_filter_rfs, 2141 .ndo_rx_flow_steer = mlx4_en_filter_rfs,
2142#endif 2142#endif
2143#ifdef CONFIG_NET_LL_RX_POLL 2143#ifdef CONFIG_NET_RX_BUSY_POLL
2144 .ndo_busy_poll = mlx4_en_low_latency_recv, 2144 .ndo_busy_poll = mlx4_en_low_latency_recv,
2145#endif 2145#endif
2146}; 2146};
diff --git a/drivers/net/ethernet/mellanox/mlx4/fw.c b/drivers/net/ethernet/mellanox/mlx4/fw.c
index 8873d6802c80..6fc6dabc78d5 100644
--- a/drivers/net/ethernet/mellanox/mlx4/fw.c
+++ b/drivers/net/ethernet/mellanox/mlx4/fw.c
@@ -845,16 +845,7 @@ int mlx4_QUERY_PORT_wrapper(struct mlx4_dev *dev, int slave,
845 MLX4_CMD_NATIVE); 845 MLX4_CMD_NATIVE);
846 846
847 if (!err && dev->caps.function != slave) { 847 if (!err && dev->caps.function != slave) {
848 /* if config MAC in DB use it */ 848 def_mac = priv->mfunc.master.vf_oper[slave].vport[vhcr->in_modifier].state.mac;
849 if (priv->mfunc.master.vf_oper[slave].vport[vhcr->in_modifier].state.mac)
850 def_mac = priv->mfunc.master.vf_oper[slave].vport[vhcr->in_modifier].state.mac;
851 else {
852 /* set slave default_mac address */
853 MLX4_GET(def_mac, outbox->buf, QUERY_PORT_MAC_OFFSET);
854 def_mac += slave << 8;
855 priv->mfunc.master.vf_admin[slave].vport[vhcr->in_modifier].mac = def_mac;
856 }
857
858 MLX4_PUT(outbox->buf, def_mac, QUERY_PORT_MAC_OFFSET); 849 MLX4_PUT(outbox->buf, def_mac, QUERY_PORT_MAC_OFFSET);
859 850
860 /* get port type - currently only eth is enabled */ 851 /* get port type - currently only eth is enabled */
diff --git a/drivers/net/ethernet/mellanox/mlx4/main.c b/drivers/net/ethernet/mellanox/mlx4/main.c
index e85af922dcdc..36be3208786a 100644
--- a/drivers/net/ethernet/mellanox/mlx4/main.c
+++ b/drivers/net/ethernet/mellanox/mlx4/main.c
@@ -371,7 +371,7 @@ static int mlx4_dev_cap(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
371 371
372 dev->caps.sqp_demux = (mlx4_is_master(dev)) ? MLX4_MAX_NUM_SLAVES : 0; 372 dev->caps.sqp_demux = (mlx4_is_master(dev)) ? MLX4_MAX_NUM_SLAVES : 0;
373 373
374 if (!enable_64b_cqe_eqe) { 374 if (!enable_64b_cqe_eqe && !mlx4_is_slave(dev)) {
375 if (dev_cap->flags & 375 if (dev_cap->flags &
376 (MLX4_DEV_CAP_FLAG_64B_CQE | MLX4_DEV_CAP_FLAG_64B_EQE)) { 376 (MLX4_DEV_CAP_FLAG_64B_CQE | MLX4_DEV_CAP_FLAG_64B_EQE)) {
377 mlx4_warn(dev, "64B EQEs/CQEs supported by the device but not enabled\n"); 377 mlx4_warn(dev, "64B EQEs/CQEs supported by the device but not enabled\n");
diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
index 35fb60e2320c..5e0aa569306a 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
+++ b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
@@ -292,7 +292,7 @@ struct mlx4_en_rx_ring {
292 void *rx_info; 292 void *rx_info;
293 unsigned long bytes; 293 unsigned long bytes;
294 unsigned long packets; 294 unsigned long packets;
295#ifdef CONFIG_NET_LL_RX_POLL 295#ifdef CONFIG_NET_RX_BUSY_POLL
296 unsigned long yields; 296 unsigned long yields;
297 unsigned long misses; 297 unsigned long misses;
298 unsigned long cleaned; 298 unsigned long cleaned;
@@ -318,7 +318,7 @@ struct mlx4_en_cq {
318 struct mlx4_cqe *buf; 318 struct mlx4_cqe *buf;
319#define MLX4_EN_OPCODE_ERROR 0x1e 319#define MLX4_EN_OPCODE_ERROR 0x1e
320 320
321#ifdef CONFIG_NET_LL_RX_POLL 321#ifdef CONFIG_NET_RX_BUSY_POLL
322 unsigned int state; 322 unsigned int state;
323#define MLX4_EN_CQ_STATE_IDLE 0 323#define MLX4_EN_CQ_STATE_IDLE 0
324#define MLX4_EN_CQ_STATE_NAPI 1 /* NAPI owns this CQ */ 324#define MLX4_EN_CQ_STATE_NAPI 1 /* NAPI owns this CQ */
@@ -329,7 +329,7 @@ struct mlx4_en_cq {
329#define CQ_YIELD (MLX4_EN_CQ_STATE_NAPI_YIELD | MLX4_EN_CQ_STATE_POLL_YIELD) 329#define CQ_YIELD (MLX4_EN_CQ_STATE_NAPI_YIELD | MLX4_EN_CQ_STATE_POLL_YIELD)
330#define CQ_USER_PEND (MLX4_EN_CQ_STATE_POLL | MLX4_EN_CQ_STATE_POLL_YIELD) 330#define CQ_USER_PEND (MLX4_EN_CQ_STATE_POLL | MLX4_EN_CQ_STATE_POLL_YIELD)
331 spinlock_t poll_lock; /* protects from LLS/napi conflicts */ 331 spinlock_t poll_lock; /* protects from LLS/napi conflicts */
332#endif /* CONFIG_NET_LL_RX_POLL */ 332#endif /* CONFIG_NET_RX_BUSY_POLL */
333}; 333};
334 334
335struct mlx4_en_port_profile { 335struct mlx4_en_port_profile {
@@ -580,7 +580,7 @@ struct mlx4_mac_entry {
580 struct rcu_head rcu; 580 struct rcu_head rcu;
581}; 581};
582 582
583#ifdef CONFIG_NET_LL_RX_POLL 583#ifdef CONFIG_NET_RX_BUSY_POLL
584static inline void mlx4_en_cq_init_lock(struct mlx4_en_cq *cq) 584static inline void mlx4_en_cq_init_lock(struct mlx4_en_cq *cq)
585{ 585{
586 spin_lock_init(&cq->poll_lock); 586 spin_lock_init(&cq->poll_lock);
@@ -687,7 +687,7 @@ static inline bool mlx4_en_cq_ll_polling(struct mlx4_en_cq *cq)
687{ 687{
688 return false; 688 return false;
689} 689}
690#endif /* CONFIG_NET_LL_RX_POLL */ 690#endif /* CONFIG_NET_RX_BUSY_POLL */
691 691
692#define MLX4_EN_WOL_DO_MODIFY (1ULL << 63) 692#define MLX4_EN_WOL_DO_MODIFY (1ULL << 63)
693 693
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
index 205753a04cfc..c571de85d0f9 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
@@ -46,7 +46,7 @@
46#include "mlx5_core.h" 46#include "mlx5_core.h"
47 47
48enum { 48enum {
49 CMD_IF_REV = 3, 49 CMD_IF_REV = 4,
50}; 50};
51 51
52enum { 52enum {
@@ -282,6 +282,12 @@ const char *mlx5_command_str(int command)
282 case MLX5_CMD_OP_TEARDOWN_HCA: 282 case MLX5_CMD_OP_TEARDOWN_HCA:
283 return "TEARDOWN_HCA"; 283 return "TEARDOWN_HCA";
284 284
285 case MLX5_CMD_OP_ENABLE_HCA:
286 return "MLX5_CMD_OP_ENABLE_HCA";
287
288 case MLX5_CMD_OP_DISABLE_HCA:
289 return "MLX5_CMD_OP_DISABLE_HCA";
290
285 case MLX5_CMD_OP_QUERY_PAGES: 291 case MLX5_CMD_OP_QUERY_PAGES:
286 return "QUERY_PAGES"; 292 return "QUERY_PAGES";
287 293
@@ -1113,7 +1119,13 @@ void mlx5_cmd_comp_handler(struct mlx5_core_dev *dev, unsigned long vector)
1113 1119
1114 for (i = 0; i < (1 << cmd->log_sz); i++) { 1120 for (i = 0; i < (1 << cmd->log_sz); i++) {
1115 if (test_bit(i, &vector)) { 1121 if (test_bit(i, &vector)) {
1122 struct semaphore *sem;
1123
1116 ent = cmd->ent_arr[i]; 1124 ent = cmd->ent_arr[i];
1125 if (ent->page_queue)
1126 sem = &cmd->pages_sem;
1127 else
1128 sem = &cmd->sem;
1117 ktime_get_ts(&ent->ts2); 1129 ktime_get_ts(&ent->ts2);
1118 memcpy(ent->out->first.data, ent->lay->out, sizeof(ent->lay->out)); 1130 memcpy(ent->out->first.data, ent->lay->out, sizeof(ent->lay->out));
1119 dump_command(dev, ent, 0); 1131 dump_command(dev, ent, 0);
@@ -1136,10 +1148,7 @@ void mlx5_cmd_comp_handler(struct mlx5_core_dev *dev, unsigned long vector)
1136 } else { 1148 } else {
1137 complete(&ent->done); 1149 complete(&ent->done);
1138 } 1150 }
1139 if (ent->page_queue) 1151 up(sem);
1140 up(&cmd->pages_sem);
1141 else
1142 up(&cmd->sem);
1143 } 1152 }
1144 } 1153 }
1145} 1154}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/debugfs.c b/drivers/net/ethernet/mellanox/mlx5/core/debugfs.c
index 4273c06e2e96..9c7194b26ee2 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/debugfs.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/debugfs.c
@@ -156,7 +156,7 @@ static ssize_t average_read(struct file *filp, char __user *buf, size_t count,
156 stats = filp->private_data; 156 stats = filp->private_data;
157 spin_lock(&stats->lock); 157 spin_lock(&stats->lock);
158 if (stats->n) 158 if (stats->n)
159 field = stats->sum / stats->n; 159 field = div64_u64(stats->sum, stats->n);
160 spin_unlock(&stats->lock); 160 spin_unlock(&stats->lock);
161 ret = snprintf(tbuf, sizeof(tbuf), "%llu\n", field); 161 ret = snprintf(tbuf, sizeof(tbuf), "%llu\n", field);
162 if (ret > 0) { 162 if (ret > 0) {
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c
index 12242de2b0e3..b47739b0b5f6 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c
@@ -249,6 +249,44 @@ static int set_hca_ctrl(struct mlx5_core_dev *dev)
249 return err; 249 return err;
250} 250}
251 251
252static int mlx5_core_enable_hca(struct mlx5_core_dev *dev)
253{
254 int err;
255 struct mlx5_enable_hca_mbox_in in;
256 struct mlx5_enable_hca_mbox_out out;
257
258 memset(&in, 0, sizeof(in));
259 memset(&out, 0, sizeof(out));
260 in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_ENABLE_HCA);
261 err = mlx5_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out));
262 if (err)
263 return err;
264
265 if (out.hdr.status)
266 return mlx5_cmd_status_to_err(&out.hdr);
267
268 return 0;
269}
270
271static int mlx5_core_disable_hca(struct mlx5_core_dev *dev)
272{
273 int err;
274 struct mlx5_disable_hca_mbox_in in;
275 struct mlx5_disable_hca_mbox_out out;
276
277 memset(&in, 0, sizeof(in));
278 memset(&out, 0, sizeof(out));
279 in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_DISABLE_HCA);
280 err = mlx5_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out));
281 if (err)
282 return err;
283
284 if (out.hdr.status)
285 return mlx5_cmd_status_to_err(&out.hdr);
286
287 return 0;
288}
289
252int mlx5_dev_init(struct mlx5_core_dev *dev, struct pci_dev *pdev) 290int mlx5_dev_init(struct mlx5_core_dev *dev, struct pci_dev *pdev)
253{ 291{
254 struct mlx5_priv *priv = &dev->priv; 292 struct mlx5_priv *priv = &dev->priv;
@@ -304,28 +342,41 @@ int mlx5_dev_init(struct mlx5_core_dev *dev, struct pci_dev *pdev)
304 } 342 }
305 343
306 mlx5_pagealloc_init(dev); 344 mlx5_pagealloc_init(dev);
345
346 err = mlx5_core_enable_hca(dev);
347 if (err) {
348 dev_err(&pdev->dev, "enable hca failed\n");
349 goto err_pagealloc_cleanup;
350 }
351
352 err = mlx5_satisfy_startup_pages(dev, 1);
353 if (err) {
354 dev_err(&pdev->dev, "failed to allocate boot pages\n");
355 goto err_disable_hca;
356 }
357
307 err = set_hca_ctrl(dev); 358 err = set_hca_ctrl(dev);
308 if (err) { 359 if (err) {
309 dev_err(&pdev->dev, "set_hca_ctrl failed\n"); 360 dev_err(&pdev->dev, "set_hca_ctrl failed\n");
310 goto err_pagealloc_cleanup; 361 goto reclaim_boot_pages;
311 } 362 }
312 363
313 err = handle_hca_cap(dev); 364 err = handle_hca_cap(dev);
314 if (err) { 365 if (err) {
315 dev_err(&pdev->dev, "handle_hca_cap failed\n"); 366 dev_err(&pdev->dev, "handle_hca_cap failed\n");
316 goto err_pagealloc_cleanup; 367 goto reclaim_boot_pages;
317 } 368 }
318 369
319 err = mlx5_satisfy_startup_pages(dev); 370 err = mlx5_satisfy_startup_pages(dev, 0);
320 if (err) { 371 if (err) {
321 dev_err(&pdev->dev, "failed to allocate startup pages\n"); 372 dev_err(&pdev->dev, "failed to allocate init pages\n");
322 goto err_pagealloc_cleanup; 373 goto reclaim_boot_pages;
323 } 374 }
324 375
325 err = mlx5_pagealloc_start(dev); 376 err = mlx5_pagealloc_start(dev);
326 if (err) { 377 if (err) {
327 dev_err(&pdev->dev, "mlx5_pagealloc_start failed\n"); 378 dev_err(&pdev->dev, "mlx5_pagealloc_start failed\n");
328 goto err_reclaim_pages; 379 goto reclaim_boot_pages;
329 } 380 }
330 381
331 err = mlx5_cmd_init_hca(dev); 382 err = mlx5_cmd_init_hca(dev);
@@ -396,9 +447,12 @@ err_stop_poll:
396err_pagealloc_stop: 447err_pagealloc_stop:
397 mlx5_pagealloc_stop(dev); 448 mlx5_pagealloc_stop(dev);
398 449
399err_reclaim_pages: 450reclaim_boot_pages:
400 mlx5_reclaim_startup_pages(dev); 451 mlx5_reclaim_startup_pages(dev);
401 452
453err_disable_hca:
454 mlx5_core_disable_hca(dev);
455
402err_pagealloc_cleanup: 456err_pagealloc_cleanup:
403 mlx5_pagealloc_cleanup(dev); 457 mlx5_pagealloc_cleanup(dev);
404 mlx5_cmd_cleanup(dev); 458 mlx5_cmd_cleanup(dev);
@@ -434,6 +488,7 @@ void mlx5_dev_cleanup(struct mlx5_core_dev *dev)
434 mlx5_cmd_teardown_hca(dev); 488 mlx5_cmd_teardown_hca(dev);
435 mlx5_pagealloc_stop(dev); 489 mlx5_pagealloc_stop(dev);
436 mlx5_reclaim_startup_pages(dev); 490 mlx5_reclaim_startup_pages(dev);
491 mlx5_core_disable_hca(dev);
437 mlx5_pagealloc_cleanup(dev); 492 mlx5_pagealloc_cleanup(dev);
438 mlx5_cmd_cleanup(dev); 493 mlx5_cmd_cleanup(dev);
439 iounmap(dev->iseg); 494 iounmap(dev->iseg);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c b/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c
index f0bf46339b28..4a3e137931a3 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c
@@ -64,7 +64,7 @@ struct mlx5_query_pages_inbox {
64 64
65struct mlx5_query_pages_outbox { 65struct mlx5_query_pages_outbox {
66 struct mlx5_outbox_hdr hdr; 66 struct mlx5_outbox_hdr hdr;
67 u8 reserved[2]; 67 __be16 num_boot_pages;
68 __be16 func_id; 68 __be16 func_id;
69 __be16 init_pages; 69 __be16 init_pages;
70 __be16 num_pages; 70 __be16 num_pages;
@@ -146,7 +146,7 @@ static struct page *remove_page(struct mlx5_core_dev *dev, u64 addr)
146} 146}
147 147
148static int mlx5_cmd_query_pages(struct mlx5_core_dev *dev, u16 *func_id, 148static int mlx5_cmd_query_pages(struct mlx5_core_dev *dev, u16 *func_id,
149 s16 *pages, s16 *init_pages) 149 s16 *pages, s16 *init_pages, u16 *boot_pages)
150{ 150{
151 struct mlx5_query_pages_inbox in; 151 struct mlx5_query_pages_inbox in;
152 struct mlx5_query_pages_outbox out; 152 struct mlx5_query_pages_outbox out;
@@ -164,8 +164,13 @@ static int mlx5_cmd_query_pages(struct mlx5_core_dev *dev, u16 *func_id,
164 164
165 if (pages) 165 if (pages)
166 *pages = be16_to_cpu(out.num_pages); 166 *pages = be16_to_cpu(out.num_pages);
167
167 if (init_pages) 168 if (init_pages)
168 *init_pages = be16_to_cpu(out.init_pages); 169 *init_pages = be16_to_cpu(out.init_pages);
170
171 if (boot_pages)
172 *boot_pages = be16_to_cpu(out.num_boot_pages);
173
169 *func_id = be16_to_cpu(out.func_id); 174 *func_id = be16_to_cpu(out.func_id);
170 175
171 return err; 176 return err;
@@ -357,19 +362,22 @@ void mlx5_core_req_pages_handler(struct mlx5_core_dev *dev, u16 func_id,
357 queue_work(dev->priv.pg_wq, &req->work); 362 queue_work(dev->priv.pg_wq, &req->work);
358} 363}
359 364
360int mlx5_satisfy_startup_pages(struct mlx5_core_dev *dev) 365int mlx5_satisfy_startup_pages(struct mlx5_core_dev *dev, int boot)
361{ 366{
367 u16 uninitialized_var(boot_pages);
362 s16 uninitialized_var(init_pages); 368 s16 uninitialized_var(init_pages);
363 u16 uninitialized_var(func_id); 369 u16 uninitialized_var(func_id);
364 int err; 370 int err;
365 371
366 err = mlx5_cmd_query_pages(dev, &func_id, NULL, &init_pages); 372 err = mlx5_cmd_query_pages(dev, &func_id, NULL, &init_pages,
373 &boot_pages);
367 if (err) 374 if (err)
368 return err; 375 return err;
369 376
370 mlx5_core_dbg(dev, "requested %d init pages for func_id 0x%x\n", init_pages, func_id);
371 377
372 return give_pages(dev, func_id, init_pages, 0); 378 mlx5_core_dbg(dev, "requested %d init pages and %d boot pages for func_id 0x%x\n",
379 init_pages, boot_pages, func_id);
380 return give_pages(dev, func_id, boot ? boot_pages : init_pages, 0);
373} 381}
374 382
375static int optimal_reclaimed_pages(void) 383static int optimal_reclaimed_pages(void)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/uar.c b/drivers/net/ethernet/mellanox/mlx5/core/uar.c
index 71d4a3937200..68f5d9c77c7b 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/uar.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/uar.c
@@ -164,6 +164,7 @@ int mlx5_alloc_uuars(struct mlx5_core_dev *dev, struct mlx5_uuar_info *uuari)
164 uuari->uars[i].map = ioremap(addr, PAGE_SIZE); 164 uuari->uars[i].map = ioremap(addr, PAGE_SIZE);
165 if (!uuari->uars[i].map) { 165 if (!uuari->uars[i].map) {
166 mlx5_cmd_free_uar(dev, uuari->uars[i].index); 166 mlx5_cmd_free_uar(dev, uuari->uars[i].index);
167 err = -ENOMEM;
167 goto out_count; 168 goto out_count;
168 } 169 }
169 mlx5_core_dbg(dev, "allocated uar index 0x%x, mmaped at %p\n", 170 mlx5_core_dbg(dev, "allocated uar index 0x%x, mmaped at %p\n",
diff --git a/drivers/net/ethernet/oki-semi/pch_gbe/Kconfig b/drivers/net/ethernet/oki-semi/pch_gbe/Kconfig
index cb22341a14a8..a588ffde9700 100644
--- a/drivers/net/ethernet/oki-semi/pch_gbe/Kconfig
+++ b/drivers/net/ethernet/oki-semi/pch_gbe/Kconfig
@@ -4,7 +4,7 @@
4 4
5config PCH_GBE 5config PCH_GBE
6 tristate "OKI SEMICONDUCTOR IOH(ML7223/ML7831) GbE" 6 tristate "OKI SEMICONDUCTOR IOH(ML7223/ML7831) GbE"
7 depends on PCI 7 depends on PCI && (X86 || COMPILE_TEST)
8 select MII 8 select MII
9 select PTP_1588_CLOCK_PCH 9 select PTP_1588_CLOCK_PCH
10 ---help--- 10 ---help---
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h b/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h
index b00cf5665eab..221645e9f182 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h
@@ -1400,8 +1400,8 @@ void qlcnic_pci_camqm_write_2M(struct qlcnic_adapter *, u64, u64);
1400#define ADDR_IN_RANGE(addr, low, high) \ 1400#define ADDR_IN_RANGE(addr, low, high) \
1401 (((addr) < (high)) && ((addr) >= (low))) 1401 (((addr) < (high)) && ((addr) >= (low)))
1402 1402
1403#define QLCRD32(adapter, off) \ 1403#define QLCRD32(adapter, off, err) \
1404 (adapter->ahw->hw_ops->read_reg)(adapter, off) 1404 (adapter->ahw->hw_ops->read_reg)(adapter, off, err)
1405 1405
1406#define QLCWR32(adapter, off, val) \ 1406#define QLCWR32(adapter, off, val) \
1407 adapter->ahw->hw_ops->write_reg(adapter, off, val) 1407 adapter->ahw->hw_ops->write_reg(adapter, off, val)
@@ -1604,7 +1604,7 @@ struct qlcnic_nic_template {
1604struct qlcnic_hardware_ops { 1604struct qlcnic_hardware_ops {
1605 void (*read_crb) (struct qlcnic_adapter *, char *, loff_t, size_t); 1605 void (*read_crb) (struct qlcnic_adapter *, char *, loff_t, size_t);
1606 void (*write_crb) (struct qlcnic_adapter *, char *, loff_t, size_t); 1606 void (*write_crb) (struct qlcnic_adapter *, char *, loff_t, size_t);
1607 int (*read_reg) (struct qlcnic_adapter *, ulong); 1607 int (*read_reg) (struct qlcnic_adapter *, ulong, int *);
1608 int (*write_reg) (struct qlcnic_adapter *, ulong, u32); 1608 int (*write_reg) (struct qlcnic_adapter *, ulong, u32);
1609 void (*get_ocm_win) (struct qlcnic_hardware_context *); 1609 void (*get_ocm_win) (struct qlcnic_hardware_context *);
1610 int (*get_mac_address) (struct qlcnic_adapter *, u8 *); 1610 int (*get_mac_address) (struct qlcnic_adapter *, u8 *);
@@ -1662,12 +1662,6 @@ static inline void qlcnic_write_crb(struct qlcnic_adapter *adapter, char *buf,
1662 adapter->ahw->hw_ops->write_crb(adapter, buf, offset, size); 1662 adapter->ahw->hw_ops->write_crb(adapter, buf, offset, size);
1663} 1663}
1664 1664
1665static inline int qlcnic_hw_read_wx_2M(struct qlcnic_adapter *adapter,
1666 ulong off)
1667{
1668 return adapter->ahw->hw_ops->read_reg(adapter, off);
1669}
1670
1671static inline int qlcnic_hw_write_wx_2M(struct qlcnic_adapter *adapter, 1665static inline int qlcnic_hw_write_wx_2M(struct qlcnic_adapter *adapter,
1672 ulong off, u32 data) 1666 ulong off, u32 data)
1673{ 1667{
@@ -1869,7 +1863,8 @@ static inline void qlcnic_free_mac_list(struct qlcnic_adapter *adapter)
1869 1863
1870static inline void qlcnic_set_mac_filter_count(struct qlcnic_adapter *adapter) 1864static inline void qlcnic_set_mac_filter_count(struct qlcnic_adapter *adapter)
1871{ 1865{
1872 adapter->ahw->hw_ops->set_mac_filter_count(adapter); 1866 if (adapter->ahw->hw_ops->set_mac_filter_count)
1867 adapter->ahw->hw_ops->set_mac_filter_count(adapter);
1873} 1868}
1874 1869
1875static inline void qlcnic_dev_request_reset(struct qlcnic_adapter *adapter, 1870static inline void qlcnic_dev_request_reset(struct qlcnic_adapter *adapter,
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c
index 0913c623a67e..92da9980a0a0 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c
@@ -228,17 +228,17 @@ static int __qlcnic_set_win_base(struct qlcnic_adapter *adapter, u32 addr)
228 return 0; 228 return 0;
229} 229}
230 230
231int qlcnic_83xx_rd_reg_indirect(struct qlcnic_adapter *adapter, ulong addr) 231int qlcnic_83xx_rd_reg_indirect(struct qlcnic_adapter *adapter, ulong addr,
232 int *err)
232{ 233{
233 int ret;
234 struct qlcnic_hardware_context *ahw = adapter->ahw; 234 struct qlcnic_hardware_context *ahw = adapter->ahw;
235 235
236 ret = __qlcnic_set_win_base(adapter, (u32) addr); 236 *err = __qlcnic_set_win_base(adapter, (u32) addr);
237 if (!ret) { 237 if (!*err) {
238 return QLCRDX(ahw, QLCNIC_WILDCARD); 238 return QLCRDX(ahw, QLCNIC_WILDCARD);
239 } else { 239 } else {
240 dev_err(&adapter->pdev->dev, 240 dev_err(&adapter->pdev->dev,
241 "%s failed, addr = 0x%x\n", __func__, (int)addr); 241 "%s failed, addr = 0x%lx\n", __func__, addr);
242 return -EIO; 242 return -EIO;
243 } 243 }
244} 244}
@@ -561,7 +561,7 @@ void qlcnic_83xx_cam_unlock(struct qlcnic_adapter *adapter)
561void qlcnic_83xx_read_crb(struct qlcnic_adapter *adapter, char *buf, 561void qlcnic_83xx_read_crb(struct qlcnic_adapter *adapter, char *buf,
562 loff_t offset, size_t size) 562 loff_t offset, size_t size)
563{ 563{
564 int ret; 564 int ret = 0;
565 u32 data; 565 u32 data;
566 566
567 if (qlcnic_api_lock(adapter)) { 567 if (qlcnic_api_lock(adapter)) {
@@ -571,7 +571,7 @@ void qlcnic_83xx_read_crb(struct qlcnic_adapter *adapter, char *buf,
571 return; 571 return;
572 } 572 }
573 573
574 ret = qlcnic_83xx_rd_reg_indirect(adapter, (u32) offset); 574 data = QLCRD32(adapter, (u32) offset, &ret);
575 qlcnic_api_unlock(adapter); 575 qlcnic_api_unlock(adapter);
576 576
577 if (ret == -EIO) { 577 if (ret == -EIO) {
@@ -580,7 +580,6 @@ void qlcnic_83xx_read_crb(struct qlcnic_adapter *adapter, char *buf,
580 __func__, (u32)offset); 580 __func__, (u32)offset);
581 return; 581 return;
582 } 582 }
583 data = ret;
584 memcpy(buf, &data, size); 583 memcpy(buf, &data, size);
585} 584}
586 585
@@ -2075,18 +2074,25 @@ void qlcnic_83xx_config_intr_coal(struct qlcnic_adapter *adapter)
2075static void qlcnic_83xx_handle_link_aen(struct qlcnic_adapter *adapter, 2074static void qlcnic_83xx_handle_link_aen(struct qlcnic_adapter *adapter,
2076 u32 data[]) 2075 u32 data[])
2077{ 2076{
2077 struct qlcnic_hardware_context *ahw = adapter->ahw;
2078 u8 link_status, duplex; 2078 u8 link_status, duplex;
2079 /* link speed */ 2079 /* link speed */
2080 link_status = LSB(data[3]) & 1; 2080 link_status = LSB(data[3]) & 1;
2081 adapter->ahw->link_speed = MSW(data[2]); 2081 if (link_status) {
2082 adapter->ahw->link_autoneg = MSB(MSW(data[3])); 2082 ahw->link_speed = MSW(data[2]);
2083 adapter->ahw->module_type = MSB(LSW(data[3])); 2083 duplex = LSB(MSW(data[3]));
2084 duplex = LSB(MSW(data[3])); 2084 if (duplex)
2085 if (duplex) 2085 ahw->link_duplex = DUPLEX_FULL;
2086 adapter->ahw->link_duplex = DUPLEX_FULL; 2086 else
2087 else 2087 ahw->link_duplex = DUPLEX_HALF;
2088 adapter->ahw->link_duplex = DUPLEX_HALF; 2088 } else {
2089 adapter->ahw->has_link_events = 1; 2089 ahw->link_speed = SPEED_UNKNOWN;
2090 ahw->link_duplex = DUPLEX_UNKNOWN;
2091 }
2092
2093 ahw->link_autoneg = MSB(MSW(data[3]));
2094 ahw->module_type = MSB(LSW(data[3]));
2095 ahw->has_link_events = 1;
2090 qlcnic_advert_link_change(adapter, link_status); 2096 qlcnic_advert_link_change(adapter, link_status);
2091} 2097}
2092 2098
@@ -2384,9 +2390,9 @@ int qlcnic_83xx_lockless_flash_read32(struct qlcnic_adapter *adapter,
2384 u32 flash_addr, u8 *p_data, 2390 u32 flash_addr, u8 *p_data,
2385 int count) 2391 int count)
2386{ 2392{
2387 int i, ret; 2393 u32 word, range, flash_offset, addr = flash_addr, ret;
2388 u32 word, range, flash_offset, addr = flash_addr;
2389 ulong indirect_add, direct_window; 2394 ulong indirect_add, direct_window;
2395 int i, err = 0;
2390 2396
2391 flash_offset = addr & (QLCNIC_FLASH_SECTOR_SIZE - 1); 2397 flash_offset = addr & (QLCNIC_FLASH_SECTOR_SIZE - 1);
2392 if (addr & 0x3) { 2398 if (addr & 0x3) {
@@ -2404,10 +2410,9 @@ int qlcnic_83xx_lockless_flash_read32(struct qlcnic_adapter *adapter,
2404 /* Multi sector read */ 2410 /* Multi sector read */
2405 for (i = 0; i < count; i++) { 2411 for (i = 0; i < count; i++) {
2406 indirect_add = QLC_83XX_FLASH_DIRECT_DATA(addr); 2412 indirect_add = QLC_83XX_FLASH_DIRECT_DATA(addr);
2407 ret = qlcnic_83xx_rd_reg_indirect(adapter, 2413 ret = QLCRD32(adapter, indirect_add, &err);
2408 indirect_add); 2414 if (err == -EIO)
2409 if (ret == -EIO) 2415 return err;
2410 return -EIO;
2411 2416
2412 word = ret; 2417 word = ret;
2413 *(u32 *)p_data = word; 2418 *(u32 *)p_data = word;
@@ -2428,10 +2433,9 @@ int qlcnic_83xx_lockless_flash_read32(struct qlcnic_adapter *adapter,
2428 /* Single sector read */ 2433 /* Single sector read */
2429 for (i = 0; i < count; i++) { 2434 for (i = 0; i < count; i++) {
2430 indirect_add = QLC_83XX_FLASH_DIRECT_DATA(addr); 2435 indirect_add = QLC_83XX_FLASH_DIRECT_DATA(addr);
2431 ret = qlcnic_83xx_rd_reg_indirect(adapter, 2436 ret = QLCRD32(adapter, indirect_add, &err);
2432 indirect_add); 2437 if (err == -EIO)
2433 if (ret == -EIO) 2438 return err;
2434 return -EIO;
2435 2439
2436 word = ret; 2440 word = ret;
2437 *(u32 *)p_data = word; 2441 *(u32 *)p_data = word;
@@ -2447,10 +2451,13 @@ static int qlcnic_83xx_poll_flash_status_reg(struct qlcnic_adapter *adapter)
2447{ 2451{
2448 u32 status; 2452 u32 status;
2449 int retries = QLC_83XX_FLASH_READ_RETRY_COUNT; 2453 int retries = QLC_83XX_FLASH_READ_RETRY_COUNT;
2454 int err = 0;
2450 2455
2451 do { 2456 do {
2452 status = qlcnic_83xx_rd_reg_indirect(adapter, 2457 status = QLCRD32(adapter, QLC_83XX_FLASH_STATUS, &err);
2453 QLC_83XX_FLASH_STATUS); 2458 if (err == -EIO)
2459 return err;
2460
2454 if ((status & QLC_83XX_FLASH_STATUS_READY) == 2461 if ((status & QLC_83XX_FLASH_STATUS_READY) ==
2455 QLC_83XX_FLASH_STATUS_READY) 2462 QLC_83XX_FLASH_STATUS_READY)
2456 break; 2463 break;
@@ -2502,7 +2509,8 @@ int qlcnic_83xx_disable_flash_write(struct qlcnic_adapter *adapter)
2502 2509
2503int qlcnic_83xx_read_flash_mfg_id(struct qlcnic_adapter *adapter) 2510int qlcnic_83xx_read_flash_mfg_id(struct qlcnic_adapter *adapter)
2504{ 2511{
2505 int ret, mfg_id; 2512 int ret, err = 0;
2513 u32 mfg_id;
2506 2514
2507 if (qlcnic_83xx_lock_flash(adapter)) 2515 if (qlcnic_83xx_lock_flash(adapter))
2508 return -EIO; 2516 return -EIO;
@@ -2517,9 +2525,11 @@ int qlcnic_83xx_read_flash_mfg_id(struct qlcnic_adapter *adapter)
2517 return -EIO; 2525 return -EIO;
2518 } 2526 }
2519 2527
2520 mfg_id = qlcnic_83xx_rd_reg_indirect(adapter, QLC_83XX_FLASH_RDDATA); 2528 mfg_id = QLCRD32(adapter, QLC_83XX_FLASH_RDDATA, &err);
2521 if (mfg_id == -EIO) 2529 if (err == -EIO) {
2522 return -EIO; 2530 qlcnic_83xx_unlock_flash(adapter);
2531 return err;
2532 }
2523 2533
2524 adapter->flash_mfg_id = (mfg_id & 0xFF); 2534 adapter->flash_mfg_id = (mfg_id & 0xFF);
2525 qlcnic_83xx_unlock_flash(adapter); 2535 qlcnic_83xx_unlock_flash(adapter);
@@ -2636,7 +2646,7 @@ int qlcnic_83xx_flash_bulk_write(struct qlcnic_adapter *adapter, u32 addr,
2636 u32 *p_data, int count) 2646 u32 *p_data, int count)
2637{ 2647{
2638 u32 temp; 2648 u32 temp;
2639 int ret = -EIO; 2649 int ret = -EIO, err = 0;
2640 2650
2641 if ((count < QLC_83XX_FLASH_WRITE_MIN) || 2651 if ((count < QLC_83XX_FLASH_WRITE_MIN) ||
2642 (count > QLC_83XX_FLASH_WRITE_MAX)) { 2652 (count > QLC_83XX_FLASH_WRITE_MAX)) {
@@ -2645,8 +2655,10 @@ int qlcnic_83xx_flash_bulk_write(struct qlcnic_adapter *adapter, u32 addr,
2645 return -EIO; 2655 return -EIO;
2646 } 2656 }
2647 2657
2648 temp = qlcnic_83xx_rd_reg_indirect(adapter, 2658 temp = QLCRD32(adapter, QLC_83XX_FLASH_SPI_CONTROL, &err);
2649 QLC_83XX_FLASH_SPI_CONTROL); 2659 if (err == -EIO)
2660 return err;
2661
2650 qlcnic_83xx_wrt_reg_indirect(adapter, QLC_83XX_FLASH_SPI_CONTROL, 2662 qlcnic_83xx_wrt_reg_indirect(adapter, QLC_83XX_FLASH_SPI_CONTROL,
2651 (temp | QLC_83XX_FLASH_SPI_CTRL)); 2663 (temp | QLC_83XX_FLASH_SPI_CTRL));
2652 qlcnic_83xx_wrt_reg_indirect(adapter, QLC_83XX_FLASH_ADDR, 2664 qlcnic_83xx_wrt_reg_indirect(adapter, QLC_83XX_FLASH_ADDR,
@@ -2695,13 +2707,18 @@ int qlcnic_83xx_flash_bulk_write(struct qlcnic_adapter *adapter, u32 addr,
2695 return -EIO; 2707 return -EIO;
2696 } 2708 }
2697 2709
2698 ret = qlcnic_83xx_rd_reg_indirect(adapter, QLC_83XX_FLASH_SPI_STATUS); 2710 ret = QLCRD32(adapter, QLC_83XX_FLASH_SPI_STATUS, &err);
2711 if (err == -EIO)
2712 return err;
2713
2699 if ((ret & QLC_83XX_FLASH_SPI_CTRL) == QLC_83XX_FLASH_SPI_CTRL) { 2714 if ((ret & QLC_83XX_FLASH_SPI_CTRL) == QLC_83XX_FLASH_SPI_CTRL) {
2700 dev_err(&adapter->pdev->dev, "%s: failed at %d\n", 2715 dev_err(&adapter->pdev->dev, "%s: failed at %d\n",
2701 __func__, __LINE__); 2716 __func__, __LINE__);
2702 /* Operation failed, clear error bit */ 2717 /* Operation failed, clear error bit */
2703 temp = qlcnic_83xx_rd_reg_indirect(adapter, 2718 temp = QLCRD32(adapter, QLC_83XX_FLASH_SPI_CONTROL, &err);
2704 QLC_83XX_FLASH_SPI_CONTROL); 2719 if (err == -EIO)
2720 return err;
2721
2705 qlcnic_83xx_wrt_reg_indirect(adapter, 2722 qlcnic_83xx_wrt_reg_indirect(adapter,
2706 QLC_83XX_FLASH_SPI_CONTROL, 2723 QLC_83XX_FLASH_SPI_CONTROL,
2707 (temp | QLC_83XX_FLASH_SPI_CTRL)); 2724 (temp | QLC_83XX_FLASH_SPI_CTRL));
@@ -2823,6 +2840,7 @@ int qlcnic_83xx_ms_mem_write128(struct qlcnic_adapter *adapter, u64 addr,
2823{ 2840{
2824 int i, j, ret = 0; 2841 int i, j, ret = 0;
2825 u32 temp; 2842 u32 temp;
2843 int err = 0;
2826 2844
2827 /* Check alignment */ 2845 /* Check alignment */
2828 if (addr & 0xF) 2846 if (addr & 0xF)
@@ -2855,8 +2873,12 @@ int qlcnic_83xx_ms_mem_write128(struct qlcnic_adapter *adapter, u64 addr,
2855 QLCNIC_TA_WRITE_START); 2873 QLCNIC_TA_WRITE_START);
2856 2874
2857 for (j = 0; j < MAX_CTL_CHECK; j++) { 2875 for (j = 0; j < MAX_CTL_CHECK; j++) {
2858 temp = qlcnic_83xx_rd_reg_indirect(adapter, 2876 temp = QLCRD32(adapter, QLCNIC_MS_CTRL, &err);
2859 QLCNIC_MS_CTRL); 2877 if (err == -EIO) {
2878 mutex_unlock(&adapter->ahw->mem_lock);
2879 return err;
2880 }
2881
2860 if ((temp & TA_CTL_BUSY) == 0) 2882 if ((temp & TA_CTL_BUSY) == 0)
2861 break; 2883 break;
2862 } 2884 }
@@ -2878,9 +2900,9 @@ int qlcnic_83xx_ms_mem_write128(struct qlcnic_adapter *adapter, u64 addr,
2878int qlcnic_83xx_flash_read32(struct qlcnic_adapter *adapter, u32 flash_addr, 2900int qlcnic_83xx_flash_read32(struct qlcnic_adapter *adapter, u32 flash_addr,
2879 u8 *p_data, int count) 2901 u8 *p_data, int count)
2880{ 2902{
2881 int i, ret; 2903 u32 word, addr = flash_addr, ret;
2882 u32 word, addr = flash_addr;
2883 ulong indirect_addr; 2904 ulong indirect_addr;
2905 int i, err = 0;
2884 2906
2885 if (qlcnic_83xx_lock_flash(adapter) != 0) 2907 if (qlcnic_83xx_lock_flash(adapter) != 0)
2886 return -EIO; 2908 return -EIO;
@@ -2900,10 +2922,10 @@ int qlcnic_83xx_flash_read32(struct qlcnic_adapter *adapter, u32 flash_addr,
2900 } 2922 }
2901 2923
2902 indirect_addr = QLC_83XX_FLASH_DIRECT_DATA(addr); 2924 indirect_addr = QLC_83XX_FLASH_DIRECT_DATA(addr);
2903 ret = qlcnic_83xx_rd_reg_indirect(adapter, 2925 ret = QLCRD32(adapter, indirect_addr, &err);
2904 indirect_addr); 2926 if (err == -EIO)
2905 if (ret == -EIO) 2927 return err;
2906 return -EIO; 2928
2907 word = ret; 2929 word = ret;
2908 *(u32 *)p_data = word; 2930 *(u32 *)p_data = word;
2909 p_data = p_data + 4; 2931 p_data = p_data + 4;
@@ -3014,8 +3036,8 @@ int qlcnic_83xx_get_settings(struct qlcnic_adapter *adapter,
3014 } 3036 }
3015 3037
3016 if (ahw->port_type == QLCNIC_XGBE) { 3038 if (ahw->port_type == QLCNIC_XGBE) {
3017 ecmd->supported = SUPPORTED_1000baseT_Full; 3039 ecmd->supported = SUPPORTED_10000baseT_Full;
3018 ecmd->advertising = ADVERTISED_1000baseT_Full; 3040 ecmd->advertising = ADVERTISED_10000baseT_Full;
3019 } else { 3041 } else {
3020 ecmd->supported = (SUPPORTED_10baseT_Half | 3042 ecmd->supported = (SUPPORTED_10baseT_Half |
3021 SUPPORTED_10baseT_Full | 3043 SUPPORTED_10baseT_Full |
@@ -3369,7 +3391,8 @@ int qlcnic_83xx_set_pauseparam(struct qlcnic_adapter *adapter,
3369 3391
3370static int qlcnic_83xx_read_flash_status_reg(struct qlcnic_adapter *adapter) 3392static int qlcnic_83xx_read_flash_status_reg(struct qlcnic_adapter *adapter)
3371{ 3393{
3372 int ret; 3394 int ret, err = 0;
3395 u32 temp;
3373 3396
3374 qlcnic_83xx_wrt_reg_indirect(adapter, QLC_83XX_FLASH_ADDR, 3397 qlcnic_83xx_wrt_reg_indirect(adapter, QLC_83XX_FLASH_ADDR,
3375 QLC_83XX_FLASH_OEM_READ_SIG); 3398 QLC_83XX_FLASH_OEM_READ_SIG);
@@ -3379,8 +3402,11 @@ static int qlcnic_83xx_read_flash_status_reg(struct qlcnic_adapter *adapter)
3379 if (ret) 3402 if (ret)
3380 return -EIO; 3403 return -EIO;
3381 3404
3382 ret = qlcnic_83xx_rd_reg_indirect(adapter, QLC_83XX_FLASH_RDDATA); 3405 temp = QLCRD32(adapter, QLC_83XX_FLASH_RDDATA, &err);
3383 return ret & 0xFF; 3406 if (err == -EIO)
3407 return err;
3408
3409 return temp & 0xFF;
3384} 3410}
3385 3411
3386int qlcnic_83xx_flash_test(struct qlcnic_adapter *adapter) 3412int qlcnic_83xx_flash_test(struct qlcnic_adapter *adapter)
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.h b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.h
index 2548d1403d75..272f56a2e14b 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.h
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.h
@@ -508,7 +508,7 @@ void qlcnic_83xx_add_sysfs(struct qlcnic_adapter *);
508void qlcnic_83xx_remove_sysfs(struct qlcnic_adapter *); 508void qlcnic_83xx_remove_sysfs(struct qlcnic_adapter *);
509void qlcnic_83xx_write_crb(struct qlcnic_adapter *, char *, loff_t, size_t); 509void qlcnic_83xx_write_crb(struct qlcnic_adapter *, char *, loff_t, size_t);
510void qlcnic_83xx_read_crb(struct qlcnic_adapter *, char *, loff_t, size_t); 510void qlcnic_83xx_read_crb(struct qlcnic_adapter *, char *, loff_t, size_t);
511int qlcnic_83xx_rd_reg_indirect(struct qlcnic_adapter *, ulong); 511int qlcnic_83xx_rd_reg_indirect(struct qlcnic_adapter *, ulong, int *);
512int qlcnic_83xx_wrt_reg_indirect(struct qlcnic_adapter *, ulong, u32); 512int qlcnic_83xx_wrt_reg_indirect(struct qlcnic_adapter *, ulong, u32);
513void qlcnic_83xx_process_rcv_diag(struct qlcnic_adapter *, int, u64 []); 513void qlcnic_83xx_process_rcv_diag(struct qlcnic_adapter *, int, u64 []);
514int qlcnic_83xx_nic_set_promisc(struct qlcnic_adapter *, u32); 514int qlcnic_83xx_nic_set_promisc(struct qlcnic_adapter *, u32);
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c
index f41dfab1e9a3..9f4b8d5f0865 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c
@@ -1303,8 +1303,11 @@ static void qlcnic_83xx_dump_pause_control_regs(struct qlcnic_adapter *adapter)
1303{ 1303{
1304 int i, j; 1304 int i, j;
1305 u32 val = 0, val1 = 0, reg = 0; 1305 u32 val = 0, val1 = 0, reg = 0;
1306 int err = 0;
1306 1307
1307 val = QLCRD32(adapter, QLC_83XX_SRE_SHIM_REG); 1308 val = QLCRD32(adapter, QLC_83XX_SRE_SHIM_REG, &err);
1309 if (err == -EIO)
1310 return;
1308 dev_info(&adapter->pdev->dev, "SRE-Shim Ctrl:0x%x\n", val); 1311 dev_info(&adapter->pdev->dev, "SRE-Shim Ctrl:0x%x\n", val);
1309 1312
1310 for (j = 0; j < 2; j++) { 1313 for (j = 0; j < 2; j++) {
@@ -1318,7 +1321,9 @@ static void qlcnic_83xx_dump_pause_control_regs(struct qlcnic_adapter *adapter)
1318 reg = QLC_83XX_PORT1_THRESHOLD; 1321 reg = QLC_83XX_PORT1_THRESHOLD;
1319 } 1322 }
1320 for (i = 0; i < 8; i++) { 1323 for (i = 0; i < 8; i++) {
1321 val = QLCRD32(adapter, reg + (i * 0x4)); 1324 val = QLCRD32(adapter, reg + (i * 0x4), &err);
1325 if (err == -EIO)
1326 return;
1322 dev_info(&adapter->pdev->dev, "0x%x ", val); 1327 dev_info(&adapter->pdev->dev, "0x%x ", val);
1323 } 1328 }
1324 dev_info(&adapter->pdev->dev, "\n"); 1329 dev_info(&adapter->pdev->dev, "\n");
@@ -1335,8 +1340,10 @@ static void qlcnic_83xx_dump_pause_control_regs(struct qlcnic_adapter *adapter)
1335 reg = QLC_83XX_PORT1_TC_MC_REG; 1340 reg = QLC_83XX_PORT1_TC_MC_REG;
1336 } 1341 }
1337 for (i = 0; i < 4; i++) { 1342 for (i = 0; i < 4; i++) {
1338 val = QLCRD32(adapter, reg + (i * 0x4)); 1343 val = QLCRD32(adapter, reg + (i * 0x4), &err);
1339 dev_info(&adapter->pdev->dev, "0x%x ", val); 1344 if (err == -EIO)
1345 return;
1346 dev_info(&adapter->pdev->dev, "0x%x ", val);
1340 } 1347 }
1341 dev_info(&adapter->pdev->dev, "\n"); 1348 dev_info(&adapter->pdev->dev, "\n");
1342 } 1349 }
@@ -1352,17 +1359,25 @@ static void qlcnic_83xx_dump_pause_control_regs(struct qlcnic_adapter *adapter)
1352 reg = QLC_83XX_PORT1_TC_STATS; 1359 reg = QLC_83XX_PORT1_TC_STATS;
1353 } 1360 }
1354 for (i = 7; i >= 0; i--) { 1361 for (i = 7; i >= 0; i--) {
1355 val = QLCRD32(adapter, reg); 1362 val = QLCRD32(adapter, reg, &err);
1363 if (err == -EIO)
1364 return;
1356 val &= ~(0x7 << 29); /* Reset bits 29 to 31 */ 1365 val &= ~(0x7 << 29); /* Reset bits 29 to 31 */
1357 QLCWR32(adapter, reg, (val | (i << 29))); 1366 QLCWR32(adapter, reg, (val | (i << 29)));
1358 val = QLCRD32(adapter, reg); 1367 val = QLCRD32(adapter, reg, &err);
1368 if (err == -EIO)
1369 return;
1359 dev_info(&adapter->pdev->dev, "0x%x ", val); 1370 dev_info(&adapter->pdev->dev, "0x%x ", val);
1360 } 1371 }
1361 dev_info(&adapter->pdev->dev, "\n"); 1372 dev_info(&adapter->pdev->dev, "\n");
1362 } 1373 }
1363 1374
1364 val = QLCRD32(adapter, QLC_83XX_PORT2_IFB_THRESHOLD); 1375 val = QLCRD32(adapter, QLC_83XX_PORT2_IFB_THRESHOLD, &err);
1365 val1 = QLCRD32(adapter, QLC_83XX_PORT3_IFB_THRESHOLD); 1376 if (err == -EIO)
1377 return;
1378 val1 = QLCRD32(adapter, QLC_83XX_PORT3_IFB_THRESHOLD, &err);
1379 if (err == -EIO)
1380 return;
1366 dev_info(&adapter->pdev->dev, 1381 dev_info(&adapter->pdev->dev,
1367 "IFB-Pause Thresholds: Port 2:0x%x, Port 3:0x%x\n", 1382 "IFB-Pause Thresholds: Port 2:0x%x, Port 3:0x%x\n",
1368 val, val1); 1383 val, val1);
@@ -1425,7 +1440,7 @@ static void qlcnic_83xx_take_eport_out_of_reset(struct qlcnic_adapter *adapter)
1425static int qlcnic_83xx_check_heartbeat(struct qlcnic_adapter *p_dev) 1440static int qlcnic_83xx_check_heartbeat(struct qlcnic_adapter *p_dev)
1426{ 1441{
1427 u32 heartbeat, peg_status; 1442 u32 heartbeat, peg_status;
1428 int retries, ret = -EIO; 1443 int retries, ret = -EIO, err = 0;
1429 1444
1430 retries = QLCNIC_HEARTBEAT_CHECK_RETRY_COUNT; 1445 retries = QLCNIC_HEARTBEAT_CHECK_RETRY_COUNT;
1431 p_dev->heartbeat = QLC_SHARED_REG_RD32(p_dev, 1446 p_dev->heartbeat = QLC_SHARED_REG_RD32(p_dev,
@@ -1453,11 +1468,11 @@ static int qlcnic_83xx_check_heartbeat(struct qlcnic_adapter *p_dev)
1453 "PEG_NET_2_PC: 0x%x, PEG_NET_3_PC: 0x%x,\n" 1468 "PEG_NET_2_PC: 0x%x, PEG_NET_3_PC: 0x%x,\n"
1454 "PEG_NET_4_PC: 0x%x\n", peg_status, 1469 "PEG_NET_4_PC: 0x%x\n", peg_status,
1455 QLC_SHARED_REG_RD32(p_dev, QLCNIC_PEG_HALT_STATUS2), 1470 QLC_SHARED_REG_RD32(p_dev, QLCNIC_PEG_HALT_STATUS2),
1456 QLCRD32(p_dev, QLC_83XX_CRB_PEG_NET_0), 1471 QLCRD32(p_dev, QLC_83XX_CRB_PEG_NET_0, &err),
1457 QLCRD32(p_dev, QLC_83XX_CRB_PEG_NET_1), 1472 QLCRD32(p_dev, QLC_83XX_CRB_PEG_NET_1, &err),
1458 QLCRD32(p_dev, QLC_83XX_CRB_PEG_NET_2), 1473 QLCRD32(p_dev, QLC_83XX_CRB_PEG_NET_2, &err),
1459 QLCRD32(p_dev, QLC_83XX_CRB_PEG_NET_3), 1474 QLCRD32(p_dev, QLC_83XX_CRB_PEG_NET_3, &err),
1460 QLCRD32(p_dev, QLC_83XX_CRB_PEG_NET_4)); 1475 QLCRD32(p_dev, QLC_83XX_CRB_PEG_NET_4, &err));
1461 1476
1462 if (QLCNIC_FWERROR_CODE(peg_status) == 0x67) 1477 if (QLCNIC_FWERROR_CODE(peg_status) == 0x67)
1463 dev_err(&p_dev->pdev->dev, 1478 dev_err(&p_dev->pdev->dev,
@@ -1501,18 +1516,22 @@ int qlcnic_83xx_check_hw_status(struct qlcnic_adapter *p_dev)
1501static int qlcnic_83xx_poll_reg(struct qlcnic_adapter *p_dev, u32 addr, 1516static int qlcnic_83xx_poll_reg(struct qlcnic_adapter *p_dev, u32 addr,
1502 int duration, u32 mask, u32 status) 1517 int duration, u32 mask, u32 status)
1503{ 1518{
1519 int timeout_error, err = 0;
1504 u32 value; 1520 u32 value;
1505 int timeout_error;
1506 u8 retries; 1521 u8 retries;
1507 1522
1508 value = qlcnic_83xx_rd_reg_indirect(p_dev, addr); 1523 value = QLCRD32(p_dev, addr, &err);
1524 if (err == -EIO)
1525 return err;
1509 retries = duration / 10; 1526 retries = duration / 10;
1510 1527
1511 do { 1528 do {
1512 if ((value & mask) != status) { 1529 if ((value & mask) != status) {
1513 timeout_error = 1; 1530 timeout_error = 1;
1514 msleep(duration / 10); 1531 msleep(duration / 10);
1515 value = qlcnic_83xx_rd_reg_indirect(p_dev, addr); 1532 value = QLCRD32(p_dev, addr, &err);
1533 if (err == -EIO)
1534 return err;
1516 } else { 1535 } else {
1517 timeout_error = 0; 1536 timeout_error = 0;
1518 break; 1537 break;
@@ -1606,9 +1625,12 @@ int qlcnic_83xx_get_reset_instruction_template(struct qlcnic_adapter *p_dev)
1606static void qlcnic_83xx_read_write_crb_reg(struct qlcnic_adapter *p_dev, 1625static void qlcnic_83xx_read_write_crb_reg(struct qlcnic_adapter *p_dev,
1607 u32 raddr, u32 waddr) 1626 u32 raddr, u32 waddr)
1608{ 1627{
1609 int value; 1628 int err = 0;
1629 u32 value;
1610 1630
1611 value = qlcnic_83xx_rd_reg_indirect(p_dev, raddr); 1631 value = QLCRD32(p_dev, raddr, &err);
1632 if (err == -EIO)
1633 return;
1612 qlcnic_83xx_wrt_reg_indirect(p_dev, waddr, value); 1634 qlcnic_83xx_wrt_reg_indirect(p_dev, waddr, value);
1613} 1635}
1614 1636
@@ -1617,12 +1639,16 @@ static void qlcnic_83xx_rmw_crb_reg(struct qlcnic_adapter *p_dev,
1617 u32 raddr, u32 waddr, 1639 u32 raddr, u32 waddr,
1618 struct qlc_83xx_rmw *p_rmw_hdr) 1640 struct qlc_83xx_rmw *p_rmw_hdr)
1619{ 1641{
1620 int value; 1642 int err = 0;
1643 u32 value;
1621 1644
1622 if (p_rmw_hdr->index_a) 1645 if (p_rmw_hdr->index_a) {
1623 value = p_dev->ahw->reset.array[p_rmw_hdr->index_a]; 1646 value = p_dev->ahw->reset.array[p_rmw_hdr->index_a];
1624 else 1647 } else {
1625 value = qlcnic_83xx_rd_reg_indirect(p_dev, raddr); 1648 value = QLCRD32(p_dev, raddr, &err);
1649 if (err == -EIO)
1650 return;
1651 }
1626 1652
1627 value &= p_rmw_hdr->mask; 1653 value &= p_rmw_hdr->mask;
1628 value <<= p_rmw_hdr->shl; 1654 value <<= p_rmw_hdr->shl;
@@ -1675,7 +1701,7 @@ static void qlcnic_83xx_poll_list(struct qlcnic_adapter *p_dev,
1675 long delay; 1701 long delay;
1676 struct qlc_83xx_entry *entry; 1702 struct qlc_83xx_entry *entry;
1677 struct qlc_83xx_poll *poll; 1703 struct qlc_83xx_poll *poll;
1678 int i; 1704 int i, err = 0;
1679 unsigned long arg1, arg2; 1705 unsigned long arg1, arg2;
1680 1706
1681 poll = (struct qlc_83xx_poll *)((char *)p_hdr + 1707 poll = (struct qlc_83xx_poll *)((char *)p_hdr +
@@ -1699,10 +1725,12 @@ static void qlcnic_83xx_poll_list(struct qlcnic_adapter *p_dev,
1699 arg1, delay, 1725 arg1, delay,
1700 poll->mask, 1726 poll->mask,
1701 poll->status)){ 1727 poll->status)){
1702 qlcnic_83xx_rd_reg_indirect(p_dev, 1728 QLCRD32(p_dev, arg1, &err);
1703 arg1); 1729 if (err == -EIO)
1704 qlcnic_83xx_rd_reg_indirect(p_dev, 1730 return;
1705 arg2); 1731 QLCRD32(p_dev, arg2, &err);
1732 if (err == -EIO)
1733 return;
1706 } 1734 }
1707 } 1735 }
1708 } 1736 }
@@ -1768,7 +1796,7 @@ static void qlcnic_83xx_poll_read_list(struct qlcnic_adapter *p_dev,
1768 struct qlc_83xx_entry_hdr *p_hdr) 1796 struct qlc_83xx_entry_hdr *p_hdr)
1769{ 1797{
1770 long delay; 1798 long delay;
1771 int index, i, j; 1799 int index, i, j, err;
1772 struct qlc_83xx_quad_entry *entry; 1800 struct qlc_83xx_quad_entry *entry;
1773 struct qlc_83xx_poll *poll; 1801 struct qlc_83xx_poll *poll;
1774 unsigned long addr; 1802 unsigned long addr;
@@ -1788,7 +1816,10 @@ static void qlcnic_83xx_poll_read_list(struct qlcnic_adapter *p_dev,
1788 poll->mask, poll->status)){ 1816 poll->mask, poll->status)){
1789 index = p_dev->ahw->reset.array_index; 1817 index = p_dev->ahw->reset.array_index;
1790 addr = entry->dr_addr; 1818 addr = entry->dr_addr;
1791 j = qlcnic_83xx_rd_reg_indirect(p_dev, addr); 1819 j = QLCRD32(p_dev, addr, &err);
1820 if (err == -EIO)
1821 return;
1822
1792 p_dev->ahw->reset.array[index++] = j; 1823 p_dev->ahw->reset.array[index++] = j;
1793 1824
1794 if (index == QLC_83XX_MAX_RESET_SEQ_ENTRIES) 1825 if (index == QLC_83XX_MAX_RESET_SEQ_ENTRIES)
@@ -2123,6 +2154,8 @@ int qlcnic_83xx_init(struct qlcnic_adapter *adapter, int pci_using_dac)
2123 set_bit(QLC_83XX_MBX_READY, &adapter->ahw->idc.status); 2154 set_bit(QLC_83XX_MBX_READY, &adapter->ahw->idc.status);
2124 qlcnic_83xx_clear_function_resources(adapter); 2155 qlcnic_83xx_clear_function_resources(adapter);
2125 2156
2157 INIT_DELAYED_WORK(&adapter->idc_aen_work, qlcnic_83xx_idc_aen_work);
2158
2126 /* register for NIC IDC AEN Events */ 2159 /* register for NIC IDC AEN Events */
2127 qlcnic_83xx_register_nic_idc_func(adapter, 1); 2160 qlcnic_83xx_register_nic_idc_func(adapter, 1);
2128 2161
@@ -2140,8 +2173,6 @@ int qlcnic_83xx_init(struct qlcnic_adapter *adapter, int pci_using_dac)
2140 if (adapter->nic_ops->init_driver(adapter)) 2173 if (adapter->nic_ops->init_driver(adapter))
2141 return -EIO; 2174 return -EIO;
2142 2175
2143 INIT_DELAYED_WORK(&adapter->idc_aen_work, qlcnic_83xx_idc_aen_work);
2144
2145 /* Periodically monitor device status */ 2176 /* Periodically monitor device status */
2146 qlcnic_83xx_idc_poll_dev_state(&adapter->fw_work.work); 2177 qlcnic_83xx_idc_poll_dev_state(&adapter->fw_work.work);
2147 2178
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c
index 0581a484ceb5..d09389b33474 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c
@@ -104,7 +104,7 @@ static u32
104qlcnic_poll_rsp(struct qlcnic_adapter *adapter) 104qlcnic_poll_rsp(struct qlcnic_adapter *adapter)
105{ 105{
106 u32 rsp; 106 u32 rsp;
107 int timeout = 0; 107 int timeout = 0, err = 0;
108 108
109 do { 109 do {
110 /* give atleast 1ms for firmware to respond */ 110 /* give atleast 1ms for firmware to respond */
@@ -113,7 +113,7 @@ qlcnic_poll_rsp(struct qlcnic_adapter *adapter)
113 if (++timeout > QLCNIC_OS_CRB_RETRY_COUNT) 113 if (++timeout > QLCNIC_OS_CRB_RETRY_COUNT)
114 return QLCNIC_CDRP_RSP_TIMEOUT; 114 return QLCNIC_CDRP_RSP_TIMEOUT;
115 115
116 rsp = QLCRD32(adapter, QLCNIC_CDRP_CRB_OFFSET); 116 rsp = QLCRD32(adapter, QLCNIC_CDRP_CRB_OFFSET, &err);
117 } while (!QLCNIC_CDRP_IS_RSP(rsp)); 117 } while (!QLCNIC_CDRP_IS_RSP(rsp));
118 118
119 return rsp; 119 return rsp;
@@ -122,7 +122,7 @@ qlcnic_poll_rsp(struct qlcnic_adapter *adapter)
122int qlcnic_82xx_issue_cmd(struct qlcnic_adapter *adapter, 122int qlcnic_82xx_issue_cmd(struct qlcnic_adapter *adapter,
123 struct qlcnic_cmd_args *cmd) 123 struct qlcnic_cmd_args *cmd)
124{ 124{
125 int i; 125 int i, err = 0;
126 u32 rsp; 126 u32 rsp;
127 u32 signature; 127 u32 signature;
128 struct pci_dev *pdev = adapter->pdev; 128 struct pci_dev *pdev = adapter->pdev;
@@ -148,7 +148,7 @@ int qlcnic_82xx_issue_cmd(struct qlcnic_adapter *adapter,
148 dev_err(&pdev->dev, "card response timeout.\n"); 148 dev_err(&pdev->dev, "card response timeout.\n");
149 cmd->rsp.arg[0] = QLCNIC_RCODE_TIMEOUT; 149 cmd->rsp.arg[0] = QLCNIC_RCODE_TIMEOUT;
150 } else if (rsp == QLCNIC_CDRP_RSP_FAIL) { 150 } else if (rsp == QLCNIC_CDRP_RSP_FAIL) {
151 cmd->rsp.arg[0] = QLCRD32(adapter, QLCNIC_CDRP_ARG(1)); 151 cmd->rsp.arg[0] = QLCRD32(adapter, QLCNIC_CDRP_ARG(1), &err);
152 switch (cmd->rsp.arg[0]) { 152 switch (cmd->rsp.arg[0]) {
153 case QLCNIC_RCODE_INVALID_ARGS: 153 case QLCNIC_RCODE_INVALID_ARGS:
154 fmt = "CDRP invalid args: [%d]\n"; 154 fmt = "CDRP invalid args: [%d]\n";
@@ -175,7 +175,7 @@ int qlcnic_82xx_issue_cmd(struct qlcnic_adapter *adapter,
175 cmd->rsp.arg[0] = QLCNIC_RCODE_SUCCESS; 175 cmd->rsp.arg[0] = QLCNIC_RCODE_SUCCESS;
176 176
177 for (i = 1; i < cmd->rsp.num; i++) 177 for (i = 1; i < cmd->rsp.num; i++)
178 cmd->rsp.arg[i] = QLCRD32(adapter, QLCNIC_CDRP_ARG(i)); 178 cmd->rsp.arg[i] = QLCRD32(adapter, QLCNIC_CDRP_ARG(i), &err);
179 179
180 /* Release semaphore */ 180 /* Release semaphore */
181 qlcnic_api_unlock(adapter); 181 qlcnic_api_unlock(adapter);
@@ -210,10 +210,10 @@ int qlcnic_fw_cmd_set_drv_version(struct qlcnic_adapter *adapter, u32 fw_cmd)
210 if (err) { 210 if (err) {
211 dev_info(&adapter->pdev->dev, 211 dev_info(&adapter->pdev->dev,
212 "Failed to set driver version in firmware\n"); 212 "Failed to set driver version in firmware\n");
213 return -EIO; 213 err = -EIO;
214 } 214 }
215 215 qlcnic_free_mbx_args(&cmd);
216 return 0; 216 return err;
217} 217}
218 218
219int 219int
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c
index 700a46324d09..7aac23ab31d1 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c
@@ -150,6 +150,7 @@ static const char qlcnic_gstrings_test[][ETH_GSTRING_LEN] = {
150 "Link_Test_on_offline", 150 "Link_Test_on_offline",
151 "Interrupt_Test_offline", 151 "Interrupt_Test_offline",
152 "Internal_Loopback_offline", 152 "Internal_Loopback_offline",
153 "External_Loopback_offline",
153 "EEPROM_Test_offline" 154 "EEPROM_Test_offline"
154}; 155};
155 156
@@ -266,7 +267,7 @@ int qlcnic_82xx_get_settings(struct qlcnic_adapter *adapter,
266{ 267{
267 struct qlcnic_hardware_context *ahw = adapter->ahw; 268 struct qlcnic_hardware_context *ahw = adapter->ahw;
268 u32 speed, reg; 269 u32 speed, reg;
269 int check_sfp_module = 0; 270 int check_sfp_module = 0, err = 0;
270 u16 pcifn = ahw->pci_func; 271 u16 pcifn = ahw->pci_func;
271 272
272 /* read which mode */ 273 /* read which mode */
@@ -289,7 +290,7 @@ int qlcnic_82xx_get_settings(struct qlcnic_adapter *adapter,
289 290
290 } else if (adapter->ahw->port_type == QLCNIC_XGBE) { 291 } else if (adapter->ahw->port_type == QLCNIC_XGBE) {
291 u32 val = 0; 292 u32 val = 0;
292 val = QLCRD32(adapter, QLCNIC_PORT_MODE_ADDR); 293 val = QLCRD32(adapter, QLCNIC_PORT_MODE_ADDR, &err);
293 294
294 if (val == QLCNIC_PORT_MODE_802_3_AP) { 295 if (val == QLCNIC_PORT_MODE_802_3_AP) {
295 ecmd->supported = SUPPORTED_1000baseT_Full; 296 ecmd->supported = SUPPORTED_1000baseT_Full;
@@ -300,9 +301,13 @@ int qlcnic_82xx_get_settings(struct qlcnic_adapter *adapter,
300 } 301 }
301 302
302 if (netif_running(adapter->netdev) && ahw->has_link_events) { 303 if (netif_running(adapter->netdev) && ahw->has_link_events) {
303 reg = QLCRD32(adapter, P3P_LINK_SPEED_REG(pcifn)); 304 if (ahw->linkup) {
304 speed = P3P_LINK_SPEED_VAL(pcifn, reg); 305 reg = QLCRD32(adapter,
305 ahw->link_speed = speed * P3P_LINK_SPEED_MHZ; 306 P3P_LINK_SPEED_REG(pcifn), &err);
307 speed = P3P_LINK_SPEED_VAL(pcifn, reg);
308 ahw->link_speed = speed * P3P_LINK_SPEED_MHZ;
309 }
310
306 ethtool_cmd_speed_set(ecmd, ahw->link_speed); 311 ethtool_cmd_speed_set(ecmd, ahw->link_speed);
307 ecmd->autoneg = ahw->link_autoneg; 312 ecmd->autoneg = ahw->link_autoneg;
308 ecmd->duplex = ahw->link_duplex; 313 ecmd->duplex = ahw->link_duplex;
@@ -463,13 +468,14 @@ static int qlcnic_set_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
463static int qlcnic_82xx_get_registers(struct qlcnic_adapter *adapter, 468static int qlcnic_82xx_get_registers(struct qlcnic_adapter *adapter,
464 u32 *regs_buff) 469 u32 *regs_buff)
465{ 470{
466 int i, j = 0; 471 int i, j = 0, err = 0;
467 472
468 for (i = QLCNIC_DEV_INFO_SIZE + 1; diag_registers[j] != -1; j++, i++) 473 for (i = QLCNIC_DEV_INFO_SIZE + 1; diag_registers[j] != -1; j++, i++)
469 regs_buff[i] = QLC_SHARED_REG_RD32(adapter, diag_registers[j]); 474 regs_buff[i] = QLC_SHARED_REG_RD32(adapter, diag_registers[j]);
470 j = 0; 475 j = 0;
471 while (ext_diag_registers[j] != -1) 476 while (ext_diag_registers[j] != -1)
472 regs_buff[i++] = QLCRD32(adapter, ext_diag_registers[j++]); 477 regs_buff[i++] = QLCRD32(adapter, ext_diag_registers[j++],
478 &err);
473 return i; 479 return i;
474} 480}
475 481
@@ -519,13 +525,16 @@ qlcnic_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *p)
519static u32 qlcnic_test_link(struct net_device *dev) 525static u32 qlcnic_test_link(struct net_device *dev)
520{ 526{
521 struct qlcnic_adapter *adapter = netdev_priv(dev); 527 struct qlcnic_adapter *adapter = netdev_priv(dev);
528 int err = 0;
522 u32 val; 529 u32 val;
523 530
524 if (qlcnic_83xx_check(adapter)) { 531 if (qlcnic_83xx_check(adapter)) {
525 val = qlcnic_83xx_test_link(adapter); 532 val = qlcnic_83xx_test_link(adapter);
526 return (val & 1) ? 0 : 1; 533 return (val & 1) ? 0 : 1;
527 } 534 }
528 val = QLCRD32(adapter, CRB_XG_STATE_P3P); 535 val = QLCRD32(adapter, CRB_XG_STATE_P3P, &err);
536 if (err == -EIO)
537 return err;
529 val = XG_LINK_STATE_P3P(adapter->ahw->pci_func, val); 538 val = XG_LINK_STATE_P3P(adapter->ahw->pci_func, val);
530 return (val == XG_LINK_UP_P3P) ? 0 : 1; 539 return (val == XG_LINK_UP_P3P) ? 0 : 1;
531} 540}
@@ -658,6 +667,7 @@ qlcnic_get_pauseparam(struct net_device *netdev,
658{ 667{
659 struct qlcnic_adapter *adapter = netdev_priv(netdev); 668 struct qlcnic_adapter *adapter = netdev_priv(netdev);
660 int port = adapter->ahw->physical_port; 669 int port = adapter->ahw->physical_port;
670 int err = 0;
661 __u32 val; 671 __u32 val;
662 672
663 if (qlcnic_83xx_check(adapter)) { 673 if (qlcnic_83xx_check(adapter)) {
@@ -668,9 +678,13 @@ qlcnic_get_pauseparam(struct net_device *netdev,
668 if ((port < 0) || (port > QLCNIC_NIU_MAX_GBE_PORTS)) 678 if ((port < 0) || (port > QLCNIC_NIU_MAX_GBE_PORTS))
669 return; 679 return;
670 /* get flow control settings */ 680 /* get flow control settings */
671 val = QLCRD32(adapter, QLCNIC_NIU_GB_MAC_CONFIG_0(port)); 681 val = QLCRD32(adapter, QLCNIC_NIU_GB_MAC_CONFIG_0(port), &err);
682 if (err == -EIO)
683 return;
672 pause->rx_pause = qlcnic_gb_get_rx_flowctl(val); 684 pause->rx_pause = qlcnic_gb_get_rx_flowctl(val);
673 val = QLCRD32(adapter, QLCNIC_NIU_GB_PAUSE_CTL); 685 val = QLCRD32(adapter, QLCNIC_NIU_GB_PAUSE_CTL, &err);
686 if (err == -EIO)
687 return;
674 switch (port) { 688 switch (port) {
675 case 0: 689 case 0:
676 pause->tx_pause = !(qlcnic_gb_get_gb0_mask(val)); 690 pause->tx_pause = !(qlcnic_gb_get_gb0_mask(val));
@@ -690,7 +704,9 @@ qlcnic_get_pauseparam(struct net_device *netdev,
690 if ((port < 0) || (port > QLCNIC_NIU_MAX_XG_PORTS)) 704 if ((port < 0) || (port > QLCNIC_NIU_MAX_XG_PORTS))
691 return; 705 return;
692 pause->rx_pause = 1; 706 pause->rx_pause = 1;
693 val = QLCRD32(adapter, QLCNIC_NIU_XG_PAUSE_CTL); 707 val = QLCRD32(adapter, QLCNIC_NIU_XG_PAUSE_CTL, &err);
708 if (err == -EIO)
709 return;
694 if (port == 0) 710 if (port == 0)
695 pause->tx_pause = !(qlcnic_xg_get_xg0_mask(val)); 711 pause->tx_pause = !(qlcnic_xg_get_xg0_mask(val));
696 else 712 else
@@ -707,6 +723,7 @@ qlcnic_set_pauseparam(struct net_device *netdev,
707{ 723{
708 struct qlcnic_adapter *adapter = netdev_priv(netdev); 724 struct qlcnic_adapter *adapter = netdev_priv(netdev);
709 int port = adapter->ahw->physical_port; 725 int port = adapter->ahw->physical_port;
726 int err = 0;
710 __u32 val; 727 __u32 val;
711 728
712 if (qlcnic_83xx_check(adapter)) 729 if (qlcnic_83xx_check(adapter))
@@ -717,7 +734,9 @@ qlcnic_set_pauseparam(struct net_device *netdev,
717 if ((port < 0) || (port > QLCNIC_NIU_MAX_GBE_PORTS)) 734 if ((port < 0) || (port > QLCNIC_NIU_MAX_GBE_PORTS))
718 return -EIO; 735 return -EIO;
719 /* set flow control */ 736 /* set flow control */
720 val = QLCRD32(adapter, QLCNIC_NIU_GB_MAC_CONFIG_0(port)); 737 val = QLCRD32(adapter, QLCNIC_NIU_GB_MAC_CONFIG_0(port), &err);
738 if (err == -EIO)
739 return err;
721 740
722 if (pause->rx_pause) 741 if (pause->rx_pause)
723 qlcnic_gb_rx_flowctl(val); 742 qlcnic_gb_rx_flowctl(val);
@@ -728,7 +747,9 @@ qlcnic_set_pauseparam(struct net_device *netdev,
728 val); 747 val);
729 QLCWR32(adapter, QLCNIC_NIU_GB_MAC_CONFIG_0(port), val); 748 QLCWR32(adapter, QLCNIC_NIU_GB_MAC_CONFIG_0(port), val);
730 /* set autoneg */ 749 /* set autoneg */
731 val = QLCRD32(adapter, QLCNIC_NIU_GB_PAUSE_CTL); 750 val = QLCRD32(adapter, QLCNIC_NIU_GB_PAUSE_CTL, &err);
751 if (err == -EIO)
752 return err;
732 switch (port) { 753 switch (port) {
733 case 0: 754 case 0:
734 if (pause->tx_pause) 755 if (pause->tx_pause)
@@ -764,7 +785,9 @@ qlcnic_set_pauseparam(struct net_device *netdev,
764 if ((port < 0) || (port > QLCNIC_NIU_MAX_XG_PORTS)) 785 if ((port < 0) || (port > QLCNIC_NIU_MAX_XG_PORTS))
765 return -EIO; 786 return -EIO;
766 787
767 val = QLCRD32(adapter, QLCNIC_NIU_XG_PAUSE_CTL); 788 val = QLCRD32(adapter, QLCNIC_NIU_XG_PAUSE_CTL, &err);
789 if (err == -EIO)
790 return err;
768 if (port == 0) { 791 if (port == 0) {
769 if (pause->tx_pause) 792 if (pause->tx_pause)
770 qlcnic_xg_unset_xg0_mask(val); 793 qlcnic_xg_unset_xg0_mask(val);
@@ -788,11 +811,14 @@ static int qlcnic_reg_test(struct net_device *dev)
788{ 811{
789 struct qlcnic_adapter *adapter = netdev_priv(dev); 812 struct qlcnic_adapter *adapter = netdev_priv(dev);
790 u32 data_read; 813 u32 data_read;
814 int err = 0;
791 815
792 if (qlcnic_83xx_check(adapter)) 816 if (qlcnic_83xx_check(adapter))
793 return qlcnic_83xx_reg_test(adapter); 817 return qlcnic_83xx_reg_test(adapter);
794 818
795 data_read = QLCRD32(adapter, QLCNIC_PCIX_PH_REG(0)); 819 data_read = QLCRD32(adapter, QLCNIC_PCIX_PH_REG(0), &err);
820 if (err == -EIO)
821 return err;
796 if ((data_read & 0xffff) != adapter->pdev->vendor) 822 if ((data_read & 0xffff) != adapter->pdev->vendor)
797 return 1; 823 return 1;
798 824
@@ -1026,8 +1052,15 @@ qlcnic_diag_test(struct net_device *dev, struct ethtool_test *eth_test,
1026 if (data[3]) 1052 if (data[3])
1027 eth_test->flags |= ETH_TEST_FL_FAILED; 1053 eth_test->flags |= ETH_TEST_FL_FAILED;
1028 1054
1029 data[4] = qlcnic_eeprom_test(dev); 1055 if (eth_test->flags & ETH_TEST_FL_EXTERNAL_LB) {
1030 if (data[4]) 1056 data[4] = qlcnic_loopback_test(dev, QLCNIC_ELB_MODE);
1057 if (data[4])
1058 eth_test->flags |= ETH_TEST_FL_FAILED;
1059 eth_test->flags |= ETH_TEST_FL_EXTERNAL_LB_DONE;
1060 }
1061
1062 data[5] = qlcnic_eeprom_test(dev);
1063 if (data[5])
1031 eth_test->flags |= ETH_TEST_FL_FAILED; 1064 eth_test->flags |= ETH_TEST_FL_FAILED;
1032 } 1065 }
1033} 1066}
@@ -1257,17 +1290,20 @@ qlcnic_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
1257{ 1290{
1258 struct qlcnic_adapter *adapter = netdev_priv(dev); 1291 struct qlcnic_adapter *adapter = netdev_priv(dev);
1259 u32 wol_cfg; 1292 u32 wol_cfg;
1293 int err = 0;
1260 1294
1261 if (qlcnic_83xx_check(adapter)) 1295 if (qlcnic_83xx_check(adapter))
1262 return; 1296 return;
1263 wol->supported = 0; 1297 wol->supported = 0;
1264 wol->wolopts = 0; 1298 wol->wolopts = 0;
1265 1299
1266 wol_cfg = QLCRD32(adapter, QLCNIC_WOL_CONFIG_NV); 1300 wol_cfg = QLCRD32(adapter, QLCNIC_WOL_CONFIG_NV, &err);
1301 if (err == -EIO)
1302 return;
1267 if (wol_cfg & (1UL << adapter->portnum)) 1303 if (wol_cfg & (1UL << adapter->portnum))
1268 wol->supported |= WAKE_MAGIC; 1304 wol->supported |= WAKE_MAGIC;
1269 1305
1270 wol_cfg = QLCRD32(adapter, QLCNIC_WOL_CONFIG); 1306 wol_cfg = QLCRD32(adapter, QLCNIC_WOL_CONFIG, &err);
1271 if (wol_cfg & (1UL << adapter->portnum)) 1307 if (wol_cfg & (1UL << adapter->portnum))
1272 wol->wolopts |= WAKE_MAGIC; 1308 wol->wolopts |= WAKE_MAGIC;
1273} 1309}
@@ -1277,17 +1313,22 @@ qlcnic_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
1277{ 1313{
1278 struct qlcnic_adapter *adapter = netdev_priv(dev); 1314 struct qlcnic_adapter *adapter = netdev_priv(dev);
1279 u32 wol_cfg; 1315 u32 wol_cfg;
1316 int err = 0;
1280 1317
1281 if (qlcnic_83xx_check(adapter)) 1318 if (qlcnic_83xx_check(adapter))
1282 return -EOPNOTSUPP; 1319 return -EOPNOTSUPP;
1283 if (wol->wolopts & ~WAKE_MAGIC) 1320 if (wol->wolopts & ~WAKE_MAGIC)
1284 return -EINVAL; 1321 return -EINVAL;
1285 1322
1286 wol_cfg = QLCRD32(adapter, QLCNIC_WOL_CONFIG_NV); 1323 wol_cfg = QLCRD32(adapter, QLCNIC_WOL_CONFIG_NV, &err);
1324 if (err == -EIO)
1325 return err;
1287 if (!(wol_cfg & (1 << adapter->portnum))) 1326 if (!(wol_cfg & (1 << adapter->portnum)))
1288 return -EOPNOTSUPP; 1327 return -EOPNOTSUPP;
1289 1328
1290 wol_cfg = QLCRD32(adapter, QLCNIC_WOL_CONFIG); 1329 wol_cfg = QLCRD32(adapter, QLCNIC_WOL_CONFIG, &err);
1330 if (err == -EIO)
1331 return err;
1291 if (wol->wolopts & WAKE_MAGIC) 1332 if (wol->wolopts & WAKE_MAGIC)
1292 wol_cfg |= 1UL << adapter->portnum; 1333 wol_cfg |= 1UL << adapter->portnum;
1293 else 1334 else
@@ -1540,7 +1581,7 @@ qlcnic_set_dump(struct net_device *netdev, struct ethtool_dump *val)
1540 return 0; 1581 return 0;
1541 case QLCNIC_SET_QUIESCENT: 1582 case QLCNIC_SET_QUIESCENT:
1542 case QLCNIC_RESET_QUIESCENT: 1583 case QLCNIC_RESET_QUIESCENT:
1543 state = QLCRD32(adapter, QLCNIC_CRB_DEV_STATE); 1584 state = QLC_SHARED_REG_RD32(adapter, QLCNIC_CRB_DEV_STATE);
1544 if (state == QLCNIC_DEV_FAILED || (state == QLCNIC_DEV_BADBAD)) 1585 if (state == QLCNIC_DEV_FAILED || (state == QLCNIC_DEV_BADBAD))
1545 netdev_info(netdev, "Device in FAILED state\n"); 1586 netdev_info(netdev, "Device in FAILED state\n");
1546 return 0; 1587 return 0;
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c
index 5b5d2edf125d..4d5f59b2d153 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c
@@ -317,16 +317,20 @@ static void qlcnic_write_window_reg(u32 addr, void __iomem *bar0, u32 data)
317int 317int
318qlcnic_pcie_sem_lock(struct qlcnic_adapter *adapter, int sem, u32 id_reg) 318qlcnic_pcie_sem_lock(struct qlcnic_adapter *adapter, int sem, u32 id_reg)
319{ 319{
320 int done = 0, timeout = 0; 320 int timeout = 0;
321 int err = 0;
322 u32 done = 0;
321 323
322 while (!done) { 324 while (!done) {
323 done = QLCRD32(adapter, QLCNIC_PCIE_REG(PCIE_SEM_LOCK(sem))); 325 done = QLCRD32(adapter, QLCNIC_PCIE_REG(PCIE_SEM_LOCK(sem)),
326 &err);
324 if (done == 1) 327 if (done == 1)
325 break; 328 break;
326 if (++timeout >= QLCNIC_PCIE_SEM_TIMEOUT) { 329 if (++timeout >= QLCNIC_PCIE_SEM_TIMEOUT) {
327 dev_err(&adapter->pdev->dev, 330 dev_err(&adapter->pdev->dev,
328 "Failed to acquire sem=%d lock; holdby=%d\n", 331 "Failed to acquire sem=%d lock; holdby=%d\n",
329 sem, id_reg ? QLCRD32(adapter, id_reg) : -1); 332 sem,
333 id_reg ? QLCRD32(adapter, id_reg, &err) : -1);
330 return -EIO; 334 return -EIO;
331 } 335 }
332 msleep(1); 336 msleep(1);
@@ -341,19 +345,22 @@ qlcnic_pcie_sem_lock(struct qlcnic_adapter *adapter, int sem, u32 id_reg)
341void 345void
342qlcnic_pcie_sem_unlock(struct qlcnic_adapter *adapter, int sem) 346qlcnic_pcie_sem_unlock(struct qlcnic_adapter *adapter, int sem)
343{ 347{
344 QLCRD32(adapter, QLCNIC_PCIE_REG(PCIE_SEM_UNLOCK(sem))); 348 int err = 0;
349
350 QLCRD32(adapter, QLCNIC_PCIE_REG(PCIE_SEM_UNLOCK(sem)), &err);
345} 351}
346 352
347int qlcnic_ind_rd(struct qlcnic_adapter *adapter, u32 addr) 353int qlcnic_ind_rd(struct qlcnic_adapter *adapter, u32 addr)
348{ 354{
355 int err = 0;
349 u32 data; 356 u32 data;
350 357
351 if (qlcnic_82xx_check(adapter)) 358 if (qlcnic_82xx_check(adapter))
352 qlcnic_read_window_reg(addr, adapter->ahw->pci_base0, &data); 359 qlcnic_read_window_reg(addr, adapter->ahw->pci_base0, &data);
353 else { 360 else {
354 data = qlcnic_83xx_rd_reg_indirect(adapter, addr); 361 data = QLCRD32(adapter, addr, &err);
355 if (data == -EIO) 362 if (err == -EIO)
356 return -EIO; 363 return err;
357 } 364 }
358 return data; 365 return data;
359} 366}
@@ -516,20 +523,18 @@ void __qlcnic_set_multi(struct net_device *netdev, u16 vlan)
516 if (netdev->flags & IFF_PROMISC) { 523 if (netdev->flags & IFF_PROMISC) {
517 if (!(adapter->flags & QLCNIC_PROMISC_DISABLED)) 524 if (!(adapter->flags & QLCNIC_PROMISC_DISABLED))
518 mode = VPORT_MISS_MODE_ACCEPT_ALL; 525 mode = VPORT_MISS_MODE_ACCEPT_ALL;
519 } else if (netdev->flags & IFF_ALLMULTI) { 526 } else if ((netdev->flags & IFF_ALLMULTI) ||
520 if (netdev_mc_count(netdev) > ahw->max_mc_count) { 527 (netdev_mc_count(netdev) > ahw->max_mc_count)) {
521 mode = VPORT_MISS_MODE_ACCEPT_MULTI; 528 mode = VPORT_MISS_MODE_ACCEPT_MULTI;
522 } else if (!netdev_mc_empty(netdev) && 529 } else if (!netdev_mc_empty(netdev) &&
523 !qlcnic_sriov_vf_check(adapter)) { 530 !qlcnic_sriov_vf_check(adapter)) {
524 netdev_for_each_mc_addr(ha, netdev) 531 netdev_for_each_mc_addr(ha, netdev)
525 qlcnic_nic_add_mac(adapter, ha->addr, 532 qlcnic_nic_add_mac(adapter, ha->addr, vlan);
526 vlan);
527 }
528 if (mode != VPORT_MISS_MODE_ACCEPT_MULTI &&
529 qlcnic_sriov_vf_check(adapter))
530 qlcnic_vf_add_mc_list(netdev, vlan);
531 } 533 }
532 534
535 if (qlcnic_sriov_vf_check(adapter))
536 qlcnic_vf_add_mc_list(netdev, vlan);
537
533 /* configure unicast MAC address, if there is not sufficient space 538 /* configure unicast MAC address, if there is not sufficient space
534 * to store all the unicast addresses then enable promiscuous mode 539 * to store all the unicast addresses then enable promiscuous mode
535 */ 540 */
@@ -1161,7 +1166,8 @@ int qlcnic_82xx_hw_write_wx_2M(struct qlcnic_adapter *adapter, ulong off,
1161 return -EIO; 1166 return -EIO;
1162} 1167}
1163 1168
1164int qlcnic_82xx_hw_read_wx_2M(struct qlcnic_adapter *adapter, ulong off) 1169int qlcnic_82xx_hw_read_wx_2M(struct qlcnic_adapter *adapter, ulong off,
1170 int *err)
1165{ 1171{
1166 unsigned long flags; 1172 unsigned long flags;
1167 int rv; 1173 int rv;
@@ -1417,7 +1423,7 @@ int qlcnic_pci_mem_read_2M(struct qlcnic_adapter *adapter, u64 off, u64 *data)
1417 1423
1418int qlcnic_82xx_get_board_info(struct qlcnic_adapter *adapter) 1424int qlcnic_82xx_get_board_info(struct qlcnic_adapter *adapter)
1419{ 1425{
1420 int offset, board_type, magic; 1426 int offset, board_type, magic, err = 0;
1421 struct pci_dev *pdev = adapter->pdev; 1427 struct pci_dev *pdev = adapter->pdev;
1422 1428
1423 offset = QLCNIC_FW_MAGIC_OFFSET; 1429 offset = QLCNIC_FW_MAGIC_OFFSET;
@@ -1437,7 +1443,9 @@ int qlcnic_82xx_get_board_info(struct qlcnic_adapter *adapter)
1437 adapter->ahw->board_type = board_type; 1443 adapter->ahw->board_type = board_type;
1438 1444
1439 if (board_type == QLCNIC_BRDTYPE_P3P_4_GB_MM) { 1445 if (board_type == QLCNIC_BRDTYPE_P3P_4_GB_MM) {
1440 u32 gpio = QLCRD32(adapter, QLCNIC_ROMUSB_GLB_PAD_GPIO_I); 1446 u32 gpio = QLCRD32(adapter, QLCNIC_ROMUSB_GLB_PAD_GPIO_I, &err);
1447 if (err == -EIO)
1448 return err;
1441 if ((gpio & 0x8000) == 0) 1449 if ((gpio & 0x8000) == 0)
1442 board_type = QLCNIC_BRDTYPE_P3P_10G_TP; 1450 board_type = QLCNIC_BRDTYPE_P3P_10G_TP;
1443 } 1451 }
@@ -1477,10 +1485,13 @@ int
1477qlcnic_wol_supported(struct qlcnic_adapter *adapter) 1485qlcnic_wol_supported(struct qlcnic_adapter *adapter)
1478{ 1486{
1479 u32 wol_cfg; 1487 u32 wol_cfg;
1488 int err = 0;
1480 1489
1481 wol_cfg = QLCRD32(adapter, QLCNIC_WOL_CONFIG_NV); 1490 wol_cfg = QLCRD32(adapter, QLCNIC_WOL_CONFIG_NV, &err);
1482 if (wol_cfg & (1UL << adapter->portnum)) { 1491 if (wol_cfg & (1UL << adapter->portnum)) {
1483 wol_cfg = QLCRD32(adapter, QLCNIC_WOL_CONFIG); 1492 wol_cfg = QLCRD32(adapter, QLCNIC_WOL_CONFIG, &err);
1493 if (err == -EIO)
1494 return err;
1484 if (wol_cfg & (1 << adapter->portnum)) 1495 if (wol_cfg & (1 << adapter->portnum))
1485 return 1; 1496 return 1;
1486 } 1497 }
@@ -1541,6 +1552,7 @@ void qlcnic_82xx_get_func_no(struct qlcnic_adapter *adapter)
1541void qlcnic_82xx_read_crb(struct qlcnic_adapter *adapter, char *buf, 1552void qlcnic_82xx_read_crb(struct qlcnic_adapter *adapter, char *buf,
1542 loff_t offset, size_t size) 1553 loff_t offset, size_t size)
1543{ 1554{
1555 int err = 0;
1544 u32 data; 1556 u32 data;
1545 u64 qmdata; 1557 u64 qmdata;
1546 1558
@@ -1548,7 +1560,7 @@ void qlcnic_82xx_read_crb(struct qlcnic_adapter *adapter, char *buf,
1548 qlcnic_pci_camqm_read_2M(adapter, offset, &qmdata); 1560 qlcnic_pci_camqm_read_2M(adapter, offset, &qmdata);
1549 memcpy(buf, &qmdata, size); 1561 memcpy(buf, &qmdata, size);
1550 } else { 1562 } else {
1551 data = QLCRD32(adapter, offset); 1563 data = QLCRD32(adapter, offset, &err);
1552 memcpy(buf, &data, size); 1564 memcpy(buf, &data, size);
1553 } 1565 }
1554} 1566}
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.h b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.h
index 2c22504f57aa..4a71b28effcb 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.h
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.h
@@ -154,7 +154,7 @@ struct qlcnic_hardware_context;
154struct qlcnic_adapter; 154struct qlcnic_adapter;
155 155
156int qlcnic_82xx_start_firmware(struct qlcnic_adapter *); 156int qlcnic_82xx_start_firmware(struct qlcnic_adapter *);
157int qlcnic_82xx_hw_read_wx_2M(struct qlcnic_adapter *adapter, ulong); 157int qlcnic_82xx_hw_read_wx_2M(struct qlcnic_adapter *adapter, ulong, int *);
158int qlcnic_82xx_hw_write_wx_2M(struct qlcnic_adapter *, ulong, u32); 158int qlcnic_82xx_hw_write_wx_2M(struct qlcnic_adapter *, ulong, u32);
159int qlcnic_82xx_config_hw_lro(struct qlcnic_adapter *adapter, int); 159int qlcnic_82xx_config_hw_lro(struct qlcnic_adapter *adapter, int);
160int qlcnic_82xx_nic_set_promisc(struct qlcnic_adapter *adapter, u32); 160int qlcnic_82xx_nic_set_promisc(struct qlcnic_adapter *adapter, u32);
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_init.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_init.c
index d28336fc65ab..974d62607e13 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_init.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_init.c
@@ -142,7 +142,7 @@ void qlcnic_release_tx_buffers(struct qlcnic_adapter *adapter)
142 buffrag->length, PCI_DMA_TODEVICE); 142 buffrag->length, PCI_DMA_TODEVICE);
143 buffrag->dma = 0ULL; 143 buffrag->dma = 0ULL;
144 } 144 }
145 for (j = 0; j < cmd_buf->frag_count; j++) { 145 for (j = 1; j < cmd_buf->frag_count; j++) {
146 buffrag++; 146 buffrag++;
147 if (buffrag->dma) { 147 if (buffrag->dma) {
148 pci_unmap_page(adapter->pdev, buffrag->dma, 148 pci_unmap_page(adapter->pdev, buffrag->dma,
@@ -286,10 +286,11 @@ static int qlcnic_wait_rom_done(struct qlcnic_adapter *adapter)
286{ 286{
287 long timeout = 0; 287 long timeout = 0;
288 long done = 0; 288 long done = 0;
289 int err = 0;
289 290
290 cond_resched(); 291 cond_resched();
291 while (done == 0) { 292 while (done == 0) {
292 done = QLCRD32(adapter, QLCNIC_ROMUSB_GLB_STATUS); 293 done = QLCRD32(adapter, QLCNIC_ROMUSB_GLB_STATUS, &err);
293 done &= 2; 294 done &= 2;
294 if (++timeout >= QLCNIC_MAX_ROM_WAIT_USEC) { 295 if (++timeout >= QLCNIC_MAX_ROM_WAIT_USEC) {
295 dev_err(&adapter->pdev->dev, 296 dev_err(&adapter->pdev->dev,
@@ -304,6 +305,8 @@ static int qlcnic_wait_rom_done(struct qlcnic_adapter *adapter)
304static int do_rom_fast_read(struct qlcnic_adapter *adapter, 305static int do_rom_fast_read(struct qlcnic_adapter *adapter,
305 u32 addr, u32 *valp) 306 u32 addr, u32 *valp)
306{ 307{
308 int err = 0;
309
307 QLCWR32(adapter, QLCNIC_ROMUSB_ROM_ADDRESS, addr); 310 QLCWR32(adapter, QLCNIC_ROMUSB_ROM_ADDRESS, addr);
308 QLCWR32(adapter, QLCNIC_ROMUSB_ROM_DUMMY_BYTE_CNT, 0); 311 QLCWR32(adapter, QLCNIC_ROMUSB_ROM_DUMMY_BYTE_CNT, 0);
309 QLCWR32(adapter, QLCNIC_ROMUSB_ROM_ABYTE_CNT, 3); 312 QLCWR32(adapter, QLCNIC_ROMUSB_ROM_ABYTE_CNT, 3);
@@ -317,7 +320,9 @@ static int do_rom_fast_read(struct qlcnic_adapter *adapter,
317 udelay(10); 320 udelay(10);
318 QLCWR32(adapter, QLCNIC_ROMUSB_ROM_DUMMY_BYTE_CNT, 0); 321 QLCWR32(adapter, QLCNIC_ROMUSB_ROM_DUMMY_BYTE_CNT, 0);
319 322
320 *valp = QLCRD32(adapter, QLCNIC_ROMUSB_ROM_RDATA); 323 *valp = QLCRD32(adapter, QLCNIC_ROMUSB_ROM_RDATA, &err);
324 if (err == -EIO)
325 return err;
321 return 0; 326 return 0;
322} 327}
323 328
@@ -369,11 +374,11 @@ int qlcnic_rom_fast_read(struct qlcnic_adapter *adapter, u32 addr, u32 *valp)
369 374
370int qlcnic_pinit_from_rom(struct qlcnic_adapter *adapter) 375int qlcnic_pinit_from_rom(struct qlcnic_adapter *adapter)
371{ 376{
372 int addr, val; 377 int addr, err = 0;
373 int i, n, init_delay; 378 int i, n, init_delay;
374 struct crb_addr_pair *buf; 379 struct crb_addr_pair *buf;
375 unsigned offset; 380 unsigned offset;
376 u32 off; 381 u32 off, val;
377 struct pci_dev *pdev = adapter->pdev; 382 struct pci_dev *pdev = adapter->pdev;
378 383
379 QLC_SHARED_REG_WR32(adapter, QLCNIC_CMDPEG_STATE, 0); 384 QLC_SHARED_REG_WR32(adapter, QLCNIC_CMDPEG_STATE, 0);
@@ -402,7 +407,9 @@ int qlcnic_pinit_from_rom(struct qlcnic_adapter *adapter)
402 QLCWR32(adapter, QLCNIC_CRB_NIU + 0xb0000, 0x00); 407 QLCWR32(adapter, QLCNIC_CRB_NIU + 0xb0000, 0x00);
403 408
404 /* halt sre */ 409 /* halt sre */
405 val = QLCRD32(adapter, QLCNIC_CRB_SRE + 0x1000); 410 val = QLCRD32(adapter, QLCNIC_CRB_SRE + 0x1000, &err);
411 if (err == -EIO)
412 return err;
406 QLCWR32(adapter, QLCNIC_CRB_SRE + 0x1000, val & (~(0x1))); 413 QLCWR32(adapter, QLCNIC_CRB_SRE + 0x1000, val & (~(0x1)));
407 414
408 /* halt epg */ 415 /* halt epg */
@@ -719,10 +726,12 @@ qlcnic_check_flash_fw_ver(struct qlcnic_adapter *adapter)
719static int 726static int
720qlcnic_has_mn(struct qlcnic_adapter *adapter) 727qlcnic_has_mn(struct qlcnic_adapter *adapter)
721{ 728{
722 u32 capability; 729 u32 capability = 0;
723 capability = 0; 730 int err = 0;
724 731
725 capability = QLCRD32(adapter, QLCNIC_PEG_TUNE_CAPABILITY); 732 capability = QLCRD32(adapter, QLCNIC_PEG_TUNE_CAPABILITY, &err);
733 if (err == -EIO)
734 return err;
726 if (capability & QLCNIC_PEG_TUNE_MN_PRESENT) 735 if (capability & QLCNIC_PEG_TUNE_MN_PRESENT)
727 return 1; 736 return 1;
728 737
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c
index d3f8797efcc3..6946d354f44f 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c
@@ -161,36 +161,68 @@ static inline int qlcnic_82xx_is_lb_pkt(u64 sts_data)
161 return (qlcnic_get_sts_status(sts_data) == STATUS_CKSUM_LOOP) ? 1 : 0; 161 return (qlcnic_get_sts_status(sts_data) == STATUS_CKSUM_LOOP) ? 1 : 0;
162} 162}
163 163
164static void qlcnic_delete_rx_list_mac(struct qlcnic_adapter *adapter,
165 struct qlcnic_filter *fil,
166 void *addr, u16 vlan_id)
167{
168 int ret;
169 u8 op;
170
171 op = vlan_id ? QLCNIC_MAC_VLAN_ADD : QLCNIC_MAC_ADD;
172 ret = qlcnic_sre_macaddr_change(adapter, addr, vlan_id, op);
173 if (ret)
174 return;
175
176 op = vlan_id ? QLCNIC_MAC_VLAN_DEL : QLCNIC_MAC_DEL;
177 ret = qlcnic_sre_macaddr_change(adapter, addr, vlan_id, op);
178 if (!ret) {
179 hlist_del(&fil->fnode);
180 adapter->rx_fhash.fnum--;
181 }
182}
183
184static struct qlcnic_filter *qlcnic_find_mac_filter(struct hlist_head *head,
185 void *addr, u16 vlan_id)
186{
187 struct qlcnic_filter *tmp_fil = NULL;
188 struct hlist_node *n;
189
190 hlist_for_each_entry_safe(tmp_fil, n, head, fnode) {
191 if (!memcmp(tmp_fil->faddr, addr, ETH_ALEN) &&
192 tmp_fil->vlan_id == vlan_id)
193 return tmp_fil;
194 }
195
196 return NULL;
197}
198
164void qlcnic_add_lb_filter(struct qlcnic_adapter *adapter, struct sk_buff *skb, 199void qlcnic_add_lb_filter(struct qlcnic_adapter *adapter, struct sk_buff *skb,
165 int loopback_pkt, u16 vlan_id) 200 int loopback_pkt, u16 vlan_id)
166{ 201{
167 struct ethhdr *phdr = (struct ethhdr *)(skb->data); 202 struct ethhdr *phdr = (struct ethhdr *)(skb->data);
168 struct qlcnic_filter *fil, *tmp_fil; 203 struct qlcnic_filter *fil, *tmp_fil;
169 struct hlist_node *n;
170 struct hlist_head *head; 204 struct hlist_head *head;
171 unsigned long time; 205 unsigned long time;
172 u64 src_addr = 0; 206 u64 src_addr = 0;
173 u8 hindex, found = 0, op; 207 u8 hindex, op;
174 int ret; 208 int ret;
175 209
176 memcpy(&src_addr, phdr->h_source, ETH_ALEN); 210 memcpy(&src_addr, phdr->h_source, ETH_ALEN);
211 hindex = qlcnic_mac_hash(src_addr) &
212 (adapter->fhash.fbucket_size - 1);
177 213
178 if (loopback_pkt) { 214 if (loopback_pkt) {
179 if (adapter->rx_fhash.fnum >= adapter->rx_fhash.fmax) 215 if (adapter->rx_fhash.fnum >= adapter->rx_fhash.fmax)
180 return; 216 return;
181 217
182 hindex = qlcnic_mac_hash(src_addr) &
183 (adapter->fhash.fbucket_size - 1);
184 head = &(adapter->rx_fhash.fhead[hindex]); 218 head = &(adapter->rx_fhash.fhead[hindex]);
185 219
186 hlist_for_each_entry_safe(tmp_fil, n, head, fnode) { 220 tmp_fil = qlcnic_find_mac_filter(head, &src_addr, vlan_id);
187 if (!memcmp(tmp_fil->faddr, &src_addr, ETH_ALEN) && 221 if (tmp_fil) {
188 tmp_fil->vlan_id == vlan_id) { 222 time = tmp_fil->ftime;
189 time = tmp_fil->ftime; 223 if (time_after(jiffies, QLCNIC_READD_AGE * HZ + time))
190 if (jiffies > (QLCNIC_READD_AGE * HZ + time)) 224 tmp_fil->ftime = jiffies;
191 tmp_fil->ftime = jiffies; 225 return;
192 return;
193 }
194 } 226 }
195 227
196 fil = kzalloc(sizeof(struct qlcnic_filter), GFP_ATOMIC); 228 fil = kzalloc(sizeof(struct qlcnic_filter), GFP_ATOMIC);
@@ -205,36 +237,37 @@ void qlcnic_add_lb_filter(struct qlcnic_adapter *adapter, struct sk_buff *skb,
205 adapter->rx_fhash.fnum++; 237 adapter->rx_fhash.fnum++;
206 spin_unlock(&adapter->rx_mac_learn_lock); 238 spin_unlock(&adapter->rx_mac_learn_lock);
207 } else { 239 } else {
208 hindex = qlcnic_mac_hash(src_addr) & 240 head = &adapter->fhash.fhead[hindex];
209 (adapter->fhash.fbucket_size - 1);
210 head = &(adapter->rx_fhash.fhead[hindex]);
211 spin_lock(&adapter->rx_mac_learn_lock);
212 hlist_for_each_entry_safe(tmp_fil, n, head, fnode) {
213 if (!memcmp(tmp_fil->faddr, &src_addr, ETH_ALEN) &&
214 tmp_fil->vlan_id == vlan_id) {
215 found = 1;
216 break;
217 }
218 }
219 241
220 if (!found) { 242 spin_lock(&adapter->mac_learn_lock);
221 spin_unlock(&adapter->rx_mac_learn_lock);
222 return;
223 }
224 243
225 op = vlan_id ? QLCNIC_MAC_VLAN_ADD : QLCNIC_MAC_ADD; 244 tmp_fil = qlcnic_find_mac_filter(head, &src_addr, vlan_id);
226 ret = qlcnic_sre_macaddr_change(adapter, (u8 *)&src_addr, 245 if (tmp_fil) {
227 vlan_id, op);
228 if (!ret) {
229 op = vlan_id ? QLCNIC_MAC_VLAN_DEL : QLCNIC_MAC_DEL; 246 op = vlan_id ? QLCNIC_MAC_VLAN_DEL : QLCNIC_MAC_DEL;
230 ret = qlcnic_sre_macaddr_change(adapter, 247 ret = qlcnic_sre_macaddr_change(adapter,
231 (u8 *)&src_addr, 248 (u8 *)&src_addr,
232 vlan_id, op); 249 vlan_id, op);
233 if (!ret) { 250 if (!ret) {
234 hlist_del(&(tmp_fil->fnode)); 251 hlist_del(&tmp_fil->fnode);
235 adapter->rx_fhash.fnum--; 252 adapter->fhash.fnum--;
236 } 253 }
254
255 spin_unlock(&adapter->mac_learn_lock);
256
257 return;
237 } 258 }
259
260 spin_unlock(&adapter->mac_learn_lock);
261
262 head = &adapter->rx_fhash.fhead[hindex];
263
264 spin_lock(&adapter->rx_mac_learn_lock);
265
266 tmp_fil = qlcnic_find_mac_filter(head, &src_addr, vlan_id);
267 if (tmp_fil)
268 qlcnic_delete_rx_list_mac(adapter, tmp_fil, &src_addr,
269 vlan_id);
270
238 spin_unlock(&adapter->rx_mac_learn_lock); 271 spin_unlock(&adapter->rx_mac_learn_lock);
239 } 272 }
240} 273}
@@ -262,7 +295,7 @@ void qlcnic_82xx_change_filter(struct qlcnic_adapter *adapter, u64 *uaddr,
262 295
263 mac_req = (struct qlcnic_mac_req *)&(req->words[0]); 296 mac_req = (struct qlcnic_mac_req *)&(req->words[0]);
264 mac_req->op = vlan_id ? QLCNIC_MAC_VLAN_ADD : QLCNIC_MAC_ADD; 297 mac_req->op = vlan_id ? QLCNIC_MAC_VLAN_ADD : QLCNIC_MAC_ADD;
265 memcpy(mac_req->mac_addr, &uaddr, ETH_ALEN); 298 memcpy(mac_req->mac_addr, uaddr, ETH_ALEN);
266 299
267 vlan_req = (struct qlcnic_vlan_req *)&req->words[1]; 300 vlan_req = (struct qlcnic_vlan_req *)&req->words[1];
268 vlan_req->vlan_id = cpu_to_le16(vlan_id); 301 vlan_req->vlan_id = cpu_to_le16(vlan_id);
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
index 4528f8ec333b..ee013fcc3322 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
@@ -977,8 +977,8 @@ qlcnic_check_options(struct qlcnic_adapter *adapter)
977static int 977static int
978qlcnic_initialize_nic(struct qlcnic_adapter *adapter) 978qlcnic_initialize_nic(struct qlcnic_adapter *adapter)
979{ 979{
980 int err;
981 struct qlcnic_info nic_info; 980 struct qlcnic_info nic_info;
981 int err = 0;
982 982
983 memset(&nic_info, 0, sizeof(struct qlcnic_info)); 983 memset(&nic_info, 0, sizeof(struct qlcnic_info));
984 err = qlcnic_get_nic_info(adapter, &nic_info, adapter->ahw->pci_func); 984 err = qlcnic_get_nic_info(adapter, &nic_info, adapter->ahw->pci_func);
@@ -993,7 +993,9 @@ qlcnic_initialize_nic(struct qlcnic_adapter *adapter)
993 993
994 if (adapter->ahw->capabilities & QLCNIC_FW_CAPABILITY_MORE_CAPS) { 994 if (adapter->ahw->capabilities & QLCNIC_FW_CAPABILITY_MORE_CAPS) {
995 u32 temp; 995 u32 temp;
996 temp = QLCRD32(adapter, CRB_FW_CAPABILITIES_2); 996 temp = QLCRD32(adapter, CRB_FW_CAPABILITIES_2, &err);
997 if (err == -EIO)
998 return err;
997 adapter->ahw->extra_capability[0] = temp; 999 adapter->ahw->extra_capability[0] = temp;
998 } 1000 }
999 adapter->ahw->max_mac_filters = nic_info.max_mac_filters; 1001 adapter->ahw->max_mac_filters = nic_info.max_mac_filters;
@@ -1383,6 +1385,8 @@ qlcnic_request_irq(struct qlcnic_adapter *adapter)
1383 if (adapter->ahw->diag_test == QLCNIC_INTERRUPT_TEST) { 1385 if (adapter->ahw->diag_test == QLCNIC_INTERRUPT_TEST) {
1384 if (qlcnic_82xx_check(adapter)) 1386 if (qlcnic_82xx_check(adapter))
1385 handler = qlcnic_tmp_intr; 1387 handler = qlcnic_tmp_intr;
1388 else
1389 handler = qlcnic_83xx_tmp_intr;
1386 if (!QLCNIC_IS_MSI_FAMILY(adapter)) 1390 if (!QLCNIC_IS_MSI_FAMILY(adapter))
1387 flags |= IRQF_SHARED; 1391 flags |= IRQF_SHARED;
1388 1392
@@ -1531,12 +1535,12 @@ int __qlcnic_up(struct qlcnic_adapter *adapter, struct net_device *netdev)
1531 if (netdev->features & NETIF_F_LRO) 1535 if (netdev->features & NETIF_F_LRO)
1532 qlcnic_config_hw_lro(adapter, QLCNIC_LRO_ENABLED); 1536 qlcnic_config_hw_lro(adapter, QLCNIC_LRO_ENABLED);
1533 1537
1538 set_bit(__QLCNIC_DEV_UP, &adapter->state);
1534 qlcnic_napi_enable(adapter); 1539 qlcnic_napi_enable(adapter);
1535 1540
1536 qlcnic_linkevent_request(adapter, 1); 1541 qlcnic_linkevent_request(adapter, 1);
1537 1542
1538 adapter->ahw->reset_context = 0; 1543 adapter->ahw->reset_context = 0;
1539 set_bit(__QLCNIC_DEV_UP, &adapter->state);
1540 return 0; 1544 return 0;
1541} 1545}
1542 1546
@@ -2139,7 +2143,7 @@ qlcnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
2139 if (qlcnic_83xx_check(adapter) && !qlcnic_use_msi_x && 2143 if (qlcnic_83xx_check(adapter) && !qlcnic_use_msi_x &&
2140 !!qlcnic_use_msi) 2144 !!qlcnic_use_msi)
2141 dev_warn(&pdev->dev, 2145 dev_warn(&pdev->dev,
2142 "83xx adapter do not support MSI interrupts\n"); 2146 "Device does not support MSI interrupts\n");
2143 2147
2144 err = qlcnic_setup_intr(adapter, 0); 2148 err = qlcnic_setup_intr(adapter, 0);
2145 if (err) { 2149 if (err) {
@@ -3093,6 +3097,7 @@ qlcnic_check_health(struct qlcnic_adapter *adapter)
3093{ 3097{
3094 u32 state = 0, heartbeat; 3098 u32 state = 0, heartbeat;
3095 u32 peg_status; 3099 u32 peg_status;
3100 int err = 0;
3096 3101
3097 if (qlcnic_check_temp(adapter)) 3102 if (qlcnic_check_temp(adapter))
3098 goto detach; 3103 goto detach;
@@ -3139,11 +3144,11 @@ qlcnic_check_health(struct qlcnic_adapter *adapter)
3139 "PEG_NET_4_PC: 0x%x\n", 3144 "PEG_NET_4_PC: 0x%x\n",
3140 peg_status, 3145 peg_status,
3141 QLC_SHARED_REG_RD32(adapter, QLCNIC_PEG_HALT_STATUS2), 3146 QLC_SHARED_REG_RD32(adapter, QLCNIC_PEG_HALT_STATUS2),
3142 QLCRD32(adapter, QLCNIC_CRB_PEG_NET_0 + 0x3c), 3147 QLCRD32(adapter, QLCNIC_CRB_PEG_NET_0 + 0x3c, &err),
3143 QLCRD32(adapter, QLCNIC_CRB_PEG_NET_1 + 0x3c), 3148 QLCRD32(adapter, QLCNIC_CRB_PEG_NET_1 + 0x3c, &err),
3144 QLCRD32(adapter, QLCNIC_CRB_PEG_NET_2 + 0x3c), 3149 QLCRD32(adapter, QLCNIC_CRB_PEG_NET_2 + 0x3c, &err),
3145 QLCRD32(adapter, QLCNIC_CRB_PEG_NET_3 + 0x3c), 3150 QLCRD32(adapter, QLCNIC_CRB_PEG_NET_3 + 0x3c, &err),
3146 QLCRD32(adapter, QLCNIC_CRB_PEG_NET_4 + 0x3c)); 3151 QLCRD32(adapter, QLCNIC_CRB_PEG_NET_4 + 0x3c, &err));
3147 if (QLCNIC_FWERROR_CODE(peg_status) == 0x67) 3152 if (QLCNIC_FWERROR_CODE(peg_status) == 0x67)
3148 dev_err(&adapter->pdev->dev, 3153 dev_err(&adapter->pdev->dev,
3149 "Firmware aborted with error code 0x00006700. " 3154 "Firmware aborted with error code 0x00006700. "
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_minidump.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_minidump.c
index ab8a6744d402..79e54efe07b9 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_minidump.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_minidump.c
@@ -1084,7 +1084,7 @@ flash_temp:
1084 tmpl_hdr = ahw->fw_dump.tmpl_hdr; 1084 tmpl_hdr = ahw->fw_dump.tmpl_hdr;
1085 tmpl_hdr->drv_cap_mask = QLCNIC_DUMP_MASK_DEF; 1085 tmpl_hdr->drv_cap_mask = QLCNIC_DUMP_MASK_DEF;
1086 1086
1087 if ((tmpl_hdr->version & 0xffffff) >= 0x20001) 1087 if ((tmpl_hdr->version & 0xfffff) >= 0x20001)
1088 ahw->fw_dump.use_pex_dma = true; 1088 ahw->fw_dump.use_pex_dma = true;
1089 else 1089 else
1090 ahw->fw_dump.use_pex_dma = false; 1090 ahw->fw_dump.use_pex_dma = false;
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c
index 62380ce89905..5d40045b3cea 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c
@@ -562,7 +562,7 @@ static int qlcnic_sriov_setup_vf(struct qlcnic_adapter *adapter,
562 INIT_LIST_HEAD(&adapter->vf_mc_list); 562 INIT_LIST_HEAD(&adapter->vf_mc_list);
563 if (!qlcnic_use_msi_x && !!qlcnic_use_msi) 563 if (!qlcnic_use_msi_x && !!qlcnic_use_msi)
564 dev_warn(&adapter->pdev->dev, 564 dev_warn(&adapter->pdev->dev,
565 "83xx adapter do not support MSI interrupts\n"); 565 "Device does not support MSI interrupts\n");
566 566
567 err = qlcnic_setup_intr(adapter, 1); 567 err = qlcnic_setup_intr(adapter, 1);
568 if (err) { 568 if (err) {
@@ -762,6 +762,7 @@ static int qlcnic_sriov_alloc_bc_mbx_args(struct qlcnic_cmd_args *mbx, u32 type)
762 memset(mbx->rsp.arg, 0, sizeof(u32) * mbx->rsp.num); 762 memset(mbx->rsp.arg, 0, sizeof(u32) * mbx->rsp.num);
763 mbx->req.arg[0] = (type | (mbx->req.num << 16) | 763 mbx->req.arg[0] = (type | (mbx->req.num << 16) |
764 (3 << 29)); 764 (3 << 29));
765 mbx->rsp.arg[0] = (type & 0xffff) | mbx->rsp.num << 16;
765 return 0; 766 return 0;
766 } 767 }
767 } 768 }
@@ -813,6 +814,7 @@ static int qlcnic_sriov_prepare_bc_hdr(struct qlcnic_bc_trans *trans,
813 cmd->req.num = trans->req_pay_size / 4; 814 cmd->req.num = trans->req_pay_size / 4;
814 cmd->rsp.num = trans->rsp_pay_size / 4; 815 cmd->rsp.num = trans->rsp_pay_size / 4;
815 hdr = trans->rsp_hdr; 816 hdr = trans->rsp_hdr;
817 cmd->op_type = trans->req_hdr->op_type;
816 } 818 }
817 819
818 trans->trans_id = seq; 820 trans->trans_id = seq;
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c
index ee0c1d307966..eb49cd65378c 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c
@@ -635,12 +635,12 @@ static int qlcnic_sriov_pf_channel_cfg_cmd(struct qlcnic_bc_trans *trans,
635 struct qlcnic_cmd_args *cmd) 635 struct qlcnic_cmd_args *cmd)
636{ 636{
637 struct qlcnic_vf_info *vf = trans->vf; 637 struct qlcnic_vf_info *vf = trans->vf;
638 struct qlcnic_adapter *adapter = vf->adapter; 638 struct qlcnic_vport *vp = vf->vp;
639 int err; 639 struct qlcnic_adapter *adapter;
640 u16 func = vf->pci_func; 640 u16 func = vf->pci_func;
641 int err;
641 642
642 cmd->rsp.arg[0] = trans->req_hdr->cmd_op; 643 adapter = vf->adapter;
643 cmd->rsp.arg[0] |= (1 << 16);
644 644
645 if (trans->req_hdr->cmd_op == QLCNIC_BC_CMD_CHANNEL_INIT) { 645 if (trans->req_hdr->cmd_op == QLCNIC_BC_CMD_CHANNEL_INIT) {
646 err = qlcnic_sriov_pf_config_vport(adapter, 1, func); 646 err = qlcnic_sriov_pf_config_vport(adapter, 1, func);
@@ -650,6 +650,8 @@ static int qlcnic_sriov_pf_channel_cfg_cmd(struct qlcnic_bc_trans *trans,
650 qlcnic_sriov_pf_config_vport(adapter, 0, func); 650 qlcnic_sriov_pf_config_vport(adapter, 0, func);
651 } 651 }
652 } else { 652 } else {
653 if (vp->vlan_mode == QLC_GUEST_VLAN_MODE)
654 vp->vlan = 0;
653 err = qlcnic_sriov_pf_config_vport(adapter, 0, func); 655 err = qlcnic_sriov_pf_config_vport(adapter, 0, func);
654 } 656 }
655 657
@@ -1183,7 +1185,7 @@ static int qlcnic_sriov_pf_get_acl_cmd(struct qlcnic_bc_trans *trans,
1183 u8 cmd_op, mode = vp->vlan_mode; 1185 u8 cmd_op, mode = vp->vlan_mode;
1184 1186
1185 cmd_op = trans->req_hdr->cmd_op; 1187 cmd_op = trans->req_hdr->cmd_op;
1186 cmd->rsp.arg[0] = (cmd_op & 0xffff) | 14 << 16 | 1 << 25; 1188 cmd->rsp.arg[0] |= 1 << 25;
1187 1189
1188 switch (mode) { 1190 switch (mode) {
1189 case QLC_GUEST_VLAN_MODE: 1191 case QLC_GUEST_VLAN_MODE:
@@ -1561,6 +1563,7 @@ void qlcnic_sriov_pf_handle_flr(struct qlcnic_sriov *sriov,
1561 struct qlcnic_vf_info *vf) 1563 struct qlcnic_vf_info *vf)
1562{ 1564{
1563 struct net_device *dev = vf->adapter->netdev; 1565 struct net_device *dev = vf->adapter->netdev;
1566 struct qlcnic_vport *vp = vf->vp;
1564 1567
1565 if (!test_and_clear_bit(QLC_BC_VF_STATE, &vf->state)) { 1568 if (!test_and_clear_bit(QLC_BC_VF_STATE, &vf->state)) {
1566 clear_bit(QLC_BC_VF_FLR, &vf->state); 1569 clear_bit(QLC_BC_VF_FLR, &vf->state);
@@ -1573,6 +1576,9 @@ void qlcnic_sriov_pf_handle_flr(struct qlcnic_sriov *sriov,
1573 return; 1576 return;
1574 } 1577 }
1575 1578
1579 if (vp->vlan_mode == QLC_GUEST_VLAN_MODE)
1580 vp->vlan = 0;
1581
1576 qlcnic_sriov_schedule_flr(sriov, vf, qlcnic_sriov_pf_process_flr); 1582 qlcnic_sriov_schedule_flr(sriov, vf, qlcnic_sriov_pf_process_flr);
1577 netdev_info(dev, "FLR received for PCI func %d\n", vf->pci_func); 1583 netdev_info(dev, "FLR received for PCI func %d\n", vf->pci_func);
1578} 1584}
@@ -1621,13 +1627,15 @@ int qlcnic_sriov_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
1621{ 1627{
1622 struct qlcnic_adapter *adapter = netdev_priv(netdev); 1628 struct qlcnic_adapter *adapter = netdev_priv(netdev);
1623 struct qlcnic_sriov *sriov = adapter->ahw->sriov; 1629 struct qlcnic_sriov *sriov = adapter->ahw->sriov;
1624 int i, num_vfs = sriov->num_vfs; 1630 int i, num_vfs;
1625 struct qlcnic_vf_info *vf_info; 1631 struct qlcnic_vf_info *vf_info;
1626 u8 *curr_mac; 1632 u8 *curr_mac;
1627 1633
1628 if (!qlcnic_sriov_pf_check(adapter)) 1634 if (!qlcnic_sriov_pf_check(adapter))
1629 return -EOPNOTSUPP; 1635 return -EOPNOTSUPP;
1630 1636
1637 num_vfs = sriov->num_vfs;
1638
1631 if (!is_valid_ether_addr(mac) || vf >= num_vfs) 1639 if (!is_valid_ether_addr(mac) || vf >= num_vfs)
1632 return -EINVAL; 1640 return -EINVAL;
1633 1641
@@ -1741,6 +1749,7 @@ int qlcnic_sriov_set_vf_vlan(struct net_device *netdev, int vf,
1741 1749
1742 switch (vlan) { 1750 switch (vlan) {
1743 case 4095: 1751 case 4095:
1752 vp->vlan = 0;
1744 vp->vlan_mode = QLC_GUEST_VLAN_MODE; 1753 vp->vlan_mode = QLC_GUEST_VLAN_MODE;
1745 break; 1754 break;
1746 case 0: 1755 case 0:
@@ -1759,6 +1768,29 @@ int qlcnic_sriov_set_vf_vlan(struct net_device *netdev, int vf,
1759 return 0; 1768 return 0;
1760} 1769}
1761 1770
1771static inline __u32 qlcnic_sriov_get_vf_vlan(struct qlcnic_adapter *adapter,
1772 struct qlcnic_vport *vp, int vf)
1773{
1774 __u32 vlan = 0;
1775
1776 switch (vp->vlan_mode) {
1777 case QLC_PVID_MODE:
1778 vlan = vp->vlan;
1779 break;
1780 case QLC_GUEST_VLAN_MODE:
1781 vlan = MAX_VLAN_ID;
1782 break;
1783 case QLC_NO_VLAN_MODE:
1784 vlan = 0;
1785 break;
1786 default:
1787 netdev_info(adapter->netdev, "Invalid VLAN mode = %d for VF %d\n",
1788 vp->vlan_mode, vf);
1789 }
1790
1791 return vlan;
1792}
1793
1762int qlcnic_sriov_get_vf_config(struct net_device *netdev, 1794int qlcnic_sriov_get_vf_config(struct net_device *netdev,
1763 int vf, struct ifla_vf_info *ivi) 1795 int vf, struct ifla_vf_info *ivi)
1764{ 1796{
@@ -1774,7 +1806,7 @@ int qlcnic_sriov_get_vf_config(struct net_device *netdev,
1774 1806
1775 vp = sriov->vf_info[vf].vp; 1807 vp = sriov->vf_info[vf].vp;
1776 memcpy(&ivi->mac, vp->mac, ETH_ALEN); 1808 memcpy(&ivi->mac, vp->mac, ETH_ALEN);
1777 ivi->vlan = vp->vlan; 1809 ivi->vlan = qlcnic_sriov_get_vf_vlan(adapter, vp, vf);
1778 ivi->qos = vp->qos; 1810 ivi->qos = vp->qos;
1779 ivi->spoofchk = vp->spoofchk; 1811 ivi->spoofchk = vp->spoofchk;
1780 if (vp->max_tx_bw == MAX_BW) 1812 if (vp->max_tx_bw == MAX_BW)
diff --git a/drivers/net/ethernet/realtek/8139cp.c b/drivers/net/ethernet/realtek/8139cp.c
index e6acb9fa5767..6f35f8404d68 100644
--- a/drivers/net/ethernet/realtek/8139cp.c
+++ b/drivers/net/ethernet/realtek/8139cp.c
@@ -478,7 +478,7 @@ rx_status_loop:
478 478
479 while (1) { 479 while (1) {
480 u32 status, len; 480 u32 status, len;
481 dma_addr_t mapping; 481 dma_addr_t mapping, new_mapping;
482 struct sk_buff *skb, *new_skb; 482 struct sk_buff *skb, *new_skb;
483 struct cp_desc *desc; 483 struct cp_desc *desc;
484 const unsigned buflen = cp->rx_buf_sz; 484 const unsigned buflen = cp->rx_buf_sz;
@@ -520,6 +520,13 @@ rx_status_loop:
520 goto rx_next; 520 goto rx_next;
521 } 521 }
522 522
523 new_mapping = dma_map_single(&cp->pdev->dev, new_skb->data, buflen,
524 PCI_DMA_FROMDEVICE);
525 if (dma_mapping_error(&cp->pdev->dev, new_mapping)) {
526 dev->stats.rx_dropped++;
527 goto rx_next;
528 }
529
523 dma_unmap_single(&cp->pdev->dev, mapping, 530 dma_unmap_single(&cp->pdev->dev, mapping,
524 buflen, PCI_DMA_FROMDEVICE); 531 buflen, PCI_DMA_FROMDEVICE);
525 532
@@ -531,12 +538,11 @@ rx_status_loop:
531 538
532 skb_put(skb, len); 539 skb_put(skb, len);
533 540
534 mapping = dma_map_single(&cp->pdev->dev, new_skb->data, buflen,
535 PCI_DMA_FROMDEVICE);
536 cp->rx_skb[rx_tail] = new_skb; 541 cp->rx_skb[rx_tail] = new_skb;
537 542
538 cp_rx_skb(cp, skb, desc); 543 cp_rx_skb(cp, skb, desc);
539 rx++; 544 rx++;
545 mapping = new_mapping;
540 546
541rx_next: 547rx_next:
542 cp->rx_ring[rx_tail].opts2 = 0; 548 cp->rx_ring[rx_tail].opts2 = 0;
@@ -716,6 +722,22 @@ static inline u32 cp_tx_vlan_tag(struct sk_buff *skb)
716 TxVlanTag | swab16(vlan_tx_tag_get(skb)) : 0x00; 722 TxVlanTag | swab16(vlan_tx_tag_get(skb)) : 0x00;
717} 723}
718 724
725static void unwind_tx_frag_mapping(struct cp_private *cp, struct sk_buff *skb,
726 int first, int entry_last)
727{
728 int frag, index;
729 struct cp_desc *txd;
730 skb_frag_t *this_frag;
731 for (frag = 0; frag+first < entry_last; frag++) {
732 index = first+frag;
733 cp->tx_skb[index] = NULL;
734 txd = &cp->tx_ring[index];
735 this_frag = &skb_shinfo(skb)->frags[frag];
736 dma_unmap_single(&cp->pdev->dev, le64_to_cpu(txd->addr),
737 skb_frag_size(this_frag), PCI_DMA_TODEVICE);
738 }
739}
740
719static netdev_tx_t cp_start_xmit (struct sk_buff *skb, 741static netdev_tx_t cp_start_xmit (struct sk_buff *skb,
720 struct net_device *dev) 742 struct net_device *dev)
721{ 743{
@@ -749,6 +771,9 @@ static netdev_tx_t cp_start_xmit (struct sk_buff *skb,
749 771
750 len = skb->len; 772 len = skb->len;
751 mapping = dma_map_single(&cp->pdev->dev, skb->data, len, PCI_DMA_TODEVICE); 773 mapping = dma_map_single(&cp->pdev->dev, skb->data, len, PCI_DMA_TODEVICE);
774 if (dma_mapping_error(&cp->pdev->dev, mapping))
775 goto out_dma_error;
776
752 txd->opts2 = opts2; 777 txd->opts2 = opts2;
753 txd->addr = cpu_to_le64(mapping); 778 txd->addr = cpu_to_le64(mapping);
754 wmb(); 779 wmb();
@@ -786,6 +811,9 @@ static netdev_tx_t cp_start_xmit (struct sk_buff *skb,
786 first_len = skb_headlen(skb); 811 first_len = skb_headlen(skb);
787 first_mapping = dma_map_single(&cp->pdev->dev, skb->data, 812 first_mapping = dma_map_single(&cp->pdev->dev, skb->data,
788 first_len, PCI_DMA_TODEVICE); 813 first_len, PCI_DMA_TODEVICE);
814 if (dma_mapping_error(&cp->pdev->dev, first_mapping))
815 goto out_dma_error;
816
789 cp->tx_skb[entry] = skb; 817 cp->tx_skb[entry] = skb;
790 entry = NEXT_TX(entry); 818 entry = NEXT_TX(entry);
791 819
@@ -799,6 +827,11 @@ static netdev_tx_t cp_start_xmit (struct sk_buff *skb,
799 mapping = dma_map_single(&cp->pdev->dev, 827 mapping = dma_map_single(&cp->pdev->dev,
800 skb_frag_address(this_frag), 828 skb_frag_address(this_frag),
801 len, PCI_DMA_TODEVICE); 829 len, PCI_DMA_TODEVICE);
830 if (dma_mapping_error(&cp->pdev->dev, mapping)) {
831 unwind_tx_frag_mapping(cp, skb, first_entry, entry);
832 goto out_dma_error;
833 }
834
802 eor = (entry == (CP_TX_RING_SIZE - 1)) ? RingEnd : 0; 835 eor = (entry == (CP_TX_RING_SIZE - 1)) ? RingEnd : 0;
803 836
804 ctrl = eor | len | DescOwn; 837 ctrl = eor | len | DescOwn;
@@ -859,11 +892,16 @@ static netdev_tx_t cp_start_xmit (struct sk_buff *skb,
859 if (TX_BUFFS_AVAIL(cp) <= (MAX_SKB_FRAGS + 1)) 892 if (TX_BUFFS_AVAIL(cp) <= (MAX_SKB_FRAGS + 1))
860 netif_stop_queue(dev); 893 netif_stop_queue(dev);
861 894
895out_unlock:
862 spin_unlock_irqrestore(&cp->lock, intr_flags); 896 spin_unlock_irqrestore(&cp->lock, intr_flags);
863 897
864 cpw8(TxPoll, NormalTxPoll); 898 cpw8(TxPoll, NormalTxPoll);
865 899
866 return NETDEV_TX_OK; 900 return NETDEV_TX_OK;
901out_dma_error:
902 kfree_skb(skb);
903 cp->dev->stats.tx_dropped++;
904 goto out_unlock;
867} 905}
868 906
869/* Set or clear the multicast filter for this adaptor. 907/* Set or clear the multicast filter for this adaptor.
@@ -1054,6 +1092,10 @@ static int cp_refill_rx(struct cp_private *cp)
1054 1092
1055 mapping = dma_map_single(&cp->pdev->dev, skb->data, 1093 mapping = dma_map_single(&cp->pdev->dev, skb->data,
1056 cp->rx_buf_sz, PCI_DMA_FROMDEVICE); 1094 cp->rx_buf_sz, PCI_DMA_FROMDEVICE);
1095 if (dma_mapping_error(&cp->pdev->dev, mapping)) {
1096 kfree_skb(skb);
1097 goto err_out;
1098 }
1057 cp->rx_skb[i] = skb; 1099 cp->rx_skb[i] = skb;
1058 1100
1059 cp->rx_ring[i].opts2 = 0; 1101 cp->rx_ring[i].opts2 = 0;
diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c
index 4106a743ca74..b5eb4195fc99 100644
--- a/drivers/net/ethernet/realtek/r8169.c
+++ b/drivers/net/ethernet/realtek/r8169.c
@@ -3689,7 +3689,7 @@ static void rtl_phy_work(struct rtl8169_private *tp)
3689 if (tp->link_ok(ioaddr)) 3689 if (tp->link_ok(ioaddr))
3690 return; 3690 return;
3691 3691
3692 netif_warn(tp, link, tp->dev, "PHY reset until link up\n"); 3692 netif_dbg(tp, link, tp->dev, "PHY reset until link up\n");
3693 3693
3694 tp->phy_reset_enable(tp); 3694 tp->phy_reset_enable(tp);
3695 3695
@@ -6468,6 +6468,8 @@ static int rtl8169_close(struct net_device *dev)
6468 rtl8169_down(dev); 6468 rtl8169_down(dev);
6469 rtl_unlock_work(tp); 6469 rtl_unlock_work(tp);
6470 6470
6471 cancel_work_sync(&tp->wk.work);
6472
6471 free_irq(pdev->irq, dev); 6473 free_irq(pdev->irq, dev);
6472 6474
6473 dma_free_coherent(&pdev->dev, R8169_RX_RING_BYTES, tp->RxDescArray, 6475 dma_free_coherent(&pdev->dev, R8169_RX_RING_BYTES, tp->RxDescArray,
@@ -6793,8 +6795,6 @@ static void rtl_remove_one(struct pci_dev *pdev)
6793 rtl8168_driver_stop(tp); 6795 rtl8168_driver_stop(tp);
6794 } 6796 }
6795 6797
6796 cancel_work_sync(&tp->wk.work);
6797
6798 netif_napi_del(&tp->napi); 6798 netif_napi_del(&tp->napi);
6799 6799
6800 unregister_netdev(dev); 6800 unregister_netdev(dev);
diff --git a/drivers/net/ethernet/sfc/filter.c b/drivers/net/ethernet/sfc/filter.c
index b74a60ab9ac7..2a469b27a506 100644
--- a/drivers/net/ethernet/sfc/filter.c
+++ b/drivers/net/ethernet/sfc/filter.c
@@ -1209,7 +1209,9 @@ int efx_filter_rfs(struct net_device *net_dev, const struct sk_buff *skb,
1209 EFX_BUG_ON_PARANOID(skb_headlen(skb) < nhoff + 4 * ip->ihl + 4); 1209 EFX_BUG_ON_PARANOID(skb_headlen(skb) < nhoff + 4 * ip->ihl + 4);
1210 ports = (const __be16 *)(skb->data + nhoff + 4 * ip->ihl); 1210 ports = (const __be16 *)(skb->data + nhoff + 4 * ip->ihl);
1211 1211
1212 efx_filter_init_rx(&spec, EFX_FILTER_PRI_HINT, 0, rxq_index); 1212 efx_filter_init_rx(&spec, EFX_FILTER_PRI_HINT,
1213 efx->rx_scatter ? EFX_FILTER_FLAG_RX_SCATTER : 0,
1214 rxq_index);
1213 rc = efx_filter_set_ipv4_full(&spec, ip->protocol, 1215 rc = efx_filter_set_ipv4_full(&spec, ip->protocol,
1214 ip->daddr, ports[1], ip->saddr, ports[0]); 1216 ip->daddr, ports[1], ip->saddr, ports[0]);
1215 if (rc) 1217 if (rc)
diff --git a/drivers/net/ethernet/sis/sis900.c b/drivers/net/ethernet/sis/sis900.c
index eb4aea3fe793..f5d7ad75e479 100644
--- a/drivers/net/ethernet/sis/sis900.c
+++ b/drivers/net/ethernet/sis/sis900.c
@@ -1318,7 +1318,7 @@ static void sis900_timer(unsigned long data)
1318 if (duplex){ 1318 if (duplex){
1319 sis900_set_mode(sis_priv, speed, duplex); 1319 sis900_set_mode(sis_priv, speed, duplex);
1320 sis630_set_eq(net_dev, sis_priv->chipset_rev); 1320 sis630_set_eq(net_dev, sis_priv->chipset_rev);
1321 netif_start_queue(net_dev); 1321 netif_carrier_on(net_dev);
1322 } 1322 }
1323 1323
1324 sis_priv->timer.expires = jiffies + HZ; 1324 sis_priv->timer.expires = jiffies + HZ;
@@ -1336,10 +1336,8 @@ static void sis900_timer(unsigned long data)
1336 status = sis900_default_phy(net_dev); 1336 status = sis900_default_phy(net_dev);
1337 mii_phy = sis_priv->mii; 1337 mii_phy = sis_priv->mii;
1338 1338
1339 if (status & MII_STAT_LINK){ 1339 if (status & MII_STAT_LINK)
1340 sis900_check_mode(net_dev, mii_phy); 1340 sis900_check_mode(net_dev, mii_phy);
1341 netif_carrier_on(net_dev);
1342 }
1343 } else { 1341 } else {
1344 /* Link ON -> OFF */ 1342 /* Link ON -> OFF */
1345 if (!(status & MII_STAT_LINK)){ 1343 if (!(status & MII_STAT_LINK)){
@@ -1612,12 +1610,6 @@ sis900_start_xmit(struct sk_buff *skb, struct net_device *net_dev)
1612 unsigned int index_cur_tx, index_dirty_tx; 1610 unsigned int index_cur_tx, index_dirty_tx;
1613 unsigned int count_dirty_tx; 1611 unsigned int count_dirty_tx;
1614 1612
1615 /* Don't transmit data before the complete of auto-negotiation */
1616 if(!sis_priv->autong_complete){
1617 netif_stop_queue(net_dev);
1618 return NETDEV_TX_BUSY;
1619 }
1620
1621 spin_lock_irqsave(&sis_priv->lock, flags); 1613 spin_lock_irqsave(&sis_priv->lock, flags);
1622 1614
1623 /* Calculate the next Tx descriptor entry. */ 1615 /* Calculate the next Tx descriptor entry. */
diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c
index 05a1674e204f..22a7a4336211 100644
--- a/drivers/net/ethernet/ti/cpsw.c
+++ b/drivers/net/ethernet/ti/cpsw.c
@@ -1867,7 +1867,7 @@ static int cpsw_probe(struct platform_device *pdev)
1867 1867
1868 while ((res = platform_get_resource(priv->pdev, IORESOURCE_IRQ, k))) { 1868 while ((res = platform_get_resource(priv->pdev, IORESOURCE_IRQ, k))) {
1869 for (i = res->start; i <= res->end; i++) { 1869 for (i = res->start; i <= res->end; i++) {
1870 if (request_irq(i, cpsw_interrupt, IRQF_DISABLED, 1870 if (request_irq(i, cpsw_interrupt, 0,
1871 dev_name(&pdev->dev), priv)) { 1871 dev_name(&pdev->dev), priv)) {
1872 dev_err(priv->dev, "error attaching irq\n"); 1872 dev_err(priv->dev, "error attaching irq\n");
1873 goto clean_ale_ret; 1873 goto clean_ale_ret;
diff --git a/drivers/net/ethernet/ti/davinci_emac.c b/drivers/net/ethernet/ti/davinci_emac.c
index 07b176bcf929..1a222bce4bd7 100644
--- a/drivers/net/ethernet/ti/davinci_emac.c
+++ b/drivers/net/ethernet/ti/davinci_emac.c
@@ -1568,8 +1568,7 @@ static int emac_dev_open(struct net_device *ndev)
1568 while ((res = platform_get_resource(priv->pdev, IORESOURCE_IRQ, k))) { 1568 while ((res = platform_get_resource(priv->pdev, IORESOURCE_IRQ, k))) {
1569 for (i = res->start; i <= res->end; i++) { 1569 for (i = res->start; i <= res->end; i++) {
1570 if (devm_request_irq(&priv->pdev->dev, i, emac_irq, 1570 if (devm_request_irq(&priv->pdev->dev, i, emac_irq,
1571 IRQF_DISABLED, 1571 0, ndev->name, ndev))
1572 ndev->name, ndev))
1573 goto rollback; 1572 goto rollback;
1574 } 1573 }
1575 k++; 1574 k++;
diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c
index 4dccead586be..23a0fff0df52 100644
--- a/drivers/net/hyperv/netvsc_drv.c
+++ b/drivers/net/hyperv/netvsc_drv.c
@@ -431,8 +431,8 @@ static int netvsc_probe(struct hv_device *dev,
431 net->netdev_ops = &device_ops; 431 net->netdev_ops = &device_ops;
432 432
433 /* TODO: Add GSO and Checksum offload */ 433 /* TODO: Add GSO and Checksum offload */
434 net->hw_features = NETIF_F_SG; 434 net->hw_features = 0;
435 net->features = NETIF_F_SG | NETIF_F_HW_VLAN_CTAG_TX; 435 net->features = NETIF_F_HW_VLAN_CTAG_TX;
436 436
437 SET_ETHTOOL_OPS(net, &ethtool_ops); 437 SET_ETHTOOL_OPS(net, &ethtool_ops);
438 SET_NETDEV_DEV(net, &dev->device); 438 SET_NETDEV_DEV(net, &dev->device);
diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c
index 18373b6ae37d..d0f9c2fd1d4f 100644
--- a/drivers/net/macvlan.c
+++ b/drivers/net/macvlan.c
@@ -337,8 +337,11 @@ static int macvlan_open(struct net_device *dev)
337 int err; 337 int err;
338 338
339 if (vlan->port->passthru) { 339 if (vlan->port->passthru) {
340 if (!(vlan->flags & MACVLAN_FLAG_NOPROMISC)) 340 if (!(vlan->flags & MACVLAN_FLAG_NOPROMISC)) {
341 dev_set_promiscuity(lowerdev, 1); 341 err = dev_set_promiscuity(lowerdev, 1);
342 if (err < 0)
343 goto out;
344 }
342 goto hash_add; 345 goto hash_add;
343 } 346 }
344 347
@@ -863,6 +866,18 @@ static int macvlan_changelink(struct net_device *dev,
863 struct nlattr *tb[], struct nlattr *data[]) 866 struct nlattr *tb[], struct nlattr *data[])
864{ 867{
865 struct macvlan_dev *vlan = netdev_priv(dev); 868 struct macvlan_dev *vlan = netdev_priv(dev);
869 enum macvlan_mode mode;
870 bool set_mode = false;
871
872 /* Validate mode, but don't set yet: setting flags may fail. */
873 if (data && data[IFLA_MACVLAN_MODE]) {
874 set_mode = true;
875 mode = nla_get_u32(data[IFLA_MACVLAN_MODE]);
876 /* Passthrough mode can't be set or cleared dynamically */
877 if ((mode == MACVLAN_MODE_PASSTHRU) !=
878 (vlan->mode == MACVLAN_MODE_PASSTHRU))
879 return -EINVAL;
880 }
866 881
867 if (data && data[IFLA_MACVLAN_FLAGS]) { 882 if (data && data[IFLA_MACVLAN_FLAGS]) {
868 __u16 flags = nla_get_u16(data[IFLA_MACVLAN_FLAGS]); 883 __u16 flags = nla_get_u16(data[IFLA_MACVLAN_FLAGS]);
@@ -879,8 +894,8 @@ static int macvlan_changelink(struct net_device *dev,
879 } 894 }
880 vlan->flags = flags; 895 vlan->flags = flags;
881 } 896 }
882 if (data && data[IFLA_MACVLAN_MODE]) 897 if (set_mode)
883 vlan->mode = nla_get_u32(data[IFLA_MACVLAN_MODE]); 898 vlan->mode = mode;
884 return 0; 899 return 0;
885} 900}
886 901
diff --git a/drivers/net/macvtap.c b/drivers/net/macvtap.c
index 876c72246ae9..a98fb0ed6aef 100644
--- a/drivers/net/macvtap.c
+++ b/drivers/net/macvtap.c
@@ -698,6 +698,28 @@ static int macvtap_skb_to_vnet_hdr(const struct sk_buff *skb,
698 return 0; 698 return 0;
699} 699}
700 700
701static unsigned long iov_pages(const struct iovec *iv, int offset,
702 unsigned long nr_segs)
703{
704 unsigned long seg, base;
705 int pages = 0, len, size;
706
707 while (nr_segs && (offset >= iv->iov_len)) {
708 offset -= iv->iov_len;
709 ++iv;
710 --nr_segs;
711 }
712
713 for (seg = 0; seg < nr_segs; seg++) {
714 base = (unsigned long)iv[seg].iov_base + offset;
715 len = iv[seg].iov_len - offset;
716 size = ((base & ~PAGE_MASK) + len + ~PAGE_MASK) >> PAGE_SHIFT;
717 pages += size;
718 offset = 0;
719 }
720
721 return pages;
722}
701 723
702/* Get packet from user space buffer */ 724/* Get packet from user space buffer */
703static ssize_t macvtap_get_user(struct macvtap_queue *q, struct msghdr *m, 725static ssize_t macvtap_get_user(struct macvtap_queue *q, struct msghdr *m,
@@ -744,31 +766,15 @@ static ssize_t macvtap_get_user(struct macvtap_queue *q, struct msghdr *m,
744 if (unlikely(count > UIO_MAXIOV)) 766 if (unlikely(count > UIO_MAXIOV))
745 goto err; 767 goto err;
746 768
747 if (m && m->msg_control && sock_flag(&q->sk, SOCK_ZEROCOPY)) 769 if (m && m->msg_control && sock_flag(&q->sk, SOCK_ZEROCOPY)) {
748 zerocopy = true; 770 copylen = vnet_hdr.hdr_len ? vnet_hdr.hdr_len : GOODCOPY_LEN;
749
750 if (zerocopy) {
751 /* Userspace may produce vectors with count greater than
752 * MAX_SKB_FRAGS, so we need to linearize parts of the skb
753 * to let the rest of data to be fit in the frags.
754 */
755 if (count > MAX_SKB_FRAGS) {
756 copylen = iov_length(iv, count - MAX_SKB_FRAGS);
757 if (copylen < vnet_hdr_len)
758 copylen = 0;
759 else
760 copylen -= vnet_hdr_len;
761 }
762 /* There are 256 bytes to be copied in skb, so there is enough
763 * room for skb expand head in case it is used.
764 * The rest buffer is mapped from userspace.
765 */
766 if (copylen < vnet_hdr.hdr_len)
767 copylen = vnet_hdr.hdr_len;
768 if (!copylen)
769 copylen = GOODCOPY_LEN;
770 linear = copylen; 771 linear = copylen;
771 } else { 772 if (iov_pages(iv, vnet_hdr_len + copylen, count)
773 <= MAX_SKB_FRAGS)
774 zerocopy = true;
775 }
776
777 if (!zerocopy) {
772 copylen = len; 778 copylen = len;
773 linear = vnet_hdr.hdr_len; 779 linear = vnet_hdr.hdr_len;
774 } 780 }
@@ -780,9 +786,15 @@ static ssize_t macvtap_get_user(struct macvtap_queue *q, struct msghdr *m,
780 786
781 if (zerocopy) 787 if (zerocopy)
782 err = zerocopy_sg_from_iovec(skb, iv, vnet_hdr_len, count); 788 err = zerocopy_sg_from_iovec(skb, iv, vnet_hdr_len, count);
783 else 789 else {
784 err = skb_copy_datagram_from_iovec(skb, 0, iv, vnet_hdr_len, 790 err = skb_copy_datagram_from_iovec(skb, 0, iv, vnet_hdr_len,
785 len); 791 len);
792 if (!err && m && m->msg_control) {
793 struct ubuf_info *uarg = m->msg_control;
794 uarg->callback(uarg, false);
795 }
796 }
797
786 if (err) 798 if (err)
787 goto err_kfree; 799 goto err_kfree;
788 800
@@ -873,7 +885,7 @@ static ssize_t macvtap_put_user(struct macvtap_queue *q,
873 __be16 h_vlan_proto; 885 __be16 h_vlan_proto;
874 __be16 h_vlan_TCI; 886 __be16 h_vlan_TCI;
875 } veth; 887 } veth;
876 veth.h_vlan_proto = htons(ETH_P_8021Q); 888 veth.h_vlan_proto = skb->vlan_proto;
877 veth.h_vlan_TCI = htons(vlan_tx_tag_get(skb)); 889 veth.h_vlan_TCI = htons(vlan_tx_tag_get(skb));
878 890
879 vlan_offset = offsetof(struct vlan_ethhdr, h_vlan_proto); 891 vlan_offset = offsetof(struct vlan_ethhdr, h_vlan_proto);
@@ -1107,6 +1119,7 @@ static long macvtap_ioctl(struct file *file, unsigned int cmd,
1107 rtnl_lock(); 1119 rtnl_lock();
1108 ret = macvtap_ioctl_set_queue(file, u); 1120 ret = macvtap_ioctl_set_queue(file, u);
1109 rtnl_unlock(); 1121 rtnl_unlock();
1122 return ret;
1110 1123
1111 case TUNGETFEATURES: 1124 case TUNGETFEATURES:
1112 if (put_user(IFF_TAP | IFF_NO_PI | IFF_VNET_HDR | 1125 if (put_user(IFF_TAP | IFF_NO_PI | IFF_VNET_HDR |
diff --git a/drivers/net/phy/mdio-sun4i.c b/drivers/net/phy/mdio-sun4i.c
index 61d3f4ebf52e..7f25e49ae37f 100644
--- a/drivers/net/phy/mdio-sun4i.c
+++ b/drivers/net/phy/mdio-sun4i.c
@@ -40,7 +40,7 @@ struct sun4i_mdio_data {
40static int sun4i_mdio_read(struct mii_bus *bus, int mii_id, int regnum) 40static int sun4i_mdio_read(struct mii_bus *bus, int mii_id, int regnum)
41{ 41{
42 struct sun4i_mdio_data *data = bus->priv; 42 struct sun4i_mdio_data *data = bus->priv;
43 unsigned long start_jiffies; 43 unsigned long timeout_jiffies;
44 int value; 44 int value;
45 45
46 /* issue the phy address and reg */ 46 /* issue the phy address and reg */
@@ -49,10 +49,9 @@ static int sun4i_mdio_read(struct mii_bus *bus, int mii_id, int regnum)
49 writel(0x1, data->membase + EMAC_MAC_MCMD_REG); 49 writel(0x1, data->membase + EMAC_MAC_MCMD_REG);
50 50
51 /* Wait read complete */ 51 /* Wait read complete */
52 start_jiffies = jiffies; 52 timeout_jiffies = jiffies + MDIO_TIMEOUT;
53 while (readl(data->membase + EMAC_MAC_MIND_REG) & 0x1) { 53 while (readl(data->membase + EMAC_MAC_MIND_REG) & 0x1) {
54 if (time_after(start_jiffies, 54 if (time_is_before_jiffies(timeout_jiffies))
55 start_jiffies + MDIO_TIMEOUT))
56 return -ETIMEDOUT; 55 return -ETIMEDOUT;
57 msleep(1); 56 msleep(1);
58 } 57 }
@@ -69,7 +68,7 @@ static int sun4i_mdio_write(struct mii_bus *bus, int mii_id, int regnum,
69 u16 value) 68 u16 value)
70{ 69{
71 struct sun4i_mdio_data *data = bus->priv; 70 struct sun4i_mdio_data *data = bus->priv;
72 unsigned long start_jiffies; 71 unsigned long timeout_jiffies;
73 72
74 /* issue the phy address and reg */ 73 /* issue the phy address and reg */
75 writel((mii_id << 8) | regnum, data->membase + EMAC_MAC_MADR_REG); 74 writel((mii_id << 8) | regnum, data->membase + EMAC_MAC_MADR_REG);
@@ -77,10 +76,9 @@ static int sun4i_mdio_write(struct mii_bus *bus, int mii_id, int regnum,
77 writel(0x1, data->membase + EMAC_MAC_MCMD_REG); 76 writel(0x1, data->membase + EMAC_MAC_MCMD_REG);
78 77
79 /* Wait read complete */ 78 /* Wait read complete */
80 start_jiffies = jiffies; 79 timeout_jiffies = jiffies + MDIO_TIMEOUT;
81 while (readl(data->membase + EMAC_MAC_MIND_REG) & 0x1) { 80 while (readl(data->membase + EMAC_MAC_MIND_REG) & 0x1) {
82 if (time_after(start_jiffies, 81 if (time_is_before_jiffies(timeout_jiffies))
83 start_jiffies + MDIO_TIMEOUT))
84 return -ETIMEDOUT; 82 return -ETIMEDOUT;
85 msleep(1); 83 msleep(1);
86 } 84 }
diff --git a/drivers/net/tun.c b/drivers/net/tun.c
index 5cdcf92eb310..db690a372260 100644
--- a/drivers/net/tun.c
+++ b/drivers/net/tun.c
@@ -1035,6 +1035,29 @@ static int zerocopy_sg_from_iovec(struct sk_buff *skb, const struct iovec *from,
1035 return 0; 1035 return 0;
1036} 1036}
1037 1037
1038static unsigned long iov_pages(const struct iovec *iv, int offset,
1039 unsigned long nr_segs)
1040{
1041 unsigned long seg, base;
1042 int pages = 0, len, size;
1043
1044 while (nr_segs && (offset >= iv->iov_len)) {
1045 offset -= iv->iov_len;
1046 ++iv;
1047 --nr_segs;
1048 }
1049
1050 for (seg = 0; seg < nr_segs; seg++) {
1051 base = (unsigned long)iv[seg].iov_base + offset;
1052 len = iv[seg].iov_len - offset;
1053 size = ((base & ~PAGE_MASK) + len + ~PAGE_MASK) >> PAGE_SHIFT;
1054 pages += size;
1055 offset = 0;
1056 }
1057
1058 return pages;
1059}
1060
1038/* Get packet from user space buffer */ 1061/* Get packet from user space buffer */
1039static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile, 1062static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile,
1040 void *msg_control, const struct iovec *iv, 1063 void *msg_control, const struct iovec *iv,
@@ -1082,32 +1105,18 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile,
1082 return -EINVAL; 1105 return -EINVAL;
1083 } 1106 }
1084 1107
1085 if (msg_control) 1108 if (msg_control) {
1086 zerocopy = true; 1109 /* There are 256 bytes to be copied in skb, so there is
1087 1110 * enough room for skb expand head in case it is used.
1088 if (zerocopy) {
1089 /* Userspace may produce vectors with count greater than
1090 * MAX_SKB_FRAGS, so we need to linearize parts of the skb
1091 * to let the rest of data to be fit in the frags.
1092 */
1093 if (count > MAX_SKB_FRAGS) {
1094 copylen = iov_length(iv, count - MAX_SKB_FRAGS);
1095 if (copylen < offset)
1096 copylen = 0;
1097 else
1098 copylen -= offset;
1099 } else
1100 copylen = 0;
1101 /* There are 256 bytes to be copied in skb, so there is enough
1102 * room for skb expand head in case it is used.
1103 * The rest of the buffer is mapped from userspace. 1111 * The rest of the buffer is mapped from userspace.
1104 */ 1112 */
1105 if (copylen < gso.hdr_len) 1113 copylen = gso.hdr_len ? gso.hdr_len : GOODCOPY_LEN;
1106 copylen = gso.hdr_len;
1107 if (!copylen)
1108 copylen = GOODCOPY_LEN;
1109 linear = copylen; 1114 linear = copylen;
1110 } else { 1115 if (iov_pages(iv, offset + copylen, count) <= MAX_SKB_FRAGS)
1116 zerocopy = true;
1117 }
1118
1119 if (!zerocopy) {
1111 copylen = len; 1120 copylen = len;
1112 linear = gso.hdr_len; 1121 linear = gso.hdr_len;
1113 } 1122 }
@@ -1121,8 +1130,13 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile,
1121 1130
1122 if (zerocopy) 1131 if (zerocopy)
1123 err = zerocopy_sg_from_iovec(skb, iv, offset, count); 1132 err = zerocopy_sg_from_iovec(skb, iv, offset, count);
1124 else 1133 else {
1125 err = skb_copy_datagram_from_iovec(skb, 0, iv, offset, len); 1134 err = skb_copy_datagram_from_iovec(skb, 0, iv, offset, len);
1135 if (!err && msg_control) {
1136 struct ubuf_info *uarg = msg_control;
1137 uarg->callback(uarg, false);
1138 }
1139 }
1126 1140
1127 if (err) { 1141 if (err) {
1128 tun->dev->stats.rx_dropped++; 1142 tun->dev->stats.rx_dropped++;
diff --git a/drivers/net/usb/ax88179_178a.c b/drivers/net/usb/ax88179_178a.c
index 1e3c302d94fe..2bc87e3a8141 100644
--- a/drivers/net/usb/ax88179_178a.c
+++ b/drivers/net/usb/ax88179_178a.c
@@ -1029,10 +1029,10 @@ static int ax88179_bind(struct usbnet *dev, struct usb_interface *intf)
1029 dev->mii.supports_gmii = 1; 1029 dev->mii.supports_gmii = 1;
1030 1030
1031 dev->net->features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | 1031 dev->net->features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
1032 NETIF_F_RXCSUM | NETIF_F_SG | NETIF_F_TSO; 1032 NETIF_F_RXCSUM;
1033 1033
1034 dev->net->hw_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | 1034 dev->net->hw_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
1035 NETIF_F_RXCSUM | NETIF_F_SG | NETIF_F_TSO; 1035 NETIF_F_RXCSUM;
1036 1036
1037 /* Enable checksum offload */ 1037 /* Enable checksum offload */
1038 *tmp = AX_RXCOE_IP | AX_RXCOE_TCP | AX_RXCOE_UDP | 1038 *tmp = AX_RXCOE_IP | AX_RXCOE_TCP | AX_RXCOE_UDP |
@@ -1173,7 +1173,6 @@ ax88179_tx_fixup(struct usbnet *dev, struct sk_buff *skb, gfp_t flags)
1173 if (((skb->len + 8) % frame_size) == 0) 1173 if (((skb->len + 8) % frame_size) == 0)
1174 tx_hdr2 |= 0x80008000; /* Enable padding */ 1174 tx_hdr2 |= 0x80008000; /* Enable padding */
1175 1175
1176 skb_linearize(skb);
1177 headroom = skb_headroom(skb); 1176 headroom = skb_headroom(skb);
1178 tailroom = skb_tailroom(skb); 1177 tailroom = skb_tailroom(skb);
1179 1178
@@ -1317,10 +1316,10 @@ static int ax88179_reset(struct usbnet *dev)
1317 1, 1, tmp); 1316 1, 1, tmp);
1318 1317
1319 dev->net->features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | 1318 dev->net->features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
1320 NETIF_F_RXCSUM | NETIF_F_SG | NETIF_F_TSO; 1319 NETIF_F_RXCSUM;
1321 1320
1322 dev->net->hw_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | 1321 dev->net->hw_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
1323 NETIF_F_RXCSUM | NETIF_F_SG | NETIF_F_TSO; 1322 NETIF_F_RXCSUM;
1324 1323
1325 /* Enable checksum offload */ 1324 /* Enable checksum offload */
1326 *tmp = AX_RXCOE_IP | AX_RXCOE_TCP | AX_RXCOE_UDP | 1325 *tmp = AX_RXCOE_IP | AX_RXCOE_TCP | AX_RXCOE_UDP |
diff --git a/drivers/net/usb/r8152.c b/drivers/net/usb/r8152.c
index ee13f9eb740c..11c51f275366 100644
--- a/drivers/net/usb/r8152.c
+++ b/drivers/net/usb/r8152.c
@@ -344,17 +344,41 @@ static const int multicast_filter_limit = 32;
344static 344static
345int get_registers(struct r8152 *tp, u16 value, u16 index, u16 size, void *data) 345int get_registers(struct r8152 *tp, u16 value, u16 index, u16 size, void *data)
346{ 346{
347 return usb_control_msg(tp->udev, usb_rcvctrlpipe(tp->udev, 0), 347 int ret;
348 void *tmp;
349
350 tmp = kmalloc(size, GFP_KERNEL);
351 if (!tmp)
352 return -ENOMEM;
353
354 ret = usb_control_msg(tp->udev, usb_rcvctrlpipe(tp->udev, 0),
348 RTL8152_REQ_GET_REGS, RTL8152_REQT_READ, 355 RTL8152_REQ_GET_REGS, RTL8152_REQT_READ,
349 value, index, data, size, 500); 356 value, index, tmp, size, 500);
357
358 memcpy(data, tmp, size);
359 kfree(tmp);
360
361 return ret;
350} 362}
351 363
352static 364static
353int set_registers(struct r8152 *tp, u16 value, u16 index, u16 size, void *data) 365int set_registers(struct r8152 *tp, u16 value, u16 index, u16 size, void *data)
354{ 366{
355 return usb_control_msg(tp->udev, usb_sndctrlpipe(tp->udev, 0), 367 int ret;
368 void *tmp;
369
370 tmp = kmalloc(size, GFP_KERNEL);
371 if (!tmp)
372 return -ENOMEM;
373
374 memcpy(tmp, data, size);
375
376 ret = usb_control_msg(tp->udev, usb_sndctrlpipe(tp->udev, 0),
356 RTL8152_REQ_SET_REGS, RTL8152_REQT_WRITE, 377 RTL8152_REQ_SET_REGS, RTL8152_REQT_WRITE,
357 value, index, data, size, 500); 378 value, index, tmp, size, 500);
379
380 kfree(tmp);
381 return ret;
358} 382}
359 383
360static int generic_ocp_read(struct r8152 *tp, u16 index, u16 size, 384static int generic_ocp_read(struct r8152 *tp, u16 index, u16 size,
@@ -490,37 +514,31 @@ int usb_ocp_write(struct r8152 *tp, u16 index, u16 byteen, u16 size, void *data)
490 514
491static u32 ocp_read_dword(struct r8152 *tp, u16 type, u16 index) 515static u32 ocp_read_dword(struct r8152 *tp, u16 type, u16 index)
492{ 516{
493 u32 data; 517 __le32 data;
494 518
495 if (type == MCU_TYPE_PLA) 519 generic_ocp_read(tp, index, sizeof(data), &data, type);
496 pla_ocp_read(tp, index, sizeof(data), &data);
497 else
498 usb_ocp_read(tp, index, sizeof(data), &data);
499 520
500 return __le32_to_cpu(data); 521 return __le32_to_cpu(data);
501} 522}
502 523
503static void ocp_write_dword(struct r8152 *tp, u16 type, u16 index, u32 data) 524static void ocp_write_dword(struct r8152 *tp, u16 type, u16 index, u32 data)
504{ 525{
505 if (type == MCU_TYPE_PLA) 526 __le32 tmp = __cpu_to_le32(data);
506 pla_ocp_write(tp, index, BYTE_EN_DWORD, sizeof(data), &data); 527
507 else 528 generic_ocp_write(tp, index, BYTE_EN_DWORD, sizeof(tmp), &tmp, type);
508 usb_ocp_write(tp, index, BYTE_EN_DWORD, sizeof(data), &data);
509} 529}
510 530
511static u16 ocp_read_word(struct r8152 *tp, u16 type, u16 index) 531static u16 ocp_read_word(struct r8152 *tp, u16 type, u16 index)
512{ 532{
513 u32 data; 533 u32 data;
534 __le32 tmp;
514 u8 shift = index & 2; 535 u8 shift = index & 2;
515 536
516 index &= ~3; 537 index &= ~3;
517 538
518 if (type == MCU_TYPE_PLA) 539 generic_ocp_read(tp, index, sizeof(tmp), &tmp, type);
519 pla_ocp_read(tp, index, sizeof(data), &data);
520 else
521 usb_ocp_read(tp, index, sizeof(data), &data);
522 540
523 data = __le32_to_cpu(data); 541 data = __le32_to_cpu(tmp);
524 data >>= (shift * 8); 542 data >>= (shift * 8);
525 data &= 0xffff; 543 data &= 0xffff;
526 544
@@ -529,7 +547,8 @@ static u16 ocp_read_word(struct r8152 *tp, u16 type, u16 index)
529 547
530static void ocp_write_word(struct r8152 *tp, u16 type, u16 index, u32 data) 548static void ocp_write_word(struct r8152 *tp, u16 type, u16 index, u32 data)
531{ 549{
532 u32 tmp, mask = 0xffff; 550 u32 mask = 0xffff;
551 __le32 tmp;
533 u16 byen = BYTE_EN_WORD; 552 u16 byen = BYTE_EN_WORD;
534 u8 shift = index & 2; 553 u8 shift = index & 2;
535 554
@@ -542,34 +561,25 @@ static void ocp_write_word(struct r8152 *tp, u16 type, u16 index, u32 data)
542 index &= ~3; 561 index &= ~3;
543 } 562 }
544 563
545 if (type == MCU_TYPE_PLA) 564 generic_ocp_read(tp, index, sizeof(tmp), &tmp, type);
546 pla_ocp_read(tp, index, sizeof(tmp), &tmp);
547 else
548 usb_ocp_read(tp, index, sizeof(tmp), &tmp);
549 565
550 tmp = __le32_to_cpu(tmp) & ~mask; 566 data |= __le32_to_cpu(tmp) & ~mask;
551 tmp |= data; 567 tmp = __cpu_to_le32(data);
552 tmp = __cpu_to_le32(tmp);
553 568
554 if (type == MCU_TYPE_PLA) 569 generic_ocp_write(tp, index, byen, sizeof(tmp), &tmp, type);
555 pla_ocp_write(tp, index, byen, sizeof(tmp), &tmp);
556 else
557 usb_ocp_write(tp, index, byen, sizeof(tmp), &tmp);
558} 570}
559 571
560static u8 ocp_read_byte(struct r8152 *tp, u16 type, u16 index) 572static u8 ocp_read_byte(struct r8152 *tp, u16 type, u16 index)
561{ 573{
562 u32 data; 574 u32 data;
575 __le32 tmp;
563 u8 shift = index & 3; 576 u8 shift = index & 3;
564 577
565 index &= ~3; 578 index &= ~3;
566 579
567 if (type == MCU_TYPE_PLA) 580 generic_ocp_read(tp, index, sizeof(tmp), &tmp, type);
568 pla_ocp_read(tp, index, sizeof(data), &data);
569 else
570 usb_ocp_read(tp, index, sizeof(data), &data);
571 581
572 data = __le32_to_cpu(data); 582 data = __le32_to_cpu(tmp);
573 data >>= (shift * 8); 583 data >>= (shift * 8);
574 data &= 0xff; 584 data &= 0xff;
575 585
@@ -578,7 +588,8 @@ static u8 ocp_read_byte(struct r8152 *tp, u16 type, u16 index)
578 588
579static void ocp_write_byte(struct r8152 *tp, u16 type, u16 index, u32 data) 589static void ocp_write_byte(struct r8152 *tp, u16 type, u16 index, u32 data)
580{ 590{
581 u32 tmp, mask = 0xff; 591 u32 mask = 0xff;
592 __le32 tmp;
582 u16 byen = BYTE_EN_BYTE; 593 u16 byen = BYTE_EN_BYTE;
583 u8 shift = index & 3; 594 u8 shift = index & 3;
584 595
@@ -591,19 +602,12 @@ static void ocp_write_byte(struct r8152 *tp, u16 type, u16 index, u32 data)
591 index &= ~3; 602 index &= ~3;
592 } 603 }
593 604
594 if (type == MCU_TYPE_PLA) 605 generic_ocp_read(tp, index, sizeof(tmp), &tmp, type);
595 pla_ocp_read(tp, index, sizeof(tmp), &tmp);
596 else
597 usb_ocp_read(tp, index, sizeof(tmp), &tmp);
598 606
599 tmp = __le32_to_cpu(tmp) & ~mask; 607 data |= __le32_to_cpu(tmp) & ~mask;
600 tmp |= data; 608 tmp = __cpu_to_le32(data);
601 tmp = __cpu_to_le32(tmp);
602 609
603 if (type == MCU_TYPE_PLA) 610 generic_ocp_write(tp, index, byen, sizeof(tmp), &tmp, type);
604 pla_ocp_write(tp, index, byen, sizeof(tmp), &tmp);
605 else
606 usb_ocp_write(tp, index, byen, sizeof(tmp), &tmp);
607} 611}
608 612
609static void r8152_mdio_write(struct r8152 *tp, u32 reg_addr, u32 value) 613static void r8152_mdio_write(struct r8152 *tp, u32 reg_addr, u32 value)
@@ -685,21 +689,14 @@ static void ocp_reg_write(struct r8152 *tp, u16 addr, u16 data)
685static inline void set_ethernet_addr(struct r8152 *tp) 689static inline void set_ethernet_addr(struct r8152 *tp)
686{ 690{
687 struct net_device *dev = tp->netdev; 691 struct net_device *dev = tp->netdev;
688 u8 *node_id; 692 u8 node_id[8] = {0};
689
690 node_id = kmalloc(sizeof(u8) * 8, GFP_KERNEL);
691 if (!node_id) {
692 netif_err(tp, probe, dev, "out of memory");
693 return;
694 }
695 693
696 if (pla_ocp_read(tp, PLA_IDR, sizeof(u8) * 8, node_id) < 0) 694 if (pla_ocp_read(tp, PLA_IDR, sizeof(node_id), node_id) < 0)
697 netif_notice(tp, probe, dev, "inet addr fail\n"); 695 netif_notice(tp, probe, dev, "inet addr fail\n");
698 else { 696 else {
699 memcpy(dev->dev_addr, node_id, dev->addr_len); 697 memcpy(dev->dev_addr, node_id, dev->addr_len);
700 memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len); 698 memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
701 } 699 }
702 kfree(node_id);
703} 700}
704 701
705static int rtl8152_set_mac_address(struct net_device *netdev, void *p) 702static int rtl8152_set_mac_address(struct net_device *netdev, void *p)
@@ -882,15 +879,10 @@ static void rtl8152_set_rx_mode(struct net_device *netdev)
882static void _rtl8152_set_rx_mode(struct net_device *netdev) 879static void _rtl8152_set_rx_mode(struct net_device *netdev)
883{ 880{
884 struct r8152 *tp = netdev_priv(netdev); 881 struct r8152 *tp = netdev_priv(netdev);
885 u32 tmp, *mc_filter; /* Multicast hash filter */ 882 u32 mc_filter[2]; /* Multicast hash filter */
883 __le32 tmp[2];
886 u32 ocp_data; 884 u32 ocp_data;
887 885
888 mc_filter = kmalloc(sizeof(u32) * 2, GFP_KERNEL);
889 if (!mc_filter) {
890 netif_err(tp, link, netdev, "out of memory");
891 return;
892 }
893
894 clear_bit(RTL8152_SET_RX_MODE, &tp->flags); 886 clear_bit(RTL8152_SET_RX_MODE, &tp->flags);
895 netif_stop_queue(netdev); 887 netif_stop_queue(netdev);
896 ocp_data = ocp_read_dword(tp, MCU_TYPE_PLA, PLA_RCR); 888 ocp_data = ocp_read_dword(tp, MCU_TYPE_PLA, PLA_RCR);
@@ -918,14 +910,12 @@ static void _rtl8152_set_rx_mode(struct net_device *netdev)
918 } 910 }
919 } 911 }
920 912
921 tmp = mc_filter[0]; 913 tmp[0] = __cpu_to_le32(swab32(mc_filter[1]));
922 mc_filter[0] = __cpu_to_le32(swab32(mc_filter[1])); 914 tmp[1] = __cpu_to_le32(swab32(mc_filter[0]));
923 mc_filter[1] = __cpu_to_le32(swab32(tmp));
924 915
925 pla_ocp_write(tp, PLA_MAR, BYTE_EN_DWORD, sizeof(u32) * 2, mc_filter); 916 pla_ocp_write(tp, PLA_MAR, BYTE_EN_DWORD, sizeof(tmp), tmp);
926 ocp_write_dword(tp, MCU_TYPE_PLA, PLA_RCR, ocp_data); 917 ocp_write_dword(tp, MCU_TYPE_PLA, PLA_RCR, ocp_data);
927 netif_wake_queue(netdev); 918 netif_wake_queue(netdev);
928 kfree(mc_filter);
929} 919}
930 920
931static netdev_tx_t rtl8152_start_xmit(struct sk_buff *skb, 921static netdev_tx_t rtl8152_start_xmit(struct sk_buff *skb,
diff --git a/drivers/net/usb/r815x.c b/drivers/net/usb/r815x.c
index 852392269718..2df2f4fb42a7 100644
--- a/drivers/net/usb/r815x.c
+++ b/drivers/net/usb/r815x.c
@@ -24,34 +24,43 @@
24 24
25static int pla_read_word(struct usb_device *udev, u16 index) 25static int pla_read_word(struct usb_device *udev, u16 index)
26{ 26{
27 int data, ret; 27 int ret;
28 u8 shift = index & 2; 28 u8 shift = index & 2;
29 __le32 ocp_data; 29 __le32 *tmp;
30
31 tmp = kmalloc(sizeof(*tmp), GFP_KERNEL);
32 if (!tmp)
33 return -ENOMEM;
30 34
31 index &= ~3; 35 index &= ~3;
32 36
33 ret = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0), 37 ret = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0),
34 RTL815x_REQ_GET_REGS, RTL815x_REQT_READ, 38 RTL815x_REQ_GET_REGS, RTL815x_REQT_READ,
35 index, MCU_TYPE_PLA, &ocp_data, sizeof(ocp_data), 39 index, MCU_TYPE_PLA, tmp, sizeof(*tmp), 500);
36 500);
37 if (ret < 0) 40 if (ret < 0)
38 return ret; 41 goto out2;
39 42
40 data = __le32_to_cpu(ocp_data); 43 ret = __le32_to_cpu(*tmp);
41 data >>= (shift * 8); 44 ret >>= (shift * 8);
42 data &= 0xffff; 45 ret &= 0xffff;
43 46
44 return data; 47out2:
48 kfree(tmp);
49 return ret;
45} 50}
46 51
47static int pla_write_word(struct usb_device *udev, u16 index, u32 data) 52static int pla_write_word(struct usb_device *udev, u16 index, u32 data)
48{ 53{
49 __le32 ocp_data; 54 __le32 *tmp;
50 u32 mask = 0xffff; 55 u32 mask = 0xffff;
51 u16 byen = BYTE_EN_WORD; 56 u16 byen = BYTE_EN_WORD;
52 u8 shift = index & 2; 57 u8 shift = index & 2;
53 int ret; 58 int ret;
54 59
60 tmp = kmalloc(sizeof(*tmp), GFP_KERNEL);
61 if (!tmp)
62 return -ENOMEM;
63
55 data &= mask; 64 data &= mask;
56 65
57 if (shift) { 66 if (shift) {
@@ -63,19 +72,20 @@ static int pla_write_word(struct usb_device *udev, u16 index, u32 data)
63 72
64 ret = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0), 73 ret = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0),
65 RTL815x_REQ_GET_REGS, RTL815x_REQT_READ, 74 RTL815x_REQ_GET_REGS, RTL815x_REQT_READ,
66 index, MCU_TYPE_PLA, &ocp_data, sizeof(ocp_data), 75 index, MCU_TYPE_PLA, tmp, sizeof(*tmp), 500);
67 500);
68 if (ret < 0) 76 if (ret < 0)
69 return ret; 77 goto out3;
70 78
71 data |= __le32_to_cpu(ocp_data) & ~mask; 79 data |= __le32_to_cpu(*tmp) & ~mask;
72 ocp_data = __cpu_to_le32(data); 80 *tmp = __cpu_to_le32(data);
73 81
74 ret = usb_control_msg(udev, usb_sndctrlpipe(udev, 0), 82 ret = usb_control_msg(udev, usb_sndctrlpipe(udev, 0),
75 RTL815x_REQ_SET_REGS, RTL815x_REQT_WRITE, 83 RTL815x_REQ_SET_REGS, RTL815x_REQT_WRITE,
76 index, MCU_TYPE_PLA | byen, &ocp_data, 84 index, MCU_TYPE_PLA | byen, tmp, sizeof(*tmp),
77 sizeof(ocp_data), 500); 85 500);
78 86
87out3:
88 kfree(tmp);
79 return ret; 89 return ret;
80} 90}
81 91
@@ -116,11 +126,18 @@ out1:
116static int r815x_mdio_read(struct net_device *netdev, int phy_id, int reg) 126static int r815x_mdio_read(struct net_device *netdev, int phy_id, int reg)
117{ 127{
118 struct usbnet *dev = netdev_priv(netdev); 128 struct usbnet *dev = netdev_priv(netdev);
129 int ret;
119 130
120 if (phy_id != R815x_PHY_ID) 131 if (phy_id != R815x_PHY_ID)
121 return -EINVAL; 132 return -EINVAL;
122 133
123 return ocp_reg_read(dev, BASE_MII + reg * 2); 134 if (usb_autopm_get_interface(dev->intf) < 0)
135 return -ENODEV;
136
137 ret = ocp_reg_read(dev, BASE_MII + reg * 2);
138
139 usb_autopm_put_interface(dev->intf);
140 return ret;
124} 141}
125 142
126static 143static
@@ -131,7 +148,12 @@ void r815x_mdio_write(struct net_device *netdev, int phy_id, int reg, int val)
131 if (phy_id != R815x_PHY_ID) 148 if (phy_id != R815x_PHY_ID)
132 return; 149 return;
133 150
151 if (usb_autopm_get_interface(dev->intf) < 0)
152 return;
153
134 ocp_reg_write(dev, BASE_MII + reg * 2, val); 154 ocp_reg_write(dev, BASE_MII + reg * 2, val);
155
156 usb_autopm_put_interface(dev->intf);
135} 157}
136 158
137static int r8153_bind(struct usbnet *dev, struct usb_interface *intf) 159static int r8153_bind(struct usbnet *dev, struct usb_interface *intf)
@@ -150,7 +172,7 @@ static int r8153_bind(struct usbnet *dev, struct usb_interface *intf)
150 dev->mii.phy_id = R815x_PHY_ID; 172 dev->mii.phy_id = R815x_PHY_ID;
151 dev->mii.supports_gmii = 1; 173 dev->mii.supports_gmii = 1;
152 174
153 return 0; 175 return status;
154} 176}
155 177
156static int r8152_bind(struct usbnet *dev, struct usb_interface *intf) 178static int r8152_bind(struct usbnet *dev, struct usb_interface *intf)
@@ -169,7 +191,7 @@ static int r8152_bind(struct usbnet *dev, struct usb_interface *intf)
169 dev->mii.phy_id = R815x_PHY_ID; 191 dev->mii.phy_id = R815x_PHY_ID;
170 dev->mii.supports_gmii = 0; 192 dev->mii.supports_gmii = 0;
171 193
172 return 0; 194 return status;
173} 195}
174 196
175static const struct driver_info r8152_info = { 197static const struct driver_info r8152_info = {
diff --git a/drivers/net/usb/smsc75xx.c b/drivers/net/usb/smsc75xx.c
index 75409748c774..66ebbacf066f 100644
--- a/drivers/net/usb/smsc75xx.c
+++ b/drivers/net/usb/smsc75xx.c
@@ -45,7 +45,6 @@
45#define EEPROM_MAC_OFFSET (0x01) 45#define EEPROM_MAC_OFFSET (0x01)
46#define DEFAULT_TX_CSUM_ENABLE (true) 46#define DEFAULT_TX_CSUM_ENABLE (true)
47#define DEFAULT_RX_CSUM_ENABLE (true) 47#define DEFAULT_RX_CSUM_ENABLE (true)
48#define DEFAULT_TSO_ENABLE (true)
49#define SMSC75XX_INTERNAL_PHY_ID (1) 48#define SMSC75XX_INTERNAL_PHY_ID (1)
50#define SMSC75XX_TX_OVERHEAD (8) 49#define SMSC75XX_TX_OVERHEAD (8)
51#define MAX_RX_FIFO_SIZE (20 * 1024) 50#define MAX_RX_FIFO_SIZE (20 * 1024)
@@ -1410,17 +1409,14 @@ static int smsc75xx_bind(struct usbnet *dev, struct usb_interface *intf)
1410 1409
1411 INIT_WORK(&pdata->set_multicast, smsc75xx_deferred_multicast_write); 1410 INIT_WORK(&pdata->set_multicast, smsc75xx_deferred_multicast_write);
1412 1411
1413 if (DEFAULT_TX_CSUM_ENABLE) { 1412 if (DEFAULT_TX_CSUM_ENABLE)
1414 dev->net->features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM; 1413 dev->net->features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
1415 if (DEFAULT_TSO_ENABLE) 1414
1416 dev->net->features |= NETIF_F_SG |
1417 NETIF_F_TSO | NETIF_F_TSO6;
1418 }
1419 if (DEFAULT_RX_CSUM_ENABLE) 1415 if (DEFAULT_RX_CSUM_ENABLE)
1420 dev->net->features |= NETIF_F_RXCSUM; 1416 dev->net->features |= NETIF_F_RXCSUM;
1421 1417
1422 dev->net->hw_features = NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | 1418 dev->net->hw_features = NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
1423 NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_RXCSUM; 1419 NETIF_F_RXCSUM;
1424 1420
1425 ret = smsc75xx_wait_ready(dev, 0); 1421 ret = smsc75xx_wait_ready(dev, 0);
1426 if (ret < 0) { 1422 if (ret < 0) {
@@ -2200,8 +2196,6 @@ static struct sk_buff *smsc75xx_tx_fixup(struct usbnet *dev,
2200{ 2196{
2201 u32 tx_cmd_a, tx_cmd_b; 2197 u32 tx_cmd_a, tx_cmd_b;
2202 2198
2203 skb_linearize(skb);
2204
2205 if (skb_headroom(skb) < SMSC75XX_TX_OVERHEAD) { 2199 if (skb_headroom(skb) < SMSC75XX_TX_OVERHEAD) {
2206 struct sk_buff *skb2 = 2200 struct sk_buff *skb2 =
2207 skb_copy_expand(skb, SMSC75XX_TX_OVERHEAD, 0, flags); 2201 skb_copy_expand(skb, SMSC75XX_TX_OVERHEAD, 0, flags);
diff --git a/drivers/net/veth.c b/drivers/net/veth.c
index da866523cf20..eee1f19ef1e9 100644
--- a/drivers/net/veth.c
+++ b/drivers/net/veth.c
@@ -269,6 +269,7 @@ static void veth_setup(struct net_device *dev)
269 dev->ethtool_ops = &veth_ethtool_ops; 269 dev->ethtool_ops = &veth_ethtool_ops;
270 dev->features |= NETIF_F_LLTX; 270 dev->features |= NETIF_F_LLTX;
271 dev->features |= VETH_FEATURES; 271 dev->features |= VETH_FEATURES;
272 dev->vlan_features = dev->features;
272 dev->destructor = veth_dev_free; 273 dev->destructor = veth_dev_free;
273 274
274 dev->hw_features = VETH_FEATURES; 275 dev->hw_features = VETH_FEATURES;
diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c
index 0ba1e7edbb1b..f4c6db419ddb 100644
--- a/drivers/net/vxlan.c
+++ b/drivers/net/vxlan.c
@@ -136,7 +136,8 @@ struct vxlan_dev {
136 u32 flags; /* VXLAN_F_* below */ 136 u32 flags; /* VXLAN_F_* below */
137 137
138 struct work_struct sock_work; 138 struct work_struct sock_work;
139 struct work_struct igmp_work; 139 struct work_struct igmp_join;
140 struct work_struct igmp_leave;
140 141
141 unsigned long age_interval; 142 unsigned long age_interval;
142 struct timer_list age_timer; 143 struct timer_list age_timer;
@@ -736,7 +737,6 @@ static bool vxlan_snoop(struct net_device *dev,
736 return false; 737 return false;
737} 738}
738 739
739
740/* See if multicast group is already in use by other ID */ 740/* See if multicast group is already in use by other ID */
741static bool vxlan_group_used(struct vxlan_net *vn, __be32 remote_ip) 741static bool vxlan_group_used(struct vxlan_net *vn, __be32 remote_ip)
742{ 742{
@@ -770,12 +770,13 @@ static void vxlan_sock_release(struct vxlan_net *vn, struct vxlan_sock *vs)
770 queue_work(vxlan_wq, &vs->del_work); 770 queue_work(vxlan_wq, &vs->del_work);
771} 771}
772 772
773/* Callback to update multicast group membership. 773/* Callback to update multicast group membership when first VNI on
774 * Scheduled when vxlan goes up/down. 774 * multicast asddress is brought up
775 * Done as workqueue because ip_mc_join_group acquires RTNL.
775 */ 776 */
776static void vxlan_igmp_work(struct work_struct *work) 777static void vxlan_igmp_join(struct work_struct *work)
777{ 778{
778 struct vxlan_dev *vxlan = container_of(work, struct vxlan_dev, igmp_work); 779 struct vxlan_dev *vxlan = container_of(work, struct vxlan_dev, igmp_join);
779 struct vxlan_net *vn = net_generic(dev_net(vxlan->dev), vxlan_net_id); 780 struct vxlan_net *vn = net_generic(dev_net(vxlan->dev), vxlan_net_id);
780 struct vxlan_sock *vs = vxlan->vn_sock; 781 struct vxlan_sock *vs = vxlan->vn_sock;
781 struct sock *sk = vs->sock->sk; 782 struct sock *sk = vs->sock->sk;
@@ -785,10 +786,27 @@ static void vxlan_igmp_work(struct work_struct *work)
785 }; 786 };
786 787
787 lock_sock(sk); 788 lock_sock(sk);
788 if (vxlan_group_used(vn, vxlan->default_dst.remote_ip)) 789 ip_mc_join_group(sk, &mreq);
789 ip_mc_join_group(sk, &mreq); 790 release_sock(sk);
790 else 791
791 ip_mc_leave_group(sk, &mreq); 792 vxlan_sock_release(vn, vs);
793 dev_put(vxlan->dev);
794}
795
796/* Inverse of vxlan_igmp_join when last VNI is brought down */
797static void vxlan_igmp_leave(struct work_struct *work)
798{
799 struct vxlan_dev *vxlan = container_of(work, struct vxlan_dev, igmp_leave);
800 struct vxlan_net *vn = net_generic(dev_net(vxlan->dev), vxlan_net_id);
801 struct vxlan_sock *vs = vxlan->vn_sock;
802 struct sock *sk = vs->sock->sk;
803 struct ip_mreqn mreq = {
804 .imr_multiaddr.s_addr = vxlan->default_dst.remote_ip,
805 .imr_ifindex = vxlan->default_dst.remote_ifindex,
806 };
807
808 lock_sock(sk);
809 ip_mc_leave_group(sk, &mreq);
792 release_sock(sk); 810 release_sock(sk);
793 811
794 vxlan_sock_release(vn, vs); 812 vxlan_sock_release(vn, vs);
@@ -1359,6 +1377,7 @@ static void vxlan_uninit(struct net_device *dev)
1359/* Start ageing timer and join group when device is brought up */ 1377/* Start ageing timer and join group when device is brought up */
1360static int vxlan_open(struct net_device *dev) 1378static int vxlan_open(struct net_device *dev)
1361{ 1379{
1380 struct vxlan_net *vn = net_generic(dev_net(dev), vxlan_net_id);
1362 struct vxlan_dev *vxlan = netdev_priv(dev); 1381 struct vxlan_dev *vxlan = netdev_priv(dev);
1363 struct vxlan_sock *vs = vxlan->vn_sock; 1382 struct vxlan_sock *vs = vxlan->vn_sock;
1364 1383
@@ -1366,10 +1385,11 @@ static int vxlan_open(struct net_device *dev)
1366 if (!vs) 1385 if (!vs)
1367 return -ENOTCONN; 1386 return -ENOTCONN;
1368 1387
1369 if (IN_MULTICAST(ntohl(vxlan->default_dst.remote_ip))) { 1388 if (IN_MULTICAST(ntohl(vxlan->default_dst.remote_ip)) &&
1389 ! vxlan_group_used(vn, vxlan->default_dst.remote_ip)) {
1370 vxlan_sock_hold(vs); 1390 vxlan_sock_hold(vs);
1371 dev_hold(dev); 1391 dev_hold(dev);
1372 queue_work(vxlan_wq, &vxlan->igmp_work); 1392 queue_work(vxlan_wq, &vxlan->igmp_join);
1373 } 1393 }
1374 1394
1375 if (vxlan->age_interval) 1395 if (vxlan->age_interval)
@@ -1400,13 +1420,15 @@ static void vxlan_flush(struct vxlan_dev *vxlan)
1400/* Cleanup timer and forwarding table on shutdown */ 1420/* Cleanup timer and forwarding table on shutdown */
1401static int vxlan_stop(struct net_device *dev) 1421static int vxlan_stop(struct net_device *dev)
1402{ 1422{
1423 struct vxlan_net *vn = net_generic(dev_net(dev), vxlan_net_id);
1403 struct vxlan_dev *vxlan = netdev_priv(dev); 1424 struct vxlan_dev *vxlan = netdev_priv(dev);
1404 struct vxlan_sock *vs = vxlan->vn_sock; 1425 struct vxlan_sock *vs = vxlan->vn_sock;
1405 1426
1406 if (vs && IN_MULTICAST(ntohl(vxlan->default_dst.remote_ip))) { 1427 if (vs && IN_MULTICAST(ntohl(vxlan->default_dst.remote_ip)) &&
1428 ! vxlan_group_used(vn, vxlan->default_dst.remote_ip)) {
1407 vxlan_sock_hold(vs); 1429 vxlan_sock_hold(vs);
1408 dev_hold(dev); 1430 dev_hold(dev);
1409 queue_work(vxlan_wq, &vxlan->igmp_work); 1431 queue_work(vxlan_wq, &vxlan->igmp_leave);
1410 } 1432 }
1411 1433
1412 del_timer_sync(&vxlan->age_timer); 1434 del_timer_sync(&vxlan->age_timer);
@@ -1471,7 +1493,8 @@ static void vxlan_setup(struct net_device *dev)
1471 1493
1472 INIT_LIST_HEAD(&vxlan->next); 1494 INIT_LIST_HEAD(&vxlan->next);
1473 spin_lock_init(&vxlan->hash_lock); 1495 spin_lock_init(&vxlan->hash_lock);
1474 INIT_WORK(&vxlan->igmp_work, vxlan_igmp_work); 1496 INIT_WORK(&vxlan->igmp_join, vxlan_igmp_join);
1497 INIT_WORK(&vxlan->igmp_leave, vxlan_igmp_leave);
1475 INIT_WORK(&vxlan->sock_work, vxlan_sock_work); 1498 INIT_WORK(&vxlan->sock_work, vxlan_sock_work);
1476 1499
1477 init_timer_deferrable(&vxlan->age_timer); 1500 init_timer_deferrable(&vxlan->age_timer);
@@ -1767,9 +1790,15 @@ static int vxlan_newlink(struct net *net, struct net_device *dev,
1767 1790
1768static void vxlan_dellink(struct net_device *dev, struct list_head *head) 1791static void vxlan_dellink(struct net_device *dev, struct list_head *head)
1769{ 1792{
1793 struct vxlan_net *vn = net_generic(dev_net(dev), vxlan_net_id);
1770 struct vxlan_dev *vxlan = netdev_priv(dev); 1794 struct vxlan_dev *vxlan = netdev_priv(dev);
1771 1795
1796 flush_workqueue(vxlan_wq);
1797
1798 spin_lock(&vn->sock_lock);
1772 hlist_del_rcu(&vxlan->hlist); 1799 hlist_del_rcu(&vxlan->hlist);
1800 spin_unlock(&vn->sock_lock);
1801
1773 list_del(&vxlan->next); 1802 list_del(&vxlan->next);
1774 unregister_netdevice_queue(dev, head); 1803 unregister_netdevice_queue(dev, head);
1775} 1804}
@@ -1872,10 +1901,12 @@ static __net_exit void vxlan_exit_net(struct net *net)
1872{ 1901{
1873 struct vxlan_net *vn = net_generic(net, vxlan_net_id); 1902 struct vxlan_net *vn = net_generic(net, vxlan_net_id);
1874 struct vxlan_dev *vxlan; 1903 struct vxlan_dev *vxlan;
1904 LIST_HEAD(list);
1875 1905
1876 rtnl_lock(); 1906 rtnl_lock();
1877 list_for_each_entry(vxlan, &vn->vxlan_list, next) 1907 list_for_each_entry(vxlan, &vn->vxlan_list, next)
1878 dev_close(vxlan->dev); 1908 unregister_netdevice_queue(vxlan->dev, &list);
1909 unregister_netdevice_many(&list);
1879 rtnl_unlock(); 1910 rtnl_unlock();
1880} 1911}
1881 1912
diff --git a/drivers/net/wireless/ath/ath10k/Kconfig b/drivers/net/wireless/ath/ath10k/Kconfig
index cde58fe96254..82e8088ca9b4 100644
--- a/drivers/net/wireless/ath/ath10k/Kconfig
+++ b/drivers/net/wireless/ath/ath10k/Kconfig
@@ -1,6 +1,6 @@
1config ATH10K 1config ATH10K
2 tristate "Atheros 802.11ac wireless cards support" 2 tristate "Atheros 802.11ac wireless cards support"
3 depends on MAC80211 3 depends on MAC80211 && HAS_DMA
4 select ATH_COMMON 4 select ATH_COMMON
5 ---help--- 5 ---help---
6 This module adds support for wireless adapters based on 6 This module adds support for wireless adapters based on
diff --git a/drivers/net/wireless/ath/ath5k/mac80211-ops.c b/drivers/net/wireless/ath/ath5k/mac80211-ops.c
index 81b686c6a376..40825d43322e 100644
--- a/drivers/net/wireless/ath/ath5k/mac80211-ops.c
+++ b/drivers/net/wireless/ath/ath5k/mac80211-ops.c
@@ -325,7 +325,7 @@ ath5k_prepare_multicast(struct ieee80211_hw *hw,
325 struct netdev_hw_addr *ha; 325 struct netdev_hw_addr *ha;
326 326
327 mfilt[0] = 0; 327 mfilt[0] = 0;
328 mfilt[1] = 1; 328 mfilt[1] = 0;
329 329
330 netdev_hw_addr_list_for_each(ha, mc_list) { 330 netdev_hw_addr_list_for_each(ha, mc_list) {
331 /* calculate XOR of eight 6-bit values */ 331 /* calculate XOR of eight 6-bit values */
diff --git a/drivers/net/wireless/ath/ath9k/ar5008_phy.c b/drivers/net/wireless/ath/ath9k/ar5008_phy.c
index d1acfe98918a..1576d58291d4 100644
--- a/drivers/net/wireless/ath/ath9k/ar5008_phy.c
+++ b/drivers/net/wireless/ath/ath9k/ar5008_phy.c
@@ -610,7 +610,15 @@ static void ar5008_hw_override_ini(struct ath_hw *ah,
610 REG_SET_BIT(ah, AR_DIAG_SW, (AR_DIAG_RX_DIS | AR_DIAG_RX_ABORT)); 610 REG_SET_BIT(ah, AR_DIAG_SW, (AR_DIAG_RX_DIS | AR_DIAG_RX_ABORT));
611 611
612 if (AR_SREV_9280_20_OR_LATER(ah)) { 612 if (AR_SREV_9280_20_OR_LATER(ah)) {
613 val = REG_READ(ah, AR_PCU_MISC_MODE2); 613 /*
614 * For AR9280 and above, there is a new feature that allows
615 * Multicast search based on both MAC Address and Key ID.
616 * By default, this feature is enabled. But since the driver
617 * is not using this feature, we switch it off; otherwise
618 * multicast search based on MAC addr only will fail.
619 */
620 val = REG_READ(ah, AR_PCU_MISC_MODE2) &
621 (~AR_ADHOC_MCAST_KEYID_ENABLE);
614 622
615 if (!AR_SREV_9271(ah)) 623 if (!AR_SREV_9271(ah))
616 val &= ~AR_PCU_MISC_MODE2_HWWAR1; 624 val &= ~AR_PCU_MISC_MODE2_HWWAR1;
diff --git a/drivers/net/wireless/ath/ath9k/hif_usb.c b/drivers/net/wireless/ath/ath9k/hif_usb.c
index 9e582e14da74..5205a3625e84 100644
--- a/drivers/net/wireless/ath/ath9k/hif_usb.c
+++ b/drivers/net/wireless/ath/ath9k/hif_usb.c
@@ -1082,7 +1082,7 @@ static void ath9k_hif_usb_firmware_fail(struct hif_device_usb *hif_dev)
1082 struct device *dev = &hif_dev->udev->dev; 1082 struct device *dev = &hif_dev->udev->dev;
1083 struct device *parent = dev->parent; 1083 struct device *parent = dev->parent;
1084 1084
1085 complete(&hif_dev->fw_done); 1085 complete_all(&hif_dev->fw_done);
1086 1086
1087 if (parent) 1087 if (parent)
1088 device_lock(parent); 1088 device_lock(parent);
@@ -1131,7 +1131,7 @@ static void ath9k_hif_usb_firmware_cb(const struct firmware *fw, void *context)
1131 1131
1132 release_firmware(fw); 1132 release_firmware(fw);
1133 hif_dev->flags |= HIF_USB_READY; 1133 hif_dev->flags |= HIF_USB_READY;
1134 complete(&hif_dev->fw_done); 1134 complete_all(&hif_dev->fw_done);
1135 1135
1136 return; 1136 return;
1137 1137
@@ -1295,7 +1295,9 @@ static void ath9k_hif_usb_disconnect(struct usb_interface *interface)
1295 1295
1296 usb_set_intfdata(interface, NULL); 1296 usb_set_intfdata(interface, NULL);
1297 1297
1298 if (!unplugged && (hif_dev->flags & HIF_USB_START)) 1298 /* If firmware was loaded we should drop it
1299 * go back to first stage bootloader. */
1300 if (!unplugged && (hif_dev->flags & HIF_USB_READY))
1299 ath9k_hif_usb_reboot(udev); 1301 ath9k_hif_usb_reboot(udev);
1300 1302
1301 kfree(hif_dev); 1303 kfree(hif_dev);
@@ -1316,7 +1318,10 @@ static int ath9k_hif_usb_suspend(struct usb_interface *interface,
1316 if (!(hif_dev->flags & HIF_USB_START)) 1318 if (!(hif_dev->flags & HIF_USB_START))
1317 ath9k_htc_suspend(hif_dev->htc_handle); 1319 ath9k_htc_suspend(hif_dev->htc_handle);
1318 1320
1319 ath9k_hif_usb_dealloc_urbs(hif_dev); 1321 wait_for_completion(&hif_dev->fw_done);
1322
1323 if (hif_dev->flags & HIF_USB_READY)
1324 ath9k_hif_usb_dealloc_urbs(hif_dev);
1320 1325
1321 return 0; 1326 return 0;
1322} 1327}
diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_init.c b/drivers/net/wireless/ath/ath9k/htc_drv_init.c
index 71a183ffc77f..c3676bf1d6c4 100644
--- a/drivers/net/wireless/ath/ath9k/htc_drv_init.c
+++ b/drivers/net/wireless/ath/ath9k/htc_drv_init.c
@@ -861,6 +861,7 @@ static int ath9k_init_device(struct ath9k_htc_priv *priv,
861 if (error != 0) 861 if (error != 0)
862 goto err_rx; 862 goto err_rx;
863 863
864 ath9k_hw_disable(priv->ah);
864#ifdef CONFIG_MAC80211_LEDS 865#ifdef CONFIG_MAC80211_LEDS
865 /* must be initialized before ieee80211_register_hw */ 866 /* must be initialized before ieee80211_register_hw */
866 priv->led_cdev.default_trigger = ieee80211_create_tpt_led_trigger(priv->hw, 867 priv->led_cdev.default_trigger = ieee80211_create_tpt_led_trigger(priv->hw,
diff --git a/drivers/net/wireless/ath/ath9k/xmit.c b/drivers/net/wireless/ath/ath9k/xmit.c
index c59ae43b9b35..927992732620 100644
--- a/drivers/net/wireless/ath/ath9k/xmit.c
+++ b/drivers/net/wireless/ath/ath9k/xmit.c
@@ -146,6 +146,28 @@ static void ath_set_rates(struct ieee80211_vif *vif, struct ieee80211_sta *sta,
146 ARRAY_SIZE(bf->rates)); 146 ARRAY_SIZE(bf->rates));
147} 147}
148 148
149static void ath_txq_skb_done(struct ath_softc *sc, struct ath_txq *txq,
150 struct sk_buff *skb)
151{
152 int q;
153
154 q = skb_get_queue_mapping(skb);
155 if (txq == sc->tx.uapsdq)
156 txq = sc->tx.txq_map[q];
157
158 if (txq != sc->tx.txq_map[q])
159 return;
160
161 if (WARN_ON(--txq->pending_frames < 0))
162 txq->pending_frames = 0;
163
164 if (txq->stopped &&
165 txq->pending_frames < sc->tx.txq_max_pending[q]) {
166 ieee80211_wake_queue(sc->hw, q);
167 txq->stopped = false;
168 }
169}
170
149static void ath_tx_flush_tid(struct ath_softc *sc, struct ath_atx_tid *tid) 171static void ath_tx_flush_tid(struct ath_softc *sc, struct ath_atx_tid *tid)
150{ 172{
151 struct ath_txq *txq = tid->ac->txq; 173 struct ath_txq *txq = tid->ac->txq;
@@ -167,6 +189,7 @@ static void ath_tx_flush_tid(struct ath_softc *sc, struct ath_atx_tid *tid)
167 if (!bf) { 189 if (!bf) {
168 bf = ath_tx_setup_buffer(sc, txq, tid, skb); 190 bf = ath_tx_setup_buffer(sc, txq, tid, skb);
169 if (!bf) { 191 if (!bf) {
192 ath_txq_skb_done(sc, txq, skb);
170 ieee80211_free_txskb(sc->hw, skb); 193 ieee80211_free_txskb(sc->hw, skb);
171 continue; 194 continue;
172 } 195 }
@@ -811,6 +834,7 @@ ath_tx_get_tid_subframe(struct ath_softc *sc, struct ath_txq *txq,
811 834
812 if (!bf) { 835 if (!bf) {
813 __skb_unlink(skb, &tid->buf_q); 836 __skb_unlink(skb, &tid->buf_q);
837 ath_txq_skb_done(sc, txq, skb);
814 ieee80211_free_txskb(sc->hw, skb); 838 ieee80211_free_txskb(sc->hw, skb);
815 continue; 839 continue;
816 } 840 }
@@ -1824,6 +1848,7 @@ static void ath_tx_send_ampdu(struct ath_softc *sc, struct ath_txq *txq,
1824 1848
1825 bf = ath_tx_setup_buffer(sc, txq, tid, skb); 1849 bf = ath_tx_setup_buffer(sc, txq, tid, skb);
1826 if (!bf) { 1850 if (!bf) {
1851 ath_txq_skb_done(sc, txq, skb);
1827 ieee80211_free_txskb(sc->hw, skb); 1852 ieee80211_free_txskb(sc->hw, skb);
1828 return; 1853 return;
1829 } 1854 }
@@ -2090,6 +2115,7 @@ int ath_tx_start(struct ieee80211_hw *hw, struct sk_buff *skb,
2090 2115
2091 bf = ath_tx_setup_buffer(sc, txq, tid, skb); 2116 bf = ath_tx_setup_buffer(sc, txq, tid, skb);
2092 if (!bf) { 2117 if (!bf) {
2118 ath_txq_skb_done(sc, txq, skb);
2093 if (txctl->paprd) 2119 if (txctl->paprd)
2094 dev_kfree_skb_any(skb); 2120 dev_kfree_skb_any(skb);
2095 else 2121 else
@@ -2189,7 +2215,7 @@ static void ath_tx_complete(struct ath_softc *sc, struct sk_buff *skb,
2189 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb); 2215 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
2190 struct ath_common *common = ath9k_hw_common(sc->sc_ah); 2216 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
2191 struct ieee80211_hdr * hdr = (struct ieee80211_hdr *)skb->data; 2217 struct ieee80211_hdr * hdr = (struct ieee80211_hdr *)skb->data;
2192 int q, padpos, padsize; 2218 int padpos, padsize;
2193 unsigned long flags; 2219 unsigned long flags;
2194 2220
2195 ath_dbg(common, XMIT, "TX complete: skb: %p\n", skb); 2221 ath_dbg(common, XMIT, "TX complete: skb: %p\n", skb);
@@ -2225,21 +2251,7 @@ static void ath_tx_complete(struct ath_softc *sc, struct sk_buff *skb,
2225 spin_unlock_irqrestore(&sc->sc_pm_lock, flags); 2251 spin_unlock_irqrestore(&sc->sc_pm_lock, flags);
2226 2252
2227 __skb_queue_tail(&txq->complete_q, skb); 2253 __skb_queue_tail(&txq->complete_q, skb);
2228 2254 ath_txq_skb_done(sc, txq, skb);
2229 q = skb_get_queue_mapping(skb);
2230 if (txq == sc->tx.uapsdq)
2231 txq = sc->tx.txq_map[q];
2232
2233 if (txq == sc->tx.txq_map[q]) {
2234 if (WARN_ON(--txq->pending_frames < 0))
2235 txq->pending_frames = 0;
2236
2237 if (txq->stopped &&
2238 txq->pending_frames < sc->tx.txq_max_pending[q]) {
2239 ieee80211_wake_queue(sc->hw, q);
2240 txq->stopped = false;
2241 }
2242 }
2243} 2255}
2244 2256
2245static void ath_tx_complete_buf(struct ath_softc *sc, struct ath_buf *bf, 2257static void ath_tx_complete_buf(struct ath_softc *sc, struct ath_buf *bf,
diff --git a/drivers/net/wireless/ath/wil6210/debugfs.c b/drivers/net/wireless/ath/wil6210/debugfs.c
index e8308ec30970..ab636767fbde 100644
--- a/drivers/net/wireless/ath/wil6210/debugfs.c
+++ b/drivers/net/wireless/ath/wil6210/debugfs.c
@@ -145,7 +145,7 @@ static void wil_print_ring(struct seq_file *s, const char *prefix,
145 le16_to_cpu(hdr.type), hdr.flags); 145 le16_to_cpu(hdr.type), hdr.flags);
146 if (len <= MAX_MBOXITEM_SIZE) { 146 if (len <= MAX_MBOXITEM_SIZE) {
147 int n = 0; 147 int n = 0;
148 unsigned char printbuf[16 * 3 + 2]; 148 char printbuf[16 * 3 + 2];
149 unsigned char databuf[MAX_MBOXITEM_SIZE]; 149 unsigned char databuf[MAX_MBOXITEM_SIZE];
150 void __iomem *src = wmi_buffer(wil, d.addr) + 150 void __iomem *src = wmi_buffer(wil, d.addr) +
151 sizeof(struct wil6210_mbox_hdr); 151 sizeof(struct wil6210_mbox_hdr);
@@ -416,7 +416,7 @@ static int wil_txdesc_debugfs_show(struct seq_file *s, void *data)
416 seq_printf(s, " SKB = %p\n", skb); 416 seq_printf(s, " SKB = %p\n", skb);
417 417
418 if (skb) { 418 if (skb) {
419 unsigned char printbuf[16 * 3 + 2]; 419 char printbuf[16 * 3 + 2];
420 int i = 0; 420 int i = 0;
421 int len = le16_to_cpu(d->dma.length); 421 int len = le16_to_cpu(d->dma.length);
422 void *p = skb->data; 422 void *p = skb->data;
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/dhd_linux.c b/drivers/net/wireless/brcm80211/brcmfmac/dhd_linux.c
index 8e8975562ec3..80099016d21f 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/dhd_linux.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/dhd_linux.c
@@ -242,7 +242,7 @@ void brcmf_txflowblock_if(struct brcmf_if *ifp,
242{ 242{
243 unsigned long flags; 243 unsigned long flags;
244 244
245 if (!ifp) 245 if (!ifp || !ifp->ndev)
246 return; 246 return;
247 247
248 brcmf_dbg(TRACE, "enter: idx=%d stop=0x%X reason=%d state=%d\n", 248 brcmf_dbg(TRACE, "enter: idx=%d stop=0x%X reason=%d state=%d\n",
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/fwsignal.c b/drivers/net/wireless/brcm80211/brcmfmac/fwsignal.c
index f0d9f7f6c83d..29b1f24c2d0f 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/fwsignal.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/fwsignal.c
@@ -1744,13 +1744,14 @@ int brcmf_fws_process_skb(struct brcmf_if *ifp, struct sk_buff *skb)
1744 ulong flags; 1744 ulong flags;
1745 int fifo = BRCMF_FWS_FIFO_BCMC; 1745 int fifo = BRCMF_FWS_FIFO_BCMC;
1746 bool multicast = is_multicast_ether_addr(eh->h_dest); 1746 bool multicast = is_multicast_ether_addr(eh->h_dest);
1747 bool pae = eh->h_proto == htons(ETH_P_PAE);
1747 1748
1748 /* determine the priority */ 1749 /* determine the priority */
1749 if (!skb->priority) 1750 if (!skb->priority)
1750 skb->priority = cfg80211_classify8021d(skb); 1751 skb->priority = cfg80211_classify8021d(skb);
1751 1752
1752 drvr->tx_multicast += !!multicast; 1753 drvr->tx_multicast += !!multicast;
1753 if (ntohs(eh->h_proto) == ETH_P_PAE) 1754 if (pae)
1754 atomic_inc(&ifp->pend_8021x_cnt); 1755 atomic_inc(&ifp->pend_8021x_cnt);
1755 1756
1756 if (!brcmf_fws_fc_active(fws)) { 1757 if (!brcmf_fws_fc_active(fws)) {
@@ -1781,6 +1782,11 @@ int brcmf_fws_process_skb(struct brcmf_if *ifp, struct sk_buff *skb)
1781 brcmf_fws_schedule_deq(fws); 1782 brcmf_fws_schedule_deq(fws);
1782 } else { 1783 } else {
1783 brcmf_err("drop skb: no hanger slot\n"); 1784 brcmf_err("drop skb: no hanger slot\n");
1785 if (pae) {
1786 atomic_dec(&ifp->pend_8021x_cnt);
1787 if (waitqueue_active(&ifp->pend_8021x_wait))
1788 wake_up(&ifp->pend_8021x_wait);
1789 }
1784 brcmu_pkt_buf_free_skb(skb); 1790 brcmu_pkt_buf_free_skb(skb);
1785 } 1791 }
1786 brcmf_fws_unlock(drvr, flags); 1792 brcmf_fws_unlock(drvr, flags);
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.c b/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.c
index 277b37ae7126..7fa71f73cfe8 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.c
@@ -1093,8 +1093,11 @@ static void brcmf_link_down(struct brcmf_cfg80211_vif *vif)
1093 brcmf_dbg(INFO, "Call WLC_DISASSOC to stop excess roaming\n "); 1093 brcmf_dbg(INFO, "Call WLC_DISASSOC to stop excess roaming\n ");
1094 err = brcmf_fil_cmd_data_set(vif->ifp, 1094 err = brcmf_fil_cmd_data_set(vif->ifp,
1095 BRCMF_C_DISASSOC, NULL, 0); 1095 BRCMF_C_DISASSOC, NULL, 0);
1096 if (err) 1096 if (err) {
1097 brcmf_err("WLC_DISASSOC failed (%d)\n", err); 1097 brcmf_err("WLC_DISASSOC failed (%d)\n", err);
1098 cfg80211_disconnected(vif->wdev.netdev, 0,
1099 NULL, 0, GFP_KERNEL);
1100 }
1098 clear_bit(BRCMF_VIF_STATUS_CONNECTED, &vif->sme_state); 1101 clear_bit(BRCMF_VIF_STATUS_CONNECTED, &vif->sme_state);
1099 } 1102 }
1100 clear_bit(BRCMF_VIF_STATUS_CONNECTING, &vif->sme_state); 1103 clear_bit(BRCMF_VIF_STATUS_CONNECTING, &vif->sme_state);
diff --git a/drivers/net/wireless/cw1200/txrx.c b/drivers/net/wireless/cw1200/txrx.c
index 5862c373d714..e824d4d4a18d 100644
--- a/drivers/net/wireless/cw1200/txrx.c
+++ b/drivers/net/wireless/cw1200/txrx.c
@@ -1165,7 +1165,7 @@ void cw1200_rx_cb(struct cw1200_common *priv,
1165 if (cw1200_handle_action_rx(priv, skb)) 1165 if (cw1200_handle_action_rx(priv, skb))
1166 return; 1166 return;
1167 } else if (ieee80211_is_beacon(frame->frame_control) && 1167 } else if (ieee80211_is_beacon(frame->frame_control) &&
1168 !arg->status && 1168 !arg->status && priv->vif &&
1169 !memcmp(ieee80211_get_SA(frame), priv->vif->bss_conf.bssid, 1169 !memcmp(ieee80211_get_SA(frame), priv->vif->bss_conf.bssid,
1170 ETH_ALEN)) { 1170 ETH_ALEN)) {
1171 const u8 *tim_ie; 1171 const u8 *tim_ie;
diff --git a/drivers/net/wireless/iwlwifi/dvm/main.c b/drivers/net/wireless/iwlwifi/dvm/main.c
index 3952ddf2ddb2..1531a4fc0960 100644
--- a/drivers/net/wireless/iwlwifi/dvm/main.c
+++ b/drivers/net/wireless/iwlwifi/dvm/main.c
@@ -758,7 +758,7 @@ int iwl_alive_start(struct iwl_priv *priv)
758 BT_COEX_PRIO_TBL_EVT_INIT_CALIB2); 758 BT_COEX_PRIO_TBL_EVT_INIT_CALIB2);
759 if (ret) 759 if (ret)
760 return ret; 760 return ret;
761 } else { 761 } else if (priv->lib->bt_params) {
762 /* 762 /*
763 * default is 2-wire BT coexexistence support 763 * default is 2-wire BT coexexistence support
764 */ 764 */
diff --git a/drivers/net/wireless/iwlwifi/iwl-prph.h b/drivers/net/wireless/iwlwifi/iwl-prph.h
index ff8cc75c189d..a70c7b9d9bad 100644
--- a/drivers/net/wireless/iwlwifi/iwl-prph.h
+++ b/drivers/net/wireless/iwlwifi/iwl-prph.h
@@ -97,6 +97,8 @@
97 97
98#define APMG_PCIDEV_STT_VAL_L1_ACT_DIS (0x00000800) 98#define APMG_PCIDEV_STT_VAL_L1_ACT_DIS (0x00000800)
99 99
100#define APMG_RTC_INT_STT_RFKILL (0x10000000)
101
100/* Device system time */ 102/* Device system time */
101#define DEVICE_SYSTEM_TIME_REG 0xA0206C 103#define DEVICE_SYSTEM_TIME_REG 0xA0206C
102 104
diff --git a/drivers/net/wireless/iwlwifi/mvm/d3.c b/drivers/net/wireless/iwlwifi/mvm/d3.c
index 7e5e5c2f9f87..83da884cf303 100644
--- a/drivers/net/wireless/iwlwifi/mvm/d3.c
+++ b/drivers/net/wireless/iwlwifi/mvm/d3.c
@@ -134,7 +134,7 @@ struct wowlan_key_data {
134 struct iwl_wowlan_rsc_tsc_params_cmd *rsc_tsc; 134 struct iwl_wowlan_rsc_tsc_params_cmd *rsc_tsc;
135 struct iwl_wowlan_tkip_params_cmd *tkip; 135 struct iwl_wowlan_tkip_params_cmd *tkip;
136 bool error, use_rsc_tsc, use_tkip; 136 bool error, use_rsc_tsc, use_tkip;
137 int gtk_key_idx; 137 int wep_key_idx;
138}; 138};
139 139
140static void iwl_mvm_wowlan_program_keys(struct ieee80211_hw *hw, 140static void iwl_mvm_wowlan_program_keys(struct ieee80211_hw *hw,
@@ -188,8 +188,8 @@ static void iwl_mvm_wowlan_program_keys(struct ieee80211_hw *hw,
188 wkc.wep_key.key_offset = 0; 188 wkc.wep_key.key_offset = 0;
189 } else { 189 } else {
190 /* others start at 1 */ 190 /* others start at 1 */
191 data->gtk_key_idx++; 191 data->wep_key_idx++;
192 wkc.wep_key.key_offset = data->gtk_key_idx; 192 wkc.wep_key.key_offset = data->wep_key_idx;
193 } 193 }
194 194
195 ret = iwl_mvm_send_cmd_pdu(mvm, WEP_KEY, CMD_SYNC, 195 ret = iwl_mvm_send_cmd_pdu(mvm, WEP_KEY, CMD_SYNC,
@@ -316,8 +316,13 @@ static void iwl_mvm_wowlan_program_keys(struct ieee80211_hw *hw,
316 mvm->ptk_ivlen = key->iv_len; 316 mvm->ptk_ivlen = key->iv_len;
317 mvm->ptk_icvlen = key->icv_len; 317 mvm->ptk_icvlen = key->icv_len;
318 } else { 318 } else {
319 data->gtk_key_idx++; 319 /*
320 key->hw_key_idx = data->gtk_key_idx; 320 * firmware only supports TSC/RSC for a single key,
321 * so if there are multiple keep overwriting them
322 * with new ones -- this relies on mac80211 doing
323 * list_add_tail().
324 */
325 key->hw_key_idx = 1;
321 mvm->gtk_ivlen = key->iv_len; 326 mvm->gtk_ivlen = key->iv_len;
322 mvm->gtk_icvlen = key->icv_len; 327 mvm->gtk_icvlen = key->icv_len;
323 } 328 }
diff --git a/drivers/net/wireless/iwlwifi/mvm/debugfs.c b/drivers/net/wireless/iwlwifi/mvm/debugfs.c
index e56ed2a84888..c24a744910ac 100644
--- a/drivers/net/wireless/iwlwifi/mvm/debugfs.c
+++ b/drivers/net/wireless/iwlwifi/mvm/debugfs.c
@@ -988,7 +988,11 @@ void iwl_mvm_vif_dbgfs_register(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
988 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); 988 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
989 char buf[100]; 989 char buf[100];
990 990
991 if (!dbgfs_dir) 991 /*
992 * Check if debugfs directory already exist before creating it.
993 * This may happen when, for example, resetting hw or suspend-resume
994 */
995 if (!dbgfs_dir || mvmvif->dbgfs_dir)
992 return; 996 return;
993 997
994 mvmvif->dbgfs_dir = debugfs_create_dir("iwlmvm", dbgfs_dir); 998 mvmvif->dbgfs_dir = debugfs_create_dir("iwlmvm", dbgfs_dir);
diff --git a/drivers/net/wireless/iwlwifi/mvm/fw-api-scan.h b/drivers/net/wireless/iwlwifi/mvm/fw-api-scan.h
index b60d14151721..365095a0c3b3 100644
--- a/drivers/net/wireless/iwlwifi/mvm/fw-api-scan.h
+++ b/drivers/net/wireless/iwlwifi/mvm/fw-api-scan.h
@@ -69,7 +69,6 @@
69/* Scan Commands, Responses, Notifications */ 69/* Scan Commands, Responses, Notifications */
70 70
71/* Masks for iwl_scan_channel.type flags */ 71/* Masks for iwl_scan_channel.type flags */
72#define SCAN_CHANNEL_TYPE_PASSIVE 0
73#define SCAN_CHANNEL_TYPE_ACTIVE BIT(0) 72#define SCAN_CHANNEL_TYPE_ACTIVE BIT(0)
74#define SCAN_CHANNEL_NARROW_BAND BIT(22) 73#define SCAN_CHANNEL_NARROW_BAND BIT(22)
75 74
diff --git a/drivers/net/wireless/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/iwlwifi/mvm/mac80211.c
index e08683b20531..f19baf0dea6b 100644
--- a/drivers/net/wireless/iwlwifi/mvm/mac80211.c
+++ b/drivers/net/wireless/iwlwifi/mvm/mac80211.c
@@ -257,7 +257,11 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm)
257 if (ret) 257 if (ret)
258 return ret; 258 return ret;
259 259
260 return ieee80211_register_hw(mvm->hw); 260 ret = ieee80211_register_hw(mvm->hw);
261 if (ret)
262 iwl_mvm_leds_exit(mvm);
263
264 return ret;
261} 265}
262 266
263static void iwl_mvm_mac_tx(struct ieee80211_hw *hw, 267static void iwl_mvm_mac_tx(struct ieee80211_hw *hw,
@@ -385,6 +389,7 @@ static void iwl_mvm_restart_cleanup(struct iwl_mvm *mvm)
385 ieee80211_wake_queues(mvm->hw); 389 ieee80211_wake_queues(mvm->hw);
386 390
387 mvm->vif_count = 0; 391 mvm->vif_count = 0;
392 mvm->rx_ba_sessions = 0;
388} 393}
389 394
390static int iwl_mvm_mac_start(struct ieee80211_hw *hw) 395static int iwl_mvm_mac_start(struct ieee80211_hw *hw)
@@ -507,6 +512,27 @@ static int iwl_mvm_mac_add_interface(struct ieee80211_hw *hw,
507 goto out_unlock; 512 goto out_unlock;
508 513
509 /* 514 /*
515 * TODO: remove this temporary code.
516 * Currently MVM FW supports power management only on single MAC.
517 * If new interface added, disable PM on existing interface.
518 * P2P device is a special case, since it is handled by FW similary to
519 * scan. If P2P deviced is added, PM remains enabled on existing
520 * interface.
521 * Note: the method below does not count the new interface being added
522 * at this moment.
523 */
524 if (vif->type != NL80211_IFTYPE_P2P_DEVICE)
525 mvm->vif_count++;
526 if (mvm->vif_count > 1) {
527 IWL_DEBUG_MAC80211(mvm,
528 "Disable power on existing interfaces\n");
529 ieee80211_iterate_active_interfaces_atomic(
530 mvm->hw,
531 IEEE80211_IFACE_ITER_NORMAL,
532 iwl_mvm_pm_disable_iterator, mvm);
533 }
534
535 /*
510 * The AP binding flow can be done only after the beacon 536 * The AP binding flow can be done only after the beacon
511 * template is configured (which happens only in the mac80211 537 * template is configured (which happens only in the mac80211
512 * start_ap() flow), and adding the broadcast station can happen 538 * start_ap() flow), and adding the broadcast station can happen
@@ -529,27 +555,6 @@ static int iwl_mvm_mac_add_interface(struct ieee80211_hw *hw,
529 goto out_unlock; 555 goto out_unlock;
530 } 556 }
531 557
532 /*
533 * TODO: remove this temporary code.
534 * Currently MVM FW supports power management only on single MAC.
535 * If new interface added, disable PM on existing interface.
536 * P2P device is a special case, since it is handled by FW similary to
537 * scan. If P2P deviced is added, PM remains enabled on existing
538 * interface.
539 * Note: the method below does not count the new interface being added
540 * at this moment.
541 */
542 if (vif->type != NL80211_IFTYPE_P2P_DEVICE)
543 mvm->vif_count++;
544 if (mvm->vif_count > 1) {
545 IWL_DEBUG_MAC80211(mvm,
546 "Disable power on existing interfaces\n");
547 ieee80211_iterate_active_interfaces_atomic(
548 mvm->hw,
549 IEEE80211_IFACE_ITER_NORMAL,
550 iwl_mvm_pm_disable_iterator, mvm);
551 }
552
553 ret = iwl_mvm_mac_ctxt_add(mvm, vif); 558 ret = iwl_mvm_mac_ctxt_add(mvm, vif);
554 if (ret) 559 if (ret)
555 goto out_release; 560 goto out_release;
@@ -1006,6 +1011,21 @@ static int iwl_mvm_mac_sta_state(struct ieee80211_hw *hw,
1006 mutex_lock(&mvm->mutex); 1011 mutex_lock(&mvm->mutex);
1007 if (old_state == IEEE80211_STA_NOTEXIST && 1012 if (old_state == IEEE80211_STA_NOTEXIST &&
1008 new_state == IEEE80211_STA_NONE) { 1013 new_state == IEEE80211_STA_NONE) {
1014 /*
1015 * Firmware bug - it'll crash if the beacon interval is less
1016 * than 16. We can't avoid connecting at all, so refuse the
1017 * station state change, this will cause mac80211 to abandon
1018 * attempts to connect to this AP, and eventually wpa_s will
1019 * blacklist the AP...
1020 */
1021 if (vif->type == NL80211_IFTYPE_STATION &&
1022 vif->bss_conf.beacon_int < 16) {
1023 IWL_ERR(mvm,
1024 "AP %pM beacon interval is %d, refusing due to firmware bug!\n",
1025 sta->addr, vif->bss_conf.beacon_int);
1026 ret = -EINVAL;
1027 goto out_unlock;
1028 }
1009 ret = iwl_mvm_add_sta(mvm, vif, sta); 1029 ret = iwl_mvm_add_sta(mvm, vif, sta);
1010 } else if (old_state == IEEE80211_STA_NONE && 1030 } else if (old_state == IEEE80211_STA_NONE &&
1011 new_state == IEEE80211_STA_AUTH) { 1031 new_state == IEEE80211_STA_AUTH) {
@@ -1038,6 +1058,7 @@ static int iwl_mvm_mac_sta_state(struct ieee80211_hw *hw,
1038 } else { 1058 } else {
1039 ret = -EIO; 1059 ret = -EIO;
1040 } 1060 }
1061 out_unlock:
1041 mutex_unlock(&mvm->mutex); 1062 mutex_unlock(&mvm->mutex);
1042 1063
1043 return ret; 1064 return ret;
diff --git a/drivers/net/wireless/iwlwifi/mvm/mvm.h b/drivers/net/wireless/iwlwifi/mvm/mvm.h
index d40d7db185d6..420e82d379d9 100644
--- a/drivers/net/wireless/iwlwifi/mvm/mvm.h
+++ b/drivers/net/wireless/iwlwifi/mvm/mvm.h
@@ -419,6 +419,7 @@ struct iwl_mvm {
419 struct work_struct sta_drained_wk; 419 struct work_struct sta_drained_wk;
420 unsigned long sta_drained[BITS_TO_LONGS(IWL_MVM_STATION_COUNT)]; 420 unsigned long sta_drained[BITS_TO_LONGS(IWL_MVM_STATION_COUNT)];
421 atomic_t pending_frames[IWL_MVM_STATION_COUNT]; 421 atomic_t pending_frames[IWL_MVM_STATION_COUNT];
422 u8 rx_ba_sessions;
422 423
423 /* configured by mac80211 */ 424 /* configured by mac80211 */
424 u32 rts_threshold; 425 u32 rts_threshold;
diff --git a/drivers/net/wireless/iwlwifi/mvm/scan.c b/drivers/net/wireless/iwlwifi/mvm/scan.c
index 2157b0f8ced5..acdff6b67e04 100644
--- a/drivers/net/wireless/iwlwifi/mvm/scan.c
+++ b/drivers/net/wireless/iwlwifi/mvm/scan.c
@@ -137,8 +137,8 @@ static void iwl_mvm_scan_fill_ssids(struct iwl_scan_cmd *cmd,
137{ 137{
138 int fw_idx, req_idx; 138 int fw_idx, req_idx;
139 139
140 fw_idx = 0; 140 for (req_idx = req->n_ssids - 1, fw_idx = 0; req_idx > 0;
141 for (req_idx = req->n_ssids - 1; req_idx > 0; req_idx--) { 141 req_idx--, fw_idx++) {
142 cmd->direct_scan[fw_idx].id = WLAN_EID_SSID; 142 cmd->direct_scan[fw_idx].id = WLAN_EID_SSID;
143 cmd->direct_scan[fw_idx].len = req->ssids[req_idx].ssid_len; 143 cmd->direct_scan[fw_idx].len = req->ssids[req_idx].ssid_len;
144 memcpy(cmd->direct_scan[fw_idx].ssid, 144 memcpy(cmd->direct_scan[fw_idx].ssid,
@@ -153,7 +153,9 @@ static void iwl_mvm_scan_fill_ssids(struct iwl_scan_cmd *cmd,
153 * just to notify that this scan is active and not passive. 153 * just to notify that this scan is active and not passive.
154 * In order to notify the FW of the number of SSIDs we wish to scan (including 154 * In order to notify the FW of the number of SSIDs we wish to scan (including
155 * the zero-length one), we need to set the corresponding bits in chan->type, 155 * the zero-length one), we need to set the corresponding bits in chan->type,
156 * one for each SSID, and set the active bit (first). 156 * one for each SSID, and set the active bit (first). The first SSID is already
157 * included in the probe template, so we need to set only req->n_ssids - 1 bits
158 * in addition to the first bit.
157 */ 159 */
158static u16 iwl_mvm_get_active_dwell(enum ieee80211_band band, int n_ssids) 160static u16 iwl_mvm_get_active_dwell(enum ieee80211_band band, int n_ssids)
159{ 161{
@@ -176,19 +178,12 @@ static void iwl_mvm_scan_fill_channels(struct iwl_scan_cmd *cmd,
176 struct iwl_scan_channel *chan = (struct iwl_scan_channel *) 178 struct iwl_scan_channel *chan = (struct iwl_scan_channel *)
177 (cmd->data + le16_to_cpu(cmd->tx_cmd.len)); 179 (cmd->data + le16_to_cpu(cmd->tx_cmd.len));
178 int i; 180 int i;
179 __le32 chan_type_value;
180
181 if (req->n_ssids > 0)
182 chan_type_value = cpu_to_le32(BIT(req->n_ssids + 1) - 1);
183 else
184 chan_type_value = SCAN_CHANNEL_TYPE_PASSIVE;
185 181
186 for (i = 0; i < cmd->channel_count; i++) { 182 for (i = 0; i < cmd->channel_count; i++) {
187 chan->channel = cpu_to_le16(req->channels[i]->hw_value); 183 chan->channel = cpu_to_le16(req->channels[i]->hw_value);
184 chan->type = cpu_to_le32(BIT(req->n_ssids) - 1);
188 if (req->channels[i]->flags & IEEE80211_CHAN_PASSIVE_SCAN) 185 if (req->channels[i]->flags & IEEE80211_CHAN_PASSIVE_SCAN)
189 chan->type = SCAN_CHANNEL_TYPE_PASSIVE; 186 chan->type &= cpu_to_le32(~SCAN_CHANNEL_TYPE_ACTIVE);
190 else
191 chan->type = chan_type_value;
192 chan->active_dwell = cpu_to_le16(active_dwell); 187 chan->active_dwell = cpu_to_le16(active_dwell);
193 chan->passive_dwell = cpu_to_le16(passive_dwell); 188 chan->passive_dwell = cpu_to_le16(passive_dwell);
194 chan->iteration_count = cpu_to_le16(1); 189 chan->iteration_count = cpu_to_le16(1);
diff --git a/drivers/net/wireless/iwlwifi/mvm/sta.c b/drivers/net/wireless/iwlwifi/mvm/sta.c
index 62fe5209093b..563f559b902d 100644
--- a/drivers/net/wireless/iwlwifi/mvm/sta.c
+++ b/drivers/net/wireless/iwlwifi/mvm/sta.c
@@ -608,6 +608,8 @@ int iwl_mvm_rm_bcast_sta(struct iwl_mvm *mvm, struct iwl_mvm_int_sta *bsta)
608 return ret; 608 return ret;
609} 609}
610 610
611#define IWL_MAX_RX_BA_SESSIONS 16
612
611int iwl_mvm_sta_rx_agg(struct iwl_mvm *mvm, struct ieee80211_sta *sta, 613int iwl_mvm_sta_rx_agg(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
612 int tid, u16 ssn, bool start) 614 int tid, u16 ssn, bool start)
613{ 615{
@@ -618,11 +620,20 @@ int iwl_mvm_sta_rx_agg(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
618 620
619 lockdep_assert_held(&mvm->mutex); 621 lockdep_assert_held(&mvm->mutex);
620 622
623 if (start && mvm->rx_ba_sessions >= IWL_MAX_RX_BA_SESSIONS) {
624 IWL_WARN(mvm, "Not enough RX BA SESSIONS\n");
625 return -ENOSPC;
626 }
627
621 cmd.mac_id_n_color = cpu_to_le32(mvm_sta->mac_id_n_color); 628 cmd.mac_id_n_color = cpu_to_le32(mvm_sta->mac_id_n_color);
622 cmd.sta_id = mvm_sta->sta_id; 629 cmd.sta_id = mvm_sta->sta_id;
623 cmd.add_modify = STA_MODE_MODIFY; 630 cmd.add_modify = STA_MODE_MODIFY;
624 cmd.add_immediate_ba_tid = (u8) tid; 631 if (start) {
625 cmd.add_immediate_ba_ssn = cpu_to_le16(ssn); 632 cmd.add_immediate_ba_tid = (u8) tid;
633 cmd.add_immediate_ba_ssn = cpu_to_le16(ssn);
634 } else {
635 cmd.remove_immediate_ba_tid = (u8) tid;
636 }
626 cmd.modify_mask = start ? STA_MODIFY_ADD_BA_TID : 637 cmd.modify_mask = start ? STA_MODIFY_ADD_BA_TID :
627 STA_MODIFY_REMOVE_BA_TID; 638 STA_MODIFY_REMOVE_BA_TID;
628 639
@@ -648,6 +659,14 @@ int iwl_mvm_sta_rx_agg(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
648 break; 659 break;
649 } 660 }
650 661
662 if (!ret) {
663 if (start)
664 mvm->rx_ba_sessions++;
665 else if (mvm->rx_ba_sessions > 0)
666 /* check that restart flow didn't zero the counter */
667 mvm->rx_ba_sessions--;
668 }
669
651 return ret; 670 return ret;
652} 671}
653 672
@@ -896,6 +915,7 @@ int iwl_mvm_sta_tx_agg_flush(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
896 struct iwl_mvm_sta *mvmsta = (void *)sta->drv_priv; 915 struct iwl_mvm_sta *mvmsta = (void *)sta->drv_priv;
897 struct iwl_mvm_tid_data *tid_data = &mvmsta->tid_data[tid]; 916 struct iwl_mvm_tid_data *tid_data = &mvmsta->tid_data[tid];
898 u16 txq_id; 917 u16 txq_id;
918 enum iwl_mvm_agg_state old_state;
899 919
900 /* 920 /*
901 * First set the agg state to OFF to avoid calling 921 * First set the agg state to OFF to avoid calling
@@ -905,13 +925,17 @@ int iwl_mvm_sta_tx_agg_flush(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
905 txq_id = tid_data->txq_id; 925 txq_id = tid_data->txq_id;
906 IWL_DEBUG_TX_QUEUES(mvm, "Flush AGG: sta %d tid %d q %d state %d\n", 926 IWL_DEBUG_TX_QUEUES(mvm, "Flush AGG: sta %d tid %d q %d state %d\n",
907 mvmsta->sta_id, tid, txq_id, tid_data->state); 927 mvmsta->sta_id, tid, txq_id, tid_data->state);
928 old_state = tid_data->state;
908 tid_data->state = IWL_AGG_OFF; 929 tid_data->state = IWL_AGG_OFF;
909 spin_unlock_bh(&mvmsta->lock); 930 spin_unlock_bh(&mvmsta->lock);
910 931
911 if (iwl_mvm_flush_tx_path(mvm, BIT(txq_id), true)) 932 if (old_state >= IWL_AGG_ON) {
912 IWL_ERR(mvm, "Couldn't flush the AGG queue\n"); 933 if (iwl_mvm_flush_tx_path(mvm, BIT(txq_id), true))
934 IWL_ERR(mvm, "Couldn't flush the AGG queue\n");
935
936 iwl_trans_txq_disable(mvm->trans, tid_data->txq_id);
937 }
913 938
914 iwl_trans_txq_disable(mvm->trans, tid_data->txq_id);
915 mvm->queue_to_mac80211[tid_data->txq_id] = 939 mvm->queue_to_mac80211[tid_data->txq_id] =
916 IWL_INVALID_MAC80211_QUEUE; 940 IWL_INVALID_MAC80211_QUEUE;
917 941
diff --git a/drivers/net/wireless/iwlwifi/pcie/drv.c b/drivers/net/wireless/iwlwifi/pcie/drv.c
index 81f3ea5b09a4..ff13458efc27 100644
--- a/drivers/net/wireless/iwlwifi/pcie/drv.c
+++ b/drivers/net/wireless/iwlwifi/pcie/drv.c
@@ -130,6 +130,7 @@ static DEFINE_PCI_DEVICE_TABLE(iwl_hw_card_ids) = {
130 {IWL_PCI_DEVICE(0x423C, 0x1306, iwl5150_abg_cfg)}, /* Half Mini Card */ 130 {IWL_PCI_DEVICE(0x423C, 0x1306, iwl5150_abg_cfg)}, /* Half Mini Card */
131 {IWL_PCI_DEVICE(0x423C, 0x1221, iwl5150_agn_cfg)}, /* Mini Card */ 131 {IWL_PCI_DEVICE(0x423C, 0x1221, iwl5150_agn_cfg)}, /* Mini Card */
132 {IWL_PCI_DEVICE(0x423C, 0x1321, iwl5150_agn_cfg)}, /* Half Mini Card */ 132 {IWL_PCI_DEVICE(0x423C, 0x1321, iwl5150_agn_cfg)}, /* Half Mini Card */
133 {IWL_PCI_DEVICE(0x423C, 0x1326, iwl5150_abg_cfg)}, /* Half Mini Card */
133 134
134 {IWL_PCI_DEVICE(0x423D, 0x1211, iwl5150_agn_cfg)}, /* Mini Card */ 135 {IWL_PCI_DEVICE(0x423D, 0x1211, iwl5150_agn_cfg)}, /* Mini Card */
135 {IWL_PCI_DEVICE(0x423D, 0x1311, iwl5150_agn_cfg)}, /* Half Mini Card */ 136 {IWL_PCI_DEVICE(0x423D, 0x1311, iwl5150_agn_cfg)}, /* Half Mini Card */
diff --git a/drivers/net/wireless/iwlwifi/pcie/rx.c b/drivers/net/wireless/iwlwifi/pcie/rx.c
index fd848cd1583e..f600e68a410a 100644
--- a/drivers/net/wireless/iwlwifi/pcie/rx.c
+++ b/drivers/net/wireless/iwlwifi/pcie/rx.c
@@ -888,6 +888,14 @@ irqreturn_t iwl_pcie_irq_handler(int irq, void *dev_id)
888 888
889 iwl_op_mode_hw_rf_kill(trans->op_mode, hw_rfkill); 889 iwl_op_mode_hw_rf_kill(trans->op_mode, hw_rfkill);
890 if (hw_rfkill) { 890 if (hw_rfkill) {
891 /*
892 * Clear the interrupt in APMG if the NIC is going down.
893 * Note that when the NIC exits RFkill (else branch), we
894 * can't access prph and the NIC will be reset in
895 * start_hw anyway.
896 */
897 iwl_write_prph(trans, APMG_RTC_INT_STT_REG,
898 APMG_RTC_INT_STT_RFKILL);
891 set_bit(STATUS_RFKILL, &trans_pcie->status); 899 set_bit(STATUS_RFKILL, &trans_pcie->status);
892 if (test_and_clear_bit(STATUS_HCMD_ACTIVE, 900 if (test_and_clear_bit(STATUS_HCMD_ACTIVE,
893 &trans_pcie->status)) 901 &trans_pcie->status))
diff --git a/drivers/net/wireless/iwlwifi/pcie/trans.c b/drivers/net/wireless/iwlwifi/pcie/trans.c
index 826c15602c46..96cfcdd39079 100644
--- a/drivers/net/wireless/iwlwifi/pcie/trans.c
+++ b/drivers/net/wireless/iwlwifi/pcie/trans.c
@@ -670,6 +670,11 @@ static int iwl_trans_pcie_start_hw(struct iwl_trans *trans)
670 return err; 670 return err;
671 } 671 }
672 672
673 /* Reset the entire device */
674 iwl_set_bit(trans, CSR_RESET, CSR_RESET_REG_FLAG_SW_RESET);
675
676 usleep_range(10, 15);
677
673 iwl_pcie_apm_init(trans); 678 iwl_pcie_apm_init(trans);
674 679
675 /* From now on, the op_mode will be kept updated about RF kill state */ 680 /* From now on, the op_mode will be kept updated about RF kill state */
diff --git a/drivers/net/wireless/mwifiex/cfg80211.c b/drivers/net/wireless/mwifiex/cfg80211.c
index ef5fa890a286..89459db4c53b 100644
--- a/drivers/net/wireless/mwifiex/cfg80211.c
+++ b/drivers/net/wireless/mwifiex/cfg80211.c
@@ -1716,9 +1716,9 @@ mwifiex_cfg80211_connect(struct wiphy *wiphy, struct net_device *dev,
1716 struct mwifiex_private *priv = mwifiex_netdev_get_priv(dev); 1716 struct mwifiex_private *priv = mwifiex_netdev_get_priv(dev);
1717 int ret; 1717 int ret;
1718 1718
1719 if (priv->bss_mode != NL80211_IFTYPE_STATION) { 1719 if (GET_BSS_ROLE(priv) != MWIFIEX_BSS_ROLE_STA) {
1720 wiphy_err(wiphy, 1720 wiphy_err(wiphy,
1721 "%s: reject infra assoc request in non-STA mode\n", 1721 "%s: reject infra assoc request in non-STA role\n",
1722 dev->name); 1722 dev->name);
1723 return -EINVAL; 1723 return -EINVAL;
1724 } 1724 }
diff --git a/drivers/net/wireless/mwifiex/cfp.c b/drivers/net/wireless/mwifiex/cfp.c
index 988552dece75..5178c4630d89 100644
--- a/drivers/net/wireless/mwifiex/cfp.c
+++ b/drivers/net/wireless/mwifiex/cfp.c
@@ -415,7 +415,8 @@ u32 mwifiex_get_supported_rates(struct mwifiex_private *priv, u8 *rates)
415 u32 k = 0; 415 u32 k = 0;
416 struct mwifiex_adapter *adapter = priv->adapter; 416 struct mwifiex_adapter *adapter = priv->adapter;
417 417
418 if (priv->bss_mode == NL80211_IFTYPE_STATION) { 418 if (priv->bss_mode == NL80211_IFTYPE_STATION ||
419 priv->bss_mode == NL80211_IFTYPE_P2P_CLIENT) {
419 switch (adapter->config_bands) { 420 switch (adapter->config_bands) {
420 case BAND_B: 421 case BAND_B:
421 dev_dbg(adapter->dev, "info: infra band=%d " 422 dev_dbg(adapter->dev, "info: infra band=%d "
diff --git a/drivers/net/wireless/mwifiex/init.c b/drivers/net/wireless/mwifiex/init.c
index caaf4bd56b30..2cf8b964e966 100644
--- a/drivers/net/wireless/mwifiex/init.c
+++ b/drivers/net/wireless/mwifiex/init.c
@@ -693,7 +693,7 @@ int mwifiex_dnld_fw(struct mwifiex_adapter *adapter,
693 if (!ret) { 693 if (!ret) {
694 dev_notice(adapter->dev, 694 dev_notice(adapter->dev,
695 "WLAN FW already running! Skip FW dnld\n"); 695 "WLAN FW already running! Skip FW dnld\n");
696 goto done; 696 return 0;
697 } 697 }
698 698
699 poll_num = MAX_FIRMWARE_POLL_TRIES; 699 poll_num = MAX_FIRMWARE_POLL_TRIES;
@@ -719,14 +719,8 @@ int mwifiex_dnld_fw(struct mwifiex_adapter *adapter,
719poll_fw: 719poll_fw:
720 /* Check if the firmware is downloaded successfully or not */ 720 /* Check if the firmware is downloaded successfully or not */
721 ret = adapter->if_ops.check_fw_status(adapter, poll_num); 721 ret = adapter->if_ops.check_fw_status(adapter, poll_num);
722 if (ret) { 722 if (ret)
723 dev_err(adapter->dev, "FW failed to be active in time\n"); 723 dev_err(adapter->dev, "FW failed to be active in time\n");
724 return -1;
725 }
726done:
727 /* re-enable host interrupt for mwifiex after fw dnld is successful */
728 if (adapter->if_ops.enable_int)
729 adapter->if_ops.enable_int(adapter);
730 724
731 return ret; 725 return ret;
732} 726}
diff --git a/drivers/net/wireless/mwifiex/join.c b/drivers/net/wireless/mwifiex/join.c
index 1c8a771e8e81..12e778159ec5 100644
--- a/drivers/net/wireless/mwifiex/join.c
+++ b/drivers/net/wireless/mwifiex/join.c
@@ -1291,8 +1291,10 @@ int mwifiex_associate(struct mwifiex_private *priv,
1291{ 1291{
1292 u8 current_bssid[ETH_ALEN]; 1292 u8 current_bssid[ETH_ALEN];
1293 1293
1294 /* Return error if the adapter or table entry is not marked as infra */ 1294 /* Return error if the adapter is not STA role or table entry
1295 if ((priv->bss_mode != NL80211_IFTYPE_STATION) || 1295 * is not marked as infra.
1296 */
1297 if ((GET_BSS_ROLE(priv) != MWIFIEX_BSS_ROLE_STA) ||
1296 (bss_desc->bss_mode != NL80211_IFTYPE_STATION)) 1298 (bss_desc->bss_mode != NL80211_IFTYPE_STATION))
1297 return -1; 1299 return -1;
1298 1300
diff --git a/drivers/net/wireless/mwifiex/main.c b/drivers/net/wireless/mwifiex/main.c
index e15ab72fb03d..1753431de361 100644
--- a/drivers/net/wireless/mwifiex/main.c
+++ b/drivers/net/wireless/mwifiex/main.c
@@ -427,6 +427,10 @@ static void mwifiex_fw_dpc(const struct firmware *firmware, void *context)
427 "Cal data request_firmware() failed\n"); 427 "Cal data request_firmware() failed\n");
428 } 428 }
429 429
430 /* enable host interrupt after fw dnld is successful */
431 if (adapter->if_ops.enable_int)
432 adapter->if_ops.enable_int(adapter);
433
430 adapter->init_wait_q_woken = false; 434 adapter->init_wait_q_woken = false;
431 ret = mwifiex_init_fw(adapter); 435 ret = mwifiex_init_fw(adapter);
432 if (ret == -1) { 436 if (ret == -1) {
@@ -478,6 +482,8 @@ err_add_intf:
478 mwifiex_del_virtual_intf(adapter->wiphy, priv->wdev); 482 mwifiex_del_virtual_intf(adapter->wiphy, priv->wdev);
479 rtnl_unlock(); 483 rtnl_unlock();
480err_init_fw: 484err_init_fw:
485 if (adapter->if_ops.disable_int)
486 adapter->if_ops.disable_int(adapter);
481 pr_debug("info: %s: unregister device\n", __func__); 487 pr_debug("info: %s: unregister device\n", __func__);
482 adapter->if_ops.unregister_dev(adapter); 488 adapter->if_ops.unregister_dev(adapter);
483done: 489done:
@@ -855,7 +861,7 @@ mwifiex_add_card(void *card, struct semaphore *sem,
855 INIT_WORK(&adapter->main_work, mwifiex_main_work_queue); 861 INIT_WORK(&adapter->main_work, mwifiex_main_work_queue);
856 862
857 /* Register the device. Fill up the private data structure with relevant 863 /* Register the device. Fill up the private data structure with relevant
858 information from the card and request for the required IRQ. */ 864 information from the card. */
859 if (adapter->if_ops.register_dev(adapter)) { 865 if (adapter->if_ops.register_dev(adapter)) {
860 pr_err("%s: failed to register mwifiex device\n", __func__); 866 pr_err("%s: failed to register mwifiex device\n", __func__);
861 goto err_registerdev; 867 goto err_registerdev;
@@ -919,6 +925,11 @@ int mwifiex_remove_card(struct mwifiex_adapter *adapter, struct semaphore *sem)
919 if (!adapter) 925 if (!adapter)
920 goto exit_remove; 926 goto exit_remove;
921 927
928 /* We can no longer handle interrupts once we start doing the teardown
929 * below. */
930 if (adapter->if_ops.disable_int)
931 adapter->if_ops.disable_int(adapter);
932
922 adapter->surprise_removed = true; 933 adapter->surprise_removed = true;
923 934
924 /* Stop data */ 935 /* Stop data */
diff --git a/drivers/net/wireless/mwifiex/main.h b/drivers/net/wireless/mwifiex/main.h
index 3da73d36acdf..253e0bd38e25 100644
--- a/drivers/net/wireless/mwifiex/main.h
+++ b/drivers/net/wireless/mwifiex/main.h
@@ -601,6 +601,7 @@ struct mwifiex_if_ops {
601 int (*register_dev) (struct mwifiex_adapter *); 601 int (*register_dev) (struct mwifiex_adapter *);
602 void (*unregister_dev) (struct mwifiex_adapter *); 602 void (*unregister_dev) (struct mwifiex_adapter *);
603 int (*enable_int) (struct mwifiex_adapter *); 603 int (*enable_int) (struct mwifiex_adapter *);
604 void (*disable_int) (struct mwifiex_adapter *);
604 int (*process_int_status) (struct mwifiex_adapter *); 605 int (*process_int_status) (struct mwifiex_adapter *);
605 int (*host_to_card) (struct mwifiex_adapter *, u8, struct sk_buff *, 606 int (*host_to_card) (struct mwifiex_adapter *, u8, struct sk_buff *,
606 struct mwifiex_tx_param *); 607 struct mwifiex_tx_param *);
diff --git a/drivers/net/wireless/mwifiex/sdio.c b/drivers/net/wireless/mwifiex/sdio.c
index 5ee5ed02eccd..09185c963248 100644
--- a/drivers/net/wireless/mwifiex/sdio.c
+++ b/drivers/net/wireless/mwifiex/sdio.c
@@ -51,6 +51,7 @@ static struct mwifiex_if_ops sdio_ops;
51static struct semaphore add_remove_card_sem; 51static struct semaphore add_remove_card_sem;
52 52
53static int mwifiex_sdio_resume(struct device *dev); 53static int mwifiex_sdio_resume(struct device *dev);
54static void mwifiex_sdio_interrupt(struct sdio_func *func);
54 55
55/* 56/*
56 * SDIO probe. 57 * SDIO probe.
@@ -296,6 +297,15 @@ static struct sdio_driver mwifiex_sdio = {
296 } 297 }
297}; 298};
298 299
300/* Write data into SDIO card register. Caller claims SDIO device. */
301static int
302mwifiex_write_reg_locked(struct sdio_func *func, u32 reg, u8 data)
303{
304 int ret = -1;
305 sdio_writeb(func, data, reg, &ret);
306 return ret;
307}
308
299/* 309/*
300 * This function writes data into SDIO card register. 310 * This function writes data into SDIO card register.
301 */ 311 */
@@ -303,10 +313,10 @@ static int
303mwifiex_write_reg(struct mwifiex_adapter *adapter, u32 reg, u8 data) 313mwifiex_write_reg(struct mwifiex_adapter *adapter, u32 reg, u8 data)
304{ 314{
305 struct sdio_mmc_card *card = adapter->card; 315 struct sdio_mmc_card *card = adapter->card;
306 int ret = -1; 316 int ret;
307 317
308 sdio_claim_host(card->func); 318 sdio_claim_host(card->func);
309 sdio_writeb(card->func, data, reg, &ret); 319 ret = mwifiex_write_reg_locked(card->func, reg, data);
310 sdio_release_host(card->func); 320 sdio_release_host(card->func);
311 321
312 return ret; 322 return ret;
@@ -685,23 +695,15 @@ mwifiex_sdio_read_fw_status(struct mwifiex_adapter *adapter, u16 *dat)
685 * The host interrupt mask is read, the disable bit is reset and 695 * The host interrupt mask is read, the disable bit is reset and
686 * written back to the card host interrupt mask register. 696 * written back to the card host interrupt mask register.
687 */ 697 */
688static int mwifiex_sdio_disable_host_int(struct mwifiex_adapter *adapter) 698static void mwifiex_sdio_disable_host_int(struct mwifiex_adapter *adapter)
689{ 699{
690 u8 host_int_mask, host_int_disable = HOST_INT_DISABLE; 700 struct sdio_mmc_card *card = adapter->card;
691 701 struct sdio_func *func = card->func;
692 /* Read back the host_int_mask register */
693 if (mwifiex_read_reg(adapter, HOST_INT_MASK_REG, &host_int_mask))
694 return -1;
695
696 /* Update with the mask and write back to the register */
697 host_int_mask &= ~host_int_disable;
698
699 if (mwifiex_write_reg(adapter, HOST_INT_MASK_REG, host_int_mask)) {
700 dev_err(adapter->dev, "disable host interrupt failed\n");
701 return -1;
702 }
703 702
704 return 0; 703 sdio_claim_host(func);
704 mwifiex_write_reg_locked(func, HOST_INT_MASK_REG, 0);
705 sdio_release_irq(func);
706 sdio_release_host(func);
705} 707}
706 708
707/* 709/*
@@ -713,14 +715,29 @@ static int mwifiex_sdio_disable_host_int(struct mwifiex_adapter *adapter)
713static int mwifiex_sdio_enable_host_int(struct mwifiex_adapter *adapter) 715static int mwifiex_sdio_enable_host_int(struct mwifiex_adapter *adapter)
714{ 716{
715 struct sdio_mmc_card *card = adapter->card; 717 struct sdio_mmc_card *card = adapter->card;
718 struct sdio_func *func = card->func;
719 int ret;
720
721 sdio_claim_host(func);
722
723 /* Request the SDIO IRQ */
724 ret = sdio_claim_irq(func, mwifiex_sdio_interrupt);
725 if (ret) {
726 dev_err(adapter->dev, "claim irq failed: ret=%d\n", ret);
727 goto out;
728 }
716 729
717 /* Simply write the mask to the register */ 730 /* Simply write the mask to the register */
718 if (mwifiex_write_reg(adapter, HOST_INT_MASK_REG, 731 ret = mwifiex_write_reg_locked(func, HOST_INT_MASK_REG,
719 card->reg->host_int_enable)) { 732 card->reg->host_int_enable);
733 if (ret) {
720 dev_err(adapter->dev, "enable host interrupt failed\n"); 734 dev_err(adapter->dev, "enable host interrupt failed\n");
721 return -1; 735 sdio_release_irq(func);
722 } 736 }
723 return 0; 737
738out:
739 sdio_release_host(func);
740 return ret;
724} 741}
725 742
726/* 743/*
@@ -997,9 +1014,6 @@ mwifiex_sdio_interrupt(struct sdio_func *func)
997 } 1014 }
998 adapter = card->adapter; 1015 adapter = card->adapter;
999 1016
1000 if (adapter->surprise_removed)
1001 return;
1002
1003 if (!adapter->pps_uapsd_mode && adapter->ps_state == PS_STATE_SLEEP) 1017 if (!adapter->pps_uapsd_mode && adapter->ps_state == PS_STATE_SLEEP)
1004 adapter->ps_state = PS_STATE_AWAKE; 1018 adapter->ps_state = PS_STATE_AWAKE;
1005 1019
@@ -1625,8 +1639,8 @@ static int mwifiex_sdio_host_to_card(struct mwifiex_adapter *adapter,
1625 /* Allocate buffer and copy payload */ 1639 /* Allocate buffer and copy payload */
1626 blk_size = MWIFIEX_SDIO_BLOCK_SIZE; 1640 blk_size = MWIFIEX_SDIO_BLOCK_SIZE;
1627 buf_block_len = (pkt_len + blk_size - 1) / blk_size; 1641 buf_block_len = (pkt_len + blk_size - 1) / blk_size;
1628 *(u16 *) &payload[0] = (u16) pkt_len; 1642 *(__le16 *)&payload[0] = cpu_to_le16((u16)pkt_len);
1629 *(u16 *) &payload[2] = type; 1643 *(__le16 *)&payload[2] = cpu_to_le16(type);
1630 1644
1631 /* 1645 /*
1632 * This is SDIO specific header 1646 * This is SDIO specific header
@@ -1728,9 +1742,7 @@ mwifiex_unregister_dev(struct mwifiex_adapter *adapter)
1728 struct sdio_mmc_card *card = adapter->card; 1742 struct sdio_mmc_card *card = adapter->card;
1729 1743
1730 if (adapter->card) { 1744 if (adapter->card) {
1731 /* Release the SDIO IRQ */
1732 sdio_claim_host(card->func); 1745 sdio_claim_host(card->func);
1733 sdio_release_irq(card->func);
1734 sdio_disable_func(card->func); 1746 sdio_disable_func(card->func);
1735 sdio_release_host(card->func); 1747 sdio_release_host(card->func);
1736 sdio_set_drvdata(card->func, NULL); 1748 sdio_set_drvdata(card->func, NULL);
@@ -1744,7 +1756,7 @@ mwifiex_unregister_dev(struct mwifiex_adapter *adapter)
1744 */ 1756 */
1745static int mwifiex_register_dev(struct mwifiex_adapter *adapter) 1757static int mwifiex_register_dev(struct mwifiex_adapter *adapter)
1746{ 1758{
1747 int ret = 0; 1759 int ret;
1748 struct sdio_mmc_card *card = adapter->card; 1760 struct sdio_mmc_card *card = adapter->card;
1749 struct sdio_func *func = card->func; 1761 struct sdio_func *func = card->func;
1750 1762
@@ -1753,22 +1765,14 @@ static int mwifiex_register_dev(struct mwifiex_adapter *adapter)
1753 1765
1754 sdio_claim_host(func); 1766 sdio_claim_host(func);
1755 1767
1756 /* Request the SDIO IRQ */
1757 ret = sdio_claim_irq(func, mwifiex_sdio_interrupt);
1758 if (ret) {
1759 pr_err("claim irq failed: ret=%d\n", ret);
1760 goto disable_func;
1761 }
1762
1763 /* Set block size */ 1768 /* Set block size */
1764 ret = sdio_set_block_size(card->func, MWIFIEX_SDIO_BLOCK_SIZE); 1769 ret = sdio_set_block_size(card->func, MWIFIEX_SDIO_BLOCK_SIZE);
1770 sdio_release_host(func);
1765 if (ret) { 1771 if (ret) {
1766 pr_err("cannot set SDIO block size\n"); 1772 pr_err("cannot set SDIO block size\n");
1767 ret = -1; 1773 return ret;
1768 goto release_irq;
1769 } 1774 }
1770 1775
1771 sdio_release_host(func);
1772 sdio_set_drvdata(func, card); 1776 sdio_set_drvdata(func, card);
1773 1777
1774 adapter->dev = &func->dev; 1778 adapter->dev = &func->dev;
@@ -1776,15 +1780,6 @@ static int mwifiex_register_dev(struct mwifiex_adapter *adapter)
1776 strcpy(adapter->fw_name, card->firmware); 1780 strcpy(adapter->fw_name, card->firmware);
1777 1781
1778 return 0; 1782 return 0;
1779
1780release_irq:
1781 sdio_release_irq(func);
1782disable_func:
1783 sdio_disable_func(func);
1784 sdio_release_host(func);
1785 adapter->card = NULL;
1786
1787 return -1;
1788} 1783}
1789 1784
1790/* 1785/*
@@ -1813,9 +1808,6 @@ static int mwifiex_init_sdio(struct mwifiex_adapter *adapter)
1813 */ 1808 */
1814 mwifiex_read_reg(adapter, HOST_INTSTATUS_REG, &sdio_ireg); 1809 mwifiex_read_reg(adapter, HOST_INTSTATUS_REG, &sdio_ireg);
1815 1810
1816 /* Disable host interrupt mask register for SDIO */
1817 mwifiex_sdio_disable_host_int(adapter);
1818
1819 /* Get SDIO ioport */ 1811 /* Get SDIO ioport */
1820 mwifiex_init_sdio_ioport(adapter); 1812 mwifiex_init_sdio_ioport(adapter);
1821 1813
@@ -1957,6 +1949,7 @@ static struct mwifiex_if_ops sdio_ops = {
1957 .register_dev = mwifiex_register_dev, 1949 .register_dev = mwifiex_register_dev,
1958 .unregister_dev = mwifiex_unregister_dev, 1950 .unregister_dev = mwifiex_unregister_dev,
1959 .enable_int = mwifiex_sdio_enable_host_int, 1951 .enable_int = mwifiex_sdio_enable_host_int,
1952 .disable_int = mwifiex_sdio_disable_host_int,
1960 .process_int_status = mwifiex_process_int_status, 1953 .process_int_status = mwifiex_process_int_status,
1961 .host_to_card = mwifiex_sdio_host_to_card, 1954 .host_to_card = mwifiex_sdio_host_to_card,
1962 .wakeup = mwifiex_pm_wakeup_card, 1955 .wakeup = mwifiex_pm_wakeup_card,
diff --git a/drivers/net/wireless/mwifiex/sdio.h b/drivers/net/wireless/mwifiex/sdio.h
index 6d51dfdd8251..532ae0ac4dfb 100644
--- a/drivers/net/wireless/mwifiex/sdio.h
+++ b/drivers/net/wireless/mwifiex/sdio.h
@@ -92,9 +92,6 @@
92/* Host Control Registers : Download host interrupt mask */ 92/* Host Control Registers : Download host interrupt mask */
93#define DN_LD_HOST_INT_MASK (0x2U) 93#define DN_LD_HOST_INT_MASK (0x2U)
94 94
95/* Disable Host interrupt mask */
96#define HOST_INT_DISABLE 0xff
97
98/* Host Control Registers : Host interrupt status */ 95/* Host Control Registers : Host interrupt status */
99#define HOST_INTSTATUS_REG 0x03 96#define HOST_INTSTATUS_REG 0x03
100/* Host Control Registers : Upload host interrupt status */ 97/* Host Control Registers : Upload host interrupt status */
diff --git a/drivers/net/wireless/mwifiex/sta_ioctl.c b/drivers/net/wireless/mwifiex/sta_ioctl.c
index 206c3e038072..8af97abf7108 100644
--- a/drivers/net/wireless/mwifiex/sta_ioctl.c
+++ b/drivers/net/wireless/mwifiex/sta_ioctl.c
@@ -257,10 +257,10 @@ int mwifiex_bss_start(struct mwifiex_private *priv, struct cfg80211_bss *bss,
257 goto done; 257 goto done;
258 } 258 }
259 259
260 if (priv->bss_mode == NL80211_IFTYPE_STATION) { 260 if (priv->bss_mode == NL80211_IFTYPE_STATION ||
261 priv->bss_mode == NL80211_IFTYPE_P2P_CLIENT) {
261 u8 config_bands; 262 u8 config_bands;
262 263
263 /* Infra mode */
264 ret = mwifiex_deauthenticate(priv, NULL); 264 ret = mwifiex_deauthenticate(priv, NULL);
265 if (ret) 265 if (ret)
266 goto done; 266 goto done;
diff --git a/drivers/net/wireless/rt2x00/Kconfig b/drivers/net/wireless/rt2x00/Kconfig
index 9b915d3a44be..3e60a31582f8 100644
--- a/drivers/net/wireless/rt2x00/Kconfig
+++ b/drivers/net/wireless/rt2x00/Kconfig
@@ -1,6 +1,6 @@
1menuconfig RT2X00 1menuconfig RT2X00
2 tristate "Ralink driver support" 2 tristate "Ralink driver support"
3 depends on MAC80211 3 depends on MAC80211 && HAS_DMA
4 ---help--- 4 ---help---
5 This will enable the support for the Ralink drivers, 5 This will enable the support for the Ralink drivers,
6 developed in the rt2x00 project <http://rt2x00.serialmonkey.com>. 6 developed in the rt2x00 project <http://rt2x00.serialmonkey.com>.
diff --git a/drivers/net/wireless/rt2x00/rt2x00queue.c b/drivers/net/wireless/rt2x00/rt2x00queue.c
index 6c0a91ff963c..aa95c6cf3545 100644
--- a/drivers/net/wireless/rt2x00/rt2x00queue.c
+++ b/drivers/net/wireless/rt2x00/rt2x00queue.c
@@ -936,13 +936,8 @@ void rt2x00queue_index_inc(struct queue_entry *entry, enum queue_index index)
936 spin_unlock_irqrestore(&queue->index_lock, irqflags); 936 spin_unlock_irqrestore(&queue->index_lock, irqflags);
937} 937}
938 938
939void rt2x00queue_pause_queue(struct data_queue *queue) 939void rt2x00queue_pause_queue_nocheck(struct data_queue *queue)
940{ 940{
941 if (!test_bit(DEVICE_STATE_PRESENT, &queue->rt2x00dev->flags) ||
942 !test_bit(QUEUE_STARTED, &queue->flags) ||
943 test_and_set_bit(QUEUE_PAUSED, &queue->flags))
944 return;
945
946 switch (queue->qid) { 941 switch (queue->qid) {
947 case QID_AC_VO: 942 case QID_AC_VO:
948 case QID_AC_VI: 943 case QID_AC_VI:
@@ -958,6 +953,15 @@ void rt2x00queue_pause_queue(struct data_queue *queue)
958 break; 953 break;
959 } 954 }
960} 955}
956void rt2x00queue_pause_queue(struct data_queue *queue)
957{
958 if (!test_bit(DEVICE_STATE_PRESENT, &queue->rt2x00dev->flags) ||
959 !test_bit(QUEUE_STARTED, &queue->flags) ||
960 test_and_set_bit(QUEUE_PAUSED, &queue->flags))
961 return;
962
963 rt2x00queue_pause_queue_nocheck(queue);
964}
961EXPORT_SYMBOL_GPL(rt2x00queue_pause_queue); 965EXPORT_SYMBOL_GPL(rt2x00queue_pause_queue);
962 966
963void rt2x00queue_unpause_queue(struct data_queue *queue) 967void rt2x00queue_unpause_queue(struct data_queue *queue)
@@ -1019,7 +1023,7 @@ void rt2x00queue_stop_queue(struct data_queue *queue)
1019 return; 1023 return;
1020 } 1024 }
1021 1025
1022 rt2x00queue_pause_queue(queue); 1026 rt2x00queue_pause_queue_nocheck(queue);
1023 1027
1024 queue->rt2x00dev->ops->lib->stop_queue(queue); 1028 queue->rt2x00dev->ops->lib->stop_queue(queue);
1025 1029
diff --git a/drivers/net/wireless/rtlwifi/Kconfig b/drivers/net/wireless/rtlwifi/Kconfig
index 7253de3d8c66..c2ffce7a907c 100644
--- a/drivers/net/wireless/rtlwifi/Kconfig
+++ b/drivers/net/wireless/rtlwifi/Kconfig
@@ -1,27 +1,20 @@
1config RTLWIFI 1menuconfig RTL_CARDS
2 tristate "Realtek wireless card support" 2 tristate "Realtek rtlwifi family of devices"
3 depends on MAC80211 3 depends on MAC80211 && (PCI || USB)
4 select FW_LOADER
5 ---help---
6 This is common code for RTL8192CE/RTL8192CU/RTL8192SE/RTL8723AE
7 drivers. This module does nothing by itself - the various front-end
8 drivers need to be enabled to support any desired devices.
9
10 If you choose to build as a module, it'll be called rtlwifi.
11
12config RTLWIFI_DEBUG
13 bool "Debugging output for rtlwifi driver family"
14 depends on RTLWIFI
15 default y 4 default y
16 ---help--- 5 ---help---
17 To use the module option that sets the dynamic-debugging level for, 6 This option will enable support for the Realtek mac80211-based
18 the front-end driver, this parameter must be "Y". For memory-limited 7 wireless drivers. Drivers rtl8192ce, rtl8192cu, rtl8192se, rtl8192de,
19 systems, choose "N". If in doubt, choose "Y". 8 rtl8723eu, and rtl8188eu share some common code.
9
10if RTL_CARDS
20 11
21config RTL8192CE 12config RTL8192CE
22 tristate "Realtek RTL8192CE/RTL8188CE Wireless Network Adapter" 13 tristate "Realtek RTL8192CE/RTL8188CE Wireless Network Adapter"
23 depends on RTLWIFI && PCI 14 depends on PCI
24 select RTL8192C_COMMON 15 select RTL8192C_COMMON
16 select RTLWIFI
17 select RTLWIFI_PCI
25 ---help--- 18 ---help---
26 This is the driver for Realtek RTL8192CE/RTL8188CE 802.11n PCIe 19 This is the driver for Realtek RTL8192CE/RTL8188CE 802.11n PCIe
27 wireless network adapters. 20 wireless network adapters.
@@ -30,7 +23,9 @@ config RTL8192CE
30 23
31config RTL8192SE 24config RTL8192SE
32 tristate "Realtek RTL8192SE/RTL8191SE PCIe Wireless Network Adapter" 25 tristate "Realtek RTL8192SE/RTL8191SE PCIe Wireless Network Adapter"
33 depends on RTLWIFI && PCI 26 depends on PCI
27 select RTLWIFI
28 select RTLWIFI_PCI
34 ---help--- 29 ---help---
35 This is the driver for Realtek RTL8192SE/RTL8191SE 802.11n PCIe 30 This is the driver for Realtek RTL8192SE/RTL8191SE 802.11n PCIe
36 wireless network adapters. 31 wireless network adapters.
@@ -39,7 +34,9 @@ config RTL8192SE
39 34
40config RTL8192DE 35config RTL8192DE
41 tristate "Realtek RTL8192DE/RTL8188DE PCIe Wireless Network Adapter" 36 tristate "Realtek RTL8192DE/RTL8188DE PCIe Wireless Network Adapter"
42 depends on RTLWIFI && PCI 37 depends on PCI
38 select RTLWIFI
39 select RTLWIFI_PCI
43 ---help--- 40 ---help---
44 This is the driver for Realtek RTL8192DE/RTL8188DE 802.11n PCIe 41 This is the driver for Realtek RTL8192DE/RTL8188DE 802.11n PCIe
45 wireless network adapters. 42 wireless network adapters.
@@ -48,7 +45,9 @@ config RTL8192DE
48 45
49config RTL8723AE 46config RTL8723AE
50 tristate "Realtek RTL8723AE PCIe Wireless Network Adapter" 47 tristate "Realtek RTL8723AE PCIe Wireless Network Adapter"
51 depends on RTLWIFI && PCI 48 depends on PCI
49 select RTLWIFI
50 select RTLWIFI_PCI
52 ---help--- 51 ---help---
53 This is the driver for Realtek RTL8723AE 802.11n PCIe 52 This is the driver for Realtek RTL8723AE 802.11n PCIe
54 wireless network adapters. 53 wireless network adapters.
@@ -57,7 +56,9 @@ config RTL8723AE
57 56
58config RTL8188EE 57config RTL8188EE
59 tristate "Realtek RTL8188EE Wireless Network Adapter" 58 tristate "Realtek RTL8188EE Wireless Network Adapter"
60 depends on RTLWIFI && PCI 59 depends on PCI
60 select RTLWIFI
61 select RTLWIFI_PCI
61 ---help--- 62 ---help---
62 This is the driver for Realtek RTL8188EE 802.11n PCIe 63 This is the driver for Realtek RTL8188EE 802.11n PCIe
63 wireless network adapters. 64 wireless network adapters.
@@ -66,7 +67,9 @@ config RTL8188EE
66 67
67config RTL8192CU 68config RTL8192CU
68 tristate "Realtek RTL8192CU/RTL8188CU USB Wireless Network Adapter" 69 tristate "Realtek RTL8192CU/RTL8188CU USB Wireless Network Adapter"
69 depends on RTLWIFI && USB 70 depends on USB
71 select RTLWIFI
72 select RTLWIFI_USB
70 select RTL8192C_COMMON 73 select RTL8192C_COMMON
71 ---help--- 74 ---help---
72 This is the driver for Realtek RTL8192CU/RTL8188CU 802.11n USB 75 This is the driver for Realtek RTL8192CU/RTL8188CU 802.11n USB
@@ -74,7 +77,28 @@ config RTL8192CU
74 77
75 If you choose to build it as a module, it will be called rtl8192cu 78 If you choose to build it as a module, it will be called rtl8192cu
76 79
80config RTLWIFI
81 tristate
82 select FW_LOADER
83
84config RTLWIFI_PCI
85 tristate
86
87config RTLWIFI_USB
88 tristate
89
90config RTLWIFI_DEBUG
91 bool "Debugging output for rtlwifi driver family"
92 depends on RTLWIFI
93 default y
94 ---help---
95 To use the module option that sets the dynamic-debugging level for,
96 the front-end driver, this parameter must be "Y". For memory-limited
97 systems, choose "N". If in doubt, choose "Y".
98
77config RTL8192C_COMMON 99config RTL8192C_COMMON
78 tristate 100 tristate
79 depends on RTL8192CE || RTL8192CU 101 depends on RTL8192CE || RTL8192CU
80 default m 102 default y
103
104endif
diff --git a/drivers/net/wireless/rtlwifi/Makefile b/drivers/net/wireless/rtlwifi/Makefile
index ff02b874f8d8..d56f023a4b90 100644
--- a/drivers/net/wireless/rtlwifi/Makefile
+++ b/drivers/net/wireless/rtlwifi/Makefile
@@ -12,13 +12,11 @@ rtlwifi-objs := \
12 12
13rtl8192c_common-objs += \ 13rtl8192c_common-objs += \
14 14
15ifneq ($(CONFIG_PCI),) 15obj-$(CONFIG_RTLWIFI_PCI) += rtl_pci.o
16rtlwifi-objs += pci.o 16rtl_pci-objs := pci.o
17endif
18 17
19ifneq ($(CONFIG_USB),) 18obj-$(CONFIG_RTLWIFI_USB) += rtl_usb.o
20rtlwifi-objs += usb.o 19rtl_usb-objs := usb.o
21endif
22 20
23obj-$(CONFIG_RTL8192C_COMMON) += rtl8192c/ 21obj-$(CONFIG_RTL8192C_COMMON) += rtl8192c/
24obj-$(CONFIG_RTL8192CE) += rtl8192ce/ 22obj-$(CONFIG_RTL8192CE) += rtl8192ce/
diff --git a/drivers/net/wireless/rtlwifi/base.c b/drivers/net/wireless/rtlwifi/base.c
index 9d558ac77b0c..7651f5acc14b 100644
--- a/drivers/net/wireless/rtlwifi/base.c
+++ b/drivers/net/wireless/rtlwifi/base.c
@@ -172,6 +172,7 @@ u8 rtl_tid_to_ac(u8 tid)
172{ 172{
173 return tid_to_ac[tid]; 173 return tid_to_ac[tid];
174} 174}
175EXPORT_SYMBOL_GPL(rtl_tid_to_ac);
175 176
176static void _rtl_init_hw_ht_capab(struct ieee80211_hw *hw, 177static void _rtl_init_hw_ht_capab(struct ieee80211_hw *hw,
177 struct ieee80211_sta_ht_cap *ht_cap) 178 struct ieee80211_sta_ht_cap *ht_cap)
@@ -406,6 +407,7 @@ void rtl_deinit_deferred_work(struct ieee80211_hw *hw)
406 cancel_delayed_work(&rtlpriv->works.ps_rfon_wq); 407 cancel_delayed_work(&rtlpriv->works.ps_rfon_wq);
407 cancel_delayed_work(&rtlpriv->works.fwevt_wq); 408 cancel_delayed_work(&rtlpriv->works.fwevt_wq);
408} 409}
410EXPORT_SYMBOL_GPL(rtl_deinit_deferred_work);
409 411
410void rtl_init_rfkill(struct ieee80211_hw *hw) 412void rtl_init_rfkill(struct ieee80211_hw *hw)
411{ 413{
@@ -439,6 +441,7 @@ void rtl_deinit_rfkill(struct ieee80211_hw *hw)
439{ 441{
440 wiphy_rfkill_stop_polling(hw->wiphy); 442 wiphy_rfkill_stop_polling(hw->wiphy);
441} 443}
444EXPORT_SYMBOL_GPL(rtl_deinit_rfkill);
442 445
443int rtl_init_core(struct ieee80211_hw *hw) 446int rtl_init_core(struct ieee80211_hw *hw)
444{ 447{
@@ -489,10 +492,12 @@ int rtl_init_core(struct ieee80211_hw *hw)
489 492
490 return 0; 493 return 0;
491} 494}
495EXPORT_SYMBOL_GPL(rtl_init_core);
492 496
493void rtl_deinit_core(struct ieee80211_hw *hw) 497void rtl_deinit_core(struct ieee80211_hw *hw)
494{ 498{
495} 499}
500EXPORT_SYMBOL_GPL(rtl_deinit_core);
496 501
497void rtl_init_rx_config(struct ieee80211_hw *hw) 502void rtl_init_rx_config(struct ieee80211_hw *hw)
498{ 503{
@@ -501,6 +506,7 @@ void rtl_init_rx_config(struct ieee80211_hw *hw)
501 506
502 rtlpriv->cfg->ops->get_hw_reg(hw, HW_VAR_RCR, (u8 *) (&mac->rx_conf)); 507 rtlpriv->cfg->ops->get_hw_reg(hw, HW_VAR_RCR, (u8 *) (&mac->rx_conf));
503} 508}
509EXPORT_SYMBOL_GPL(rtl_init_rx_config);
504 510
505/********************************************************* 511/*********************************************************
506 * 512 *
@@ -879,6 +885,7 @@ bool rtl_tx_mgmt_proc(struct ieee80211_hw *hw, struct sk_buff *skb)
879 885
880 return true; 886 return true;
881} 887}
888EXPORT_SYMBOL_GPL(rtl_tx_mgmt_proc);
882 889
883void rtl_get_tcb_desc(struct ieee80211_hw *hw, 890void rtl_get_tcb_desc(struct ieee80211_hw *hw,
884 struct ieee80211_tx_info *info, 891 struct ieee80211_tx_info *info,
@@ -1052,6 +1059,7 @@ bool rtl_action_proc(struct ieee80211_hw *hw, struct sk_buff *skb, u8 is_tx)
1052 1059
1053 return true; 1060 return true;
1054} 1061}
1062EXPORT_SYMBOL_GPL(rtl_action_proc);
1055 1063
1056/*should call before software enc*/ 1064/*should call before software enc*/
1057u8 rtl_is_special_data(struct ieee80211_hw *hw, struct sk_buff *skb, u8 is_tx) 1065u8 rtl_is_special_data(struct ieee80211_hw *hw, struct sk_buff *skb, u8 is_tx)
@@ -1125,6 +1133,7 @@ u8 rtl_is_special_data(struct ieee80211_hw *hw, struct sk_buff *skb, u8 is_tx)
1125 1133
1126 return false; 1134 return false;
1127} 1135}
1136EXPORT_SYMBOL_GPL(rtl_is_special_data);
1128 1137
1129/********************************************************* 1138/*********************************************************
1130 * 1139 *
@@ -1300,6 +1309,7 @@ void rtl_beacon_statistic(struct ieee80211_hw *hw, struct sk_buff *skb)
1300 1309
1301 rtlpriv->link_info.bcn_rx_inperiod++; 1310 rtlpriv->link_info.bcn_rx_inperiod++;
1302} 1311}
1312EXPORT_SYMBOL_GPL(rtl_beacon_statistic);
1303 1313
1304void rtl_watchdog_wq_callback(void *data) 1314void rtl_watchdog_wq_callback(void *data)
1305{ 1315{
@@ -1793,6 +1803,7 @@ void rtl_recognize_peer(struct ieee80211_hw *hw, u8 *data, unsigned int len)
1793 1803
1794 mac->vendor = vendor; 1804 mac->vendor = vendor;
1795} 1805}
1806EXPORT_SYMBOL_GPL(rtl_recognize_peer);
1796 1807
1797/********************************************************* 1808/*********************************************************
1798 * 1809 *
@@ -1849,6 +1860,7 @@ struct attribute_group rtl_attribute_group = {
1849 .name = "rtlsysfs", 1860 .name = "rtlsysfs",
1850 .attrs = rtl_sysfs_entries, 1861 .attrs = rtl_sysfs_entries,
1851}; 1862};
1863EXPORT_SYMBOL_GPL(rtl_attribute_group);
1852 1864
1853MODULE_AUTHOR("lizhaoming <chaoming_li@realsil.com.cn>"); 1865MODULE_AUTHOR("lizhaoming <chaoming_li@realsil.com.cn>");
1854MODULE_AUTHOR("Realtek WlanFAE <wlanfae@realtek.com>"); 1866MODULE_AUTHOR("Realtek WlanFAE <wlanfae@realtek.com>");
@@ -1856,7 +1868,8 @@ MODULE_AUTHOR("Larry Finger <Larry.FInger@lwfinger.net>");
1856MODULE_LICENSE("GPL"); 1868MODULE_LICENSE("GPL");
1857MODULE_DESCRIPTION("Realtek 802.11n PCI wireless core"); 1869MODULE_DESCRIPTION("Realtek 802.11n PCI wireless core");
1858 1870
1859struct rtl_global_var global_var = {}; 1871struct rtl_global_var rtl_global_var = {};
1872EXPORT_SYMBOL_GPL(rtl_global_var);
1860 1873
1861static int __init rtl_core_module_init(void) 1874static int __init rtl_core_module_init(void)
1862{ 1875{
@@ -1864,8 +1877,8 @@ static int __init rtl_core_module_init(void)
1864 pr_err("Unable to register rtl_rc, use default RC !!\n"); 1877 pr_err("Unable to register rtl_rc, use default RC !!\n");
1865 1878
1866 /* init some global vars */ 1879 /* init some global vars */
1867 INIT_LIST_HEAD(&global_var.glb_priv_list); 1880 INIT_LIST_HEAD(&rtl_global_var.glb_priv_list);
1868 spin_lock_init(&global_var.glb_list_lock); 1881 spin_lock_init(&rtl_global_var.glb_list_lock);
1869 1882
1870 return 0; 1883 return 0;
1871} 1884}
diff --git a/drivers/net/wireless/rtlwifi/base.h b/drivers/net/wireless/rtlwifi/base.h
index 8576bc34b032..0e5fe0902daf 100644
--- a/drivers/net/wireless/rtlwifi/base.h
+++ b/drivers/net/wireless/rtlwifi/base.h
@@ -147,7 +147,7 @@ void rtl_recognize_peer(struct ieee80211_hw *hw, u8 *data, unsigned int len);
147u8 rtl_tid_to_ac(u8 tid); 147u8 rtl_tid_to_ac(u8 tid);
148extern struct attribute_group rtl_attribute_group; 148extern struct attribute_group rtl_attribute_group;
149void rtl_easy_concurrent_retrytimer_callback(unsigned long data); 149void rtl_easy_concurrent_retrytimer_callback(unsigned long data);
150extern struct rtl_global_var global_var; 150extern struct rtl_global_var rtl_global_var;
151int rtlwifi_rate_mapping(struct ieee80211_hw *hw, 151int rtlwifi_rate_mapping(struct ieee80211_hw *hw,
152 bool isht, u8 desc_rate, bool first_ampdu); 152 bool isht, u8 desc_rate, bool first_ampdu);
153bool rtl_tx_mgmt_proc(struct ieee80211_hw *hw, struct sk_buff *skb); 153bool rtl_tx_mgmt_proc(struct ieee80211_hw *hw, struct sk_buff *skb);
diff --git a/drivers/net/wireless/rtlwifi/core.c b/drivers/net/wireless/rtlwifi/core.c
index ee84844be008..733b7ce7f0e2 100644
--- a/drivers/net/wireless/rtlwifi/core.c
+++ b/drivers/net/wireless/rtlwifi/core.c
@@ -1330,3 +1330,4 @@ const struct ieee80211_ops rtl_ops = {
1330 .rfkill_poll = rtl_op_rfkill_poll, 1330 .rfkill_poll = rtl_op_rfkill_poll,
1331 .flush = rtl_op_flush, 1331 .flush = rtl_op_flush,
1332}; 1332};
1333EXPORT_SYMBOL_GPL(rtl_ops);
diff --git a/drivers/net/wireless/rtlwifi/debug.c b/drivers/net/wireless/rtlwifi/debug.c
index 7d52d3d7769f..76e2086e137e 100644
--- a/drivers/net/wireless/rtlwifi/debug.c
+++ b/drivers/net/wireless/rtlwifi/debug.c
@@ -51,3 +51,4 @@ void rtl_dbgp_flag_init(struct ieee80211_hw *hw)
51 51
52 /*Init Debug flag enable condition */ 52 /*Init Debug flag enable condition */
53} 53}
54EXPORT_SYMBOL_GPL(rtl_dbgp_flag_init);
diff --git a/drivers/net/wireless/rtlwifi/efuse.c b/drivers/net/wireless/rtlwifi/efuse.c
index 9e3894178e77..838a1ed3f194 100644
--- a/drivers/net/wireless/rtlwifi/efuse.c
+++ b/drivers/net/wireless/rtlwifi/efuse.c
@@ -229,6 +229,7 @@ void read_efuse_byte(struct ieee80211_hw *hw, u16 _offset, u8 *pbuf)
229 229
230 *pbuf = (u8) (value32 & 0xff); 230 *pbuf = (u8) (value32 & 0xff);
231} 231}
232EXPORT_SYMBOL_GPL(read_efuse_byte);
232 233
233void read_efuse(struct ieee80211_hw *hw, u16 _offset, u16 _size_byte, u8 *pbuf) 234void read_efuse(struct ieee80211_hw *hw, u16 _offset, u16 _size_byte, u8 *pbuf)
234{ 235{
diff --git a/drivers/net/wireless/rtlwifi/pci.c b/drivers/net/wireless/rtlwifi/pci.c
index c97e9d327331..703f839af6ca 100644
--- a/drivers/net/wireless/rtlwifi/pci.c
+++ b/drivers/net/wireless/rtlwifi/pci.c
@@ -35,6 +35,13 @@
35#include "efuse.h" 35#include "efuse.h"
36#include <linux/export.h> 36#include <linux/export.h>
37#include <linux/kmemleak.h> 37#include <linux/kmemleak.h>
38#include <linux/module.h>
39
40MODULE_AUTHOR("lizhaoming <chaoming_li@realsil.com.cn>");
41MODULE_AUTHOR("Realtek WlanFAE <wlanfae@realtek.com>");
42MODULE_AUTHOR("Larry Finger <Larry.FInger@lwfinger.net>");
43MODULE_LICENSE("GPL");
44MODULE_DESCRIPTION("PCI basic driver for rtlwifi");
38 45
39static const u16 pcibridge_vendors[PCI_BRIDGE_VENDOR_MAX] = { 46static const u16 pcibridge_vendors[PCI_BRIDGE_VENDOR_MAX] = {
40 PCI_VENDOR_ID_INTEL, 47 PCI_VENDOR_ID_INTEL,
@@ -1008,19 +1015,6 @@ static void _rtl_pci_prepare_bcn_tasklet(struct ieee80211_hw *hw)
1008 return; 1015 return;
1009} 1016}
1010 1017
1011static void rtl_lps_change_work_callback(struct work_struct *work)
1012{
1013 struct rtl_works *rtlworks =
1014 container_of(work, struct rtl_works, lps_change_work);
1015 struct ieee80211_hw *hw = rtlworks->hw;
1016 struct rtl_priv *rtlpriv = rtl_priv(hw);
1017
1018 if (rtlpriv->enter_ps)
1019 rtl_lps_enter(hw);
1020 else
1021 rtl_lps_leave(hw);
1022}
1023
1024static void _rtl_pci_init_trx_var(struct ieee80211_hw *hw) 1018static void _rtl_pci_init_trx_var(struct ieee80211_hw *hw)
1025{ 1019{
1026 struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw)); 1020 struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
@@ -1899,7 +1893,7 @@ int rtl_pci_probe(struct pci_dev *pdev,
1899 rtlpriv->rtlhal.interface = INTF_PCI; 1893 rtlpriv->rtlhal.interface = INTF_PCI;
1900 rtlpriv->cfg = (struct rtl_hal_cfg *)(id->driver_data); 1894 rtlpriv->cfg = (struct rtl_hal_cfg *)(id->driver_data);
1901 rtlpriv->intf_ops = &rtl_pci_ops; 1895 rtlpriv->intf_ops = &rtl_pci_ops;
1902 rtlpriv->glb_var = &global_var; 1896 rtlpriv->glb_var = &rtl_global_var;
1903 1897
1904 /* 1898 /*
1905 *init dbgp flags before all 1899 *init dbgp flags before all
diff --git a/drivers/net/wireless/rtlwifi/ps.c b/drivers/net/wireless/rtlwifi/ps.c
index 884bceae38a9..298b615964e8 100644
--- a/drivers/net/wireless/rtlwifi/ps.c
+++ b/drivers/net/wireless/rtlwifi/ps.c
@@ -269,6 +269,7 @@ void rtl_ips_nic_on(struct ieee80211_hw *hw)
269 269
270 spin_unlock_irqrestore(&rtlpriv->locks.ips_lock, flags); 270 spin_unlock_irqrestore(&rtlpriv->locks.ips_lock, flags);
271} 271}
272EXPORT_SYMBOL_GPL(rtl_ips_nic_on);
272 273
273/*for FW LPS*/ 274/*for FW LPS*/
274 275
@@ -518,6 +519,7 @@ void rtl_swlps_beacon(struct ieee80211_hw *hw, void *data, unsigned int len)
518 "u_bufferd: %x, m_buffered: %x\n", u_buffed, m_buffed); 519 "u_bufferd: %x, m_buffered: %x\n", u_buffed, m_buffed);
519 } 520 }
520} 521}
522EXPORT_SYMBOL_GPL(rtl_swlps_beacon);
521 523
522void rtl_swlps_rf_awake(struct ieee80211_hw *hw) 524void rtl_swlps_rf_awake(struct ieee80211_hw *hw)
523{ 525{
@@ -611,6 +613,19 @@ void rtl_swlps_rf_sleep(struct ieee80211_hw *hw)
611 MSECS(sleep_intv * mac->vif->bss_conf.beacon_int - 40)); 613 MSECS(sleep_intv * mac->vif->bss_conf.beacon_int - 40));
612} 614}
613 615
616void rtl_lps_change_work_callback(struct work_struct *work)
617{
618 struct rtl_works *rtlworks =
619 container_of(work, struct rtl_works, lps_change_work);
620 struct ieee80211_hw *hw = rtlworks->hw;
621 struct rtl_priv *rtlpriv = rtl_priv(hw);
622
623 if (rtlpriv->enter_ps)
624 rtl_lps_enter(hw);
625 else
626 rtl_lps_leave(hw);
627}
628EXPORT_SYMBOL_GPL(rtl_lps_change_work_callback);
614 629
615void rtl_swlps_wq_callback(void *data) 630void rtl_swlps_wq_callback(void *data)
616{ 631{
@@ -922,3 +937,4 @@ void rtl_p2p_info(struct ieee80211_hw *hw, void *data, unsigned int len)
922 else 937 else
923 rtl_p2p_noa_ie(hw, data, len - FCS_LEN); 938 rtl_p2p_noa_ie(hw, data, len - FCS_LEN);
924} 939}
940EXPORT_SYMBOL_GPL(rtl_p2p_info);
diff --git a/drivers/net/wireless/rtlwifi/ps.h b/drivers/net/wireless/rtlwifi/ps.h
index 4d682b753f50..88bd76ea88f7 100644
--- a/drivers/net/wireless/rtlwifi/ps.h
+++ b/drivers/net/wireless/rtlwifi/ps.h
@@ -49,5 +49,6 @@ void rtl_swlps_rf_awake(struct ieee80211_hw *hw);
49void rtl_swlps_rf_sleep(struct ieee80211_hw *hw); 49void rtl_swlps_rf_sleep(struct ieee80211_hw *hw);
50void rtl_p2p_ps_cmd(struct ieee80211_hw *hw, u8 p2p_ps_state); 50void rtl_p2p_ps_cmd(struct ieee80211_hw *hw, u8 p2p_ps_state);
51void rtl_p2p_info(struct ieee80211_hw *hw, void *data, unsigned int len); 51void rtl_p2p_info(struct ieee80211_hw *hw, void *data, unsigned int len);
52void rtl_lps_change_work_callback(struct work_struct *work);
52 53
53#endif 54#endif
diff --git a/drivers/net/wireless/rtlwifi/usb.c b/drivers/net/wireless/rtlwifi/usb.c
index a3532e077871..e56778cac9bf 100644
--- a/drivers/net/wireless/rtlwifi/usb.c
+++ b/drivers/net/wireless/rtlwifi/usb.c
@@ -32,6 +32,13 @@
32#include "ps.h" 32#include "ps.h"
33#include "rtl8192c/fw_common.h" 33#include "rtl8192c/fw_common.h"
34#include <linux/export.h> 34#include <linux/export.h>
35#include <linux/module.h>
36
37MODULE_AUTHOR("lizhaoming <chaoming_li@realsil.com.cn>");
38MODULE_AUTHOR("Realtek WlanFAE <wlanfae@realtek.com>");
39MODULE_AUTHOR("Larry Finger <Larry.FInger@lwfinger.net>");
40MODULE_LICENSE("GPL");
41MODULE_DESCRIPTION("USB basic driver for rtlwifi");
35 42
36#define REALTEK_USB_VENQT_READ 0xC0 43#define REALTEK_USB_VENQT_READ 0xC0
37#define REALTEK_USB_VENQT_WRITE 0x40 44#define REALTEK_USB_VENQT_WRITE 0x40
@@ -1070,6 +1077,8 @@ int rtl_usb_probe(struct usb_interface *intf,
1070 spin_lock_init(&rtlpriv->locks.usb_lock); 1077 spin_lock_init(&rtlpriv->locks.usb_lock);
1071 INIT_WORK(&rtlpriv->works.fill_h2c_cmd, 1078 INIT_WORK(&rtlpriv->works.fill_h2c_cmd,
1072 rtl_fill_h2c_cmd_work_callback); 1079 rtl_fill_h2c_cmd_work_callback);
1080 INIT_WORK(&rtlpriv->works.lps_change_work,
1081 rtl_lps_change_work_callback);
1073 1082
1074 rtlpriv->usb_data_index = 0; 1083 rtlpriv->usb_data_index = 0;
1075 init_completion(&rtlpriv->firmware_loading_complete); 1084 init_completion(&rtlpriv->firmware_loading_complete);
diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c
index ff7f111fffee..36808bf25677 100644
--- a/drivers/net/xen-netfront.c
+++ b/drivers/net/xen-netfront.c
@@ -286,8 +286,7 @@ no_skb:
286 break; 286 break;
287 } 287 }
288 288
289 __skb_fill_page_desc(skb, 0, page, 0, 0); 289 skb_add_rx_frag(skb, 0, page, 0, 0, PAGE_SIZE);
290 skb_shinfo(skb)->nr_frags = 1;
291 __skb_queue_tail(&np->rx_batch, skb); 290 __skb_queue_tail(&np->rx_batch, skb);
292 } 291 }
293 292
@@ -831,7 +830,6 @@ static RING_IDX xennet_fill_frags(struct netfront_info *np,
831 struct sk_buff_head *list) 830 struct sk_buff_head *list)
832{ 831{
833 struct skb_shared_info *shinfo = skb_shinfo(skb); 832 struct skb_shared_info *shinfo = skb_shinfo(skb);
834 int nr_frags = shinfo->nr_frags;
835 RING_IDX cons = np->rx.rsp_cons; 833 RING_IDX cons = np->rx.rsp_cons;
836 struct sk_buff *nskb; 834 struct sk_buff *nskb;
837 835
@@ -840,19 +838,21 @@ static RING_IDX xennet_fill_frags(struct netfront_info *np,
840 RING_GET_RESPONSE(&np->rx, ++cons); 838 RING_GET_RESPONSE(&np->rx, ++cons);
841 skb_frag_t *nfrag = &skb_shinfo(nskb)->frags[0]; 839 skb_frag_t *nfrag = &skb_shinfo(nskb)->frags[0];
842 840
843 __skb_fill_page_desc(skb, nr_frags, 841 if (shinfo->nr_frags == MAX_SKB_FRAGS) {
844 skb_frag_page(nfrag), 842 unsigned int pull_to = NETFRONT_SKB_CB(skb)->pull_to;
845 rx->offset, rx->status);
846 843
847 skb->data_len += rx->status; 844 BUG_ON(pull_to <= skb_headlen(skb));
845 __pskb_pull_tail(skb, pull_to - skb_headlen(skb));
846 }
847 BUG_ON(shinfo->nr_frags >= MAX_SKB_FRAGS);
848
849 skb_add_rx_frag(skb, shinfo->nr_frags, skb_frag_page(nfrag),
850 rx->offset, rx->status, PAGE_SIZE);
848 851
849 skb_shinfo(nskb)->nr_frags = 0; 852 skb_shinfo(nskb)->nr_frags = 0;
850 kfree_skb(nskb); 853 kfree_skb(nskb);
851
852 nr_frags++;
853 } 854 }
854 855
855 shinfo->nr_frags = nr_frags;
856 return cons; 856 return cons;
857} 857}
858 858
@@ -933,7 +933,8 @@ static int handle_incoming_queue(struct net_device *dev,
933 while ((skb = __skb_dequeue(rxq)) != NULL) { 933 while ((skb = __skb_dequeue(rxq)) != NULL) {
934 int pull_to = NETFRONT_SKB_CB(skb)->pull_to; 934 int pull_to = NETFRONT_SKB_CB(skb)->pull_to;
935 935
936 __pskb_pull_tail(skb, pull_to - skb_headlen(skb)); 936 if (pull_to > skb_headlen(skb))
937 __pskb_pull_tail(skb, pull_to - skb_headlen(skb));
937 938
938 /* Ethernet work: Delayed to here as it peeks the header. */ 939 /* Ethernet work: Delayed to here as it peeks the header. */
939 skb->protocol = eth_type_trans(skb, dev); 940 skb->protocol = eth_type_trans(skb, dev);
@@ -1019,16 +1020,10 @@ err:
1019 skb_shinfo(skb)->frags[0].page_offset = rx->offset; 1020 skb_shinfo(skb)->frags[0].page_offset = rx->offset;
1020 skb_frag_size_set(&skb_shinfo(skb)->frags[0], rx->status); 1021 skb_frag_size_set(&skb_shinfo(skb)->frags[0], rx->status);
1021 skb->data_len = rx->status; 1022 skb->data_len = rx->status;
1023 skb->len += rx->status;
1022 1024
1023 i = xennet_fill_frags(np, skb, &tmpq); 1025 i = xennet_fill_frags(np, skb, &tmpq);
1024 1026
1025 /*
1026 * Truesize is the actual allocation size, even if the
1027 * allocation is only partially used.
1028 */
1029 skb->truesize += PAGE_SIZE * skb_shinfo(skb)->nr_frags;
1030 skb->len += skb->data_len;
1031
1032 if (rx->flags & XEN_NETRXF_csum_blank) 1027 if (rx->flags & XEN_NETRXF_csum_blank)
1033 skb->ip_summed = CHECKSUM_PARTIAL; 1028 skb->ip_summed = CHECKSUM_PARTIAL;
1034 else if (rx->flags & XEN_NETRXF_data_validated) 1029 else if (rx->flags & XEN_NETRXF_data_validated)
diff --git a/drivers/of/irq.c b/drivers/of/irq.c
index a3c1c5aae6a9..1264923ade0f 100644
--- a/drivers/of/irq.c
+++ b/drivers/of/irq.c
@@ -345,6 +345,7 @@ int of_irq_to_resource(struct device_node *dev, int index, struct resource *r)
345 if (r && irq) { 345 if (r && irq) {
346 const char *name = NULL; 346 const char *name = NULL;
347 347
348 memset(r, 0, sizeof(*r));
348 /* 349 /*
349 * Get optional "interrupts-names" property to add a name 350 * Get optional "interrupts-names" property to add a name
350 * to the resource. 351 * to the resource.
@@ -482,8 +483,9 @@ void __init of_irq_init(const struct of_device_id *matches)
482 } 483 }
483 484
484 /* Get the next pending parent that might have children */ 485 /* Get the next pending parent that might have children */
485 desc = list_first_entry(&intc_parent_list, typeof(*desc), list); 486 desc = list_first_entry_or_null(&intc_parent_list,
486 if (list_empty(&intc_parent_list) || !desc) { 487 typeof(*desc), list);
488 if (!desc) {
487 pr_err("of_irq_init: children remain, but no parents\n"); 489 pr_err("of_irq_init: children remain, but no parents\n");
488 break; 490 break;
489 } 491 }
diff --git a/drivers/oprofile/timer_int.c b/drivers/oprofile/timer_int.c
index 93404f72dfa8..61be1d9c16c8 100644
--- a/drivers/oprofile/timer_int.c
+++ b/drivers/oprofile/timer_int.c
@@ -74,8 +74,8 @@ static void oprofile_hrtimer_stop(void)
74 put_online_cpus(); 74 put_online_cpus();
75} 75}
76 76
77static int __cpuinit oprofile_cpu_notify(struct notifier_block *self, 77static int oprofile_cpu_notify(struct notifier_block *self,
78 unsigned long action, void *hcpu) 78 unsigned long action, void *hcpu)
79{ 79{
80 long cpu = (long) hcpu; 80 long cpu = (long) hcpu;
81 81
diff --git a/drivers/parisc/iosapic.c b/drivers/parisc/iosapic.c
index e79e006eb9ab..9ee04b4b68bf 100644
--- a/drivers/parisc/iosapic.c
+++ b/drivers/parisc/iosapic.c
@@ -811,18 +811,28 @@ int iosapic_fixup_irq(void *isi_obj, struct pci_dev *pcidev)
811 return pcidev->irq; 811 return pcidev->irq;
812} 812}
813 813
814static struct iosapic_info *first_isi = NULL; 814static struct iosapic_info *iosapic_list;
815 815
816#ifdef CONFIG_64BIT 816#ifdef CONFIG_64BIT
817int iosapic_serial_irq(int num) 817int iosapic_serial_irq(struct parisc_device *dev)
818{ 818{
819 struct iosapic_info *isi = first_isi; 819 struct iosapic_info *isi;
820 struct irt_entry *irte = NULL; /* only used if PAT PDC */ 820 struct irt_entry *irte;
821 struct vector_info *vi; 821 struct vector_info *vi;
822 int isi_line; /* line used by device */ 822 int cnt;
823 int intin;
824
825 intin = (dev->mod_info >> 24) & 15;
823 826
824 /* lookup IRT entry for isi/slot/pin set */ 827 /* lookup IRT entry for isi/slot/pin set */
825 irte = &irt_cell[num]; 828 for (cnt = 0; cnt < irt_num_entry; cnt++) {
829 irte = &irt_cell[cnt];
830 if (COMPARE_IRTE_ADDR(irte, dev->mod0) &&
831 irte->dest_iosapic_intin == intin)
832 break;
833 }
834 if (cnt >= irt_num_entry)
835 return 0; /* no irq found, force polling */
826 836
827 DBG_IRT("iosapic_serial_irq(): irte %p %x %x %x %x %x %x %x %x\n", 837 DBG_IRT("iosapic_serial_irq(): irte %p %x %x %x %x %x %x %x %x\n",
828 irte, 838 irte,
@@ -834,11 +844,17 @@ int iosapic_serial_irq(int num)
834 irte->src_seg_id, 844 irte->src_seg_id,
835 irte->dest_iosapic_intin, 845 irte->dest_iosapic_intin,
836 (u32) irte->dest_iosapic_addr); 846 (u32) irte->dest_iosapic_addr);
837 isi_line = irte->dest_iosapic_intin; 847
848 /* search for iosapic */
849 for (isi = iosapic_list; isi; isi = isi->isi_next)
850 if (isi->isi_hpa == dev->mod0)
851 break;
852 if (!isi)
853 return 0; /* no iosapic found, force polling */
838 854
839 /* get vector info for this input line */ 855 /* get vector info for this input line */
840 vi = isi->isi_vector + isi_line; 856 vi = isi->isi_vector + intin;
841 DBG_IRT("iosapic_serial_irq: line %d vi 0x%p\n", isi_line, vi); 857 DBG_IRT("iosapic_serial_irq: line %d vi 0x%p\n", iosapic_intin, vi);
842 858
843 /* If this IRQ line has already been setup, skip it */ 859 /* If this IRQ line has already been setup, skip it */
844 if (vi->irte) 860 if (vi->irte)
@@ -941,8 +957,8 @@ void *iosapic_register(unsigned long hpa)
941 vip->irqline = (unsigned char) cnt; 957 vip->irqline = (unsigned char) cnt;
942 vip->iosapic = isi; 958 vip->iosapic = isi;
943 } 959 }
944 if (!first_isi) 960 isi->isi_next = iosapic_list;
945 first_isi = isi; 961 iosapic_list = isi;
946 return isi; 962 return isi;
947} 963}
948 964
diff --git a/drivers/pci/host/pci-mvebu.c b/drivers/pci/host/pci-mvebu.c
index 13a633b1612e..7bf3926aecc0 100644
--- a/drivers/pci/host/pci-mvebu.c
+++ b/drivers/pci/host/pci-mvebu.c
@@ -86,10 +86,6 @@ struct mvebu_sw_pci_bridge {
86 u16 secondary_status; 86 u16 secondary_status;
87 u16 membase; 87 u16 membase;
88 u16 memlimit; 88 u16 memlimit;
89 u16 prefmembase;
90 u16 prefmemlimit;
91 u32 prefbaseupper;
92 u32 preflimitupper;
93 u16 iobaseupper; 89 u16 iobaseupper;
94 u16 iolimitupper; 90 u16 iolimitupper;
95 u8 cappointer; 91 u8 cappointer;
@@ -419,15 +415,7 @@ static int mvebu_sw_pci_bridge_read(struct mvebu_pcie_port *port,
419 break; 415 break;
420 416
421 case PCI_PREF_MEMORY_BASE: 417 case PCI_PREF_MEMORY_BASE:
422 *value = (bridge->prefmemlimit << 16 | bridge->prefmembase); 418 *value = 0;
423 break;
424
425 case PCI_PREF_BASE_UPPER32:
426 *value = bridge->prefbaseupper;
427 break;
428
429 case PCI_PREF_LIMIT_UPPER32:
430 *value = bridge->preflimitupper;
431 break; 419 break;
432 420
433 case PCI_IO_BASE_UPPER16: 421 case PCI_IO_BASE_UPPER16:
@@ -501,19 +489,6 @@ static int mvebu_sw_pci_bridge_write(struct mvebu_pcie_port *port,
501 mvebu_pcie_handle_membase_change(port); 489 mvebu_pcie_handle_membase_change(port);
502 break; 490 break;
503 491
504 case PCI_PREF_MEMORY_BASE:
505 bridge->prefmembase = value & 0xffff;
506 bridge->prefmemlimit = value >> 16;
507 break;
508
509 case PCI_PREF_BASE_UPPER32:
510 bridge->prefbaseupper = value;
511 break;
512
513 case PCI_PREF_LIMIT_UPPER32:
514 bridge->preflimitupper = value;
515 break;
516
517 case PCI_IO_BASE_UPPER16: 492 case PCI_IO_BASE_UPPER16:
518 bridge->iobaseupper = value & 0xffff; 493 bridge->iobaseupper = value & 0xffff;
519 bridge->iolimitupper = value >> 16; 494 bridge->iolimitupper = value >> 16;
diff --git a/drivers/pci/hotplug/Kconfig b/drivers/pci/hotplug/Kconfig
index bb7ebb22db01..d85009de713d 100644
--- a/drivers/pci/hotplug/Kconfig
+++ b/drivers/pci/hotplug/Kconfig
@@ -3,16 +3,13 @@
3# 3#
4 4
5menuconfig HOTPLUG_PCI 5menuconfig HOTPLUG_PCI
6 tristate "Support for PCI Hotplug" 6 bool "Support for PCI Hotplug"
7 depends on PCI && SYSFS 7 depends on PCI && SYSFS
8 ---help--- 8 ---help---
9 Say Y here if you have a motherboard with a PCI Hotplug controller. 9 Say Y here if you have a motherboard with a PCI Hotplug controller.
10 This allows you to add and remove PCI cards while the machine is 10 This allows you to add and remove PCI cards while the machine is
11 powered up and running. 11 powered up and running.
12 12
13 To compile this driver as a module, choose M here: the
14 module will be called pci_hotplug.
15
16 When in doubt, say N. 13 When in doubt, say N.
17 14
18if HOTPLUG_PCI 15if HOTPLUG_PCI
diff --git a/drivers/pci/hotplug/pciehp_pci.c b/drivers/pci/hotplug/pciehp_pci.c
index aac7a40e4a4a..0e0d0f7f63fd 100644
--- a/drivers/pci/hotplug/pciehp_pci.c
+++ b/drivers/pci/hotplug/pciehp_pci.c
@@ -92,7 +92,14 @@ int pciehp_unconfigure_device(struct slot *p_slot)
92 if (ret) 92 if (ret)
93 presence = 0; 93 presence = 0;
94 94
95 list_for_each_entry_safe(dev, temp, &parent->devices, bus_list) { 95 /*
96 * Stopping an SR-IOV PF device removes all the associated VFs,
97 * which will update the bus->devices list and confuse the
98 * iterator. Therefore, iterate in reverse so we remove the VFs
99 * first, then the PF. We do the same in pci_stop_bus_device().
100 */
101 list_for_each_entry_safe_reverse(dev, temp, &parent->devices,
102 bus_list) {
96 pci_dev_get(dev); 103 pci_dev_get(dev);
97 if (dev->hdr_type == PCI_HEADER_TYPE_BRIDGE && presence) { 104 if (dev->hdr_type == PCI_HEADER_TYPE_BRIDGE && presence) {
98 pci_read_config_byte(dev, PCI_BRIDGE_CONTROL, &bctl); 105 pci_read_config_byte(dev, PCI_BRIDGE_CONTROL, &bctl);
diff --git a/drivers/pci/hotplug/rpadlpar_core.c b/drivers/pci/hotplug/rpadlpar_core.c
index b29e20b7862f..bb7af78e4eed 100644
--- a/drivers/pci/hotplug/rpadlpar_core.c
+++ b/drivers/pci/hotplug/rpadlpar_core.c
@@ -388,7 +388,6 @@ int dlpar_remove_pci_slot(char *drc_name, struct device_node *dn)
388 /* Remove the EADS bridge device itself */ 388 /* Remove the EADS bridge device itself */
389 BUG_ON(!bus->self); 389 BUG_ON(!bus->self);
390 pr_debug("PCI: Now removing bridge device %s\n", pci_name(bus->self)); 390 pr_debug("PCI: Now removing bridge device %s\n", pci_name(bus->self));
391 eeh_remove_bus_device(bus->self, true);
392 pci_stop_and_remove_bus_device(bus->self); 391 pci_stop_and_remove_bus_device(bus->self);
393 392
394 return 0; 393 return 0;
diff --git a/drivers/pci/pci-acpi.c b/drivers/pci/pci-acpi.c
index dbdc5f7e2b29..01e264fb50e0 100644
--- a/drivers/pci/pci-acpi.c
+++ b/drivers/pci/pci-acpi.c
@@ -317,13 +317,20 @@ void acpi_pci_remove_bus(struct pci_bus *bus)
317/* ACPI bus type */ 317/* ACPI bus type */
318static int acpi_pci_find_device(struct device *dev, acpi_handle *handle) 318static int acpi_pci_find_device(struct device *dev, acpi_handle *handle)
319{ 319{
320 struct pci_dev * pci_dev; 320 struct pci_dev *pci_dev = to_pci_dev(dev);
321 u64 addr; 321 bool is_bridge;
322 u64 addr;
322 323
323 pci_dev = to_pci_dev(dev); 324 /*
325 * pci_is_bridge() is not suitable here, because pci_dev->subordinate
326 * is set only after acpi_pci_find_device() has been called for the
327 * given device.
328 */
329 is_bridge = pci_dev->hdr_type == PCI_HEADER_TYPE_BRIDGE
330 || pci_dev->hdr_type == PCI_HEADER_TYPE_CARDBUS;
324 /* Please ref to ACPI spec for the syntax of _ADR */ 331 /* Please ref to ACPI spec for the syntax of _ADR */
325 addr = (PCI_SLOT(pci_dev->devfn) << 16) | PCI_FUNC(pci_dev->devfn); 332 addr = (PCI_SLOT(pci_dev->devfn) << 16) | PCI_FUNC(pci_dev->devfn);
326 *handle = acpi_get_child(DEVICE_ACPI_HANDLE(dev->parent), addr); 333 *handle = acpi_find_child(ACPI_HANDLE(dev->parent), addr, is_bridge);
327 if (!*handle) 334 if (!*handle)
328 return -ENODEV; 335 return -ENODEV;
329 return 0; 336 return 0;
diff --git a/drivers/pci/pcie/Kconfig b/drivers/pci/pcie/Kconfig
index 569f82fc9e22..3b94cfcfa03b 100644
--- a/drivers/pci/pcie/Kconfig
+++ b/drivers/pci/pcie/Kconfig
@@ -14,15 +14,12 @@ config PCIEPORTBUS
14# Include service Kconfig here 14# Include service Kconfig here
15# 15#
16config HOTPLUG_PCI_PCIE 16config HOTPLUG_PCI_PCIE
17 tristate "PCI Express Hotplug driver" 17 bool "PCI Express Hotplug driver"
18 depends on HOTPLUG_PCI && PCIEPORTBUS 18 depends on HOTPLUG_PCI && PCIEPORTBUS
19 help 19 help
20 Say Y here if you have a motherboard that supports PCI Express Native 20 Say Y here if you have a motherboard that supports PCI Express Native
21 Hotplug 21 Hotplug
22 22
23 To compile this driver as a module, choose M here: the
24 module will be called pciehp.
25
26 When in doubt, say N. 23 When in doubt, say N.
27 24
28source "drivers/pci/pcie/aer/Kconfig" 25source "drivers/pci/pcie/aer/Kconfig"
diff --git a/drivers/pci/setup-bus.c b/drivers/pci/setup-bus.c
index d254e2379533..64a7de22d9af 100644
--- a/drivers/pci/setup-bus.c
+++ b/drivers/pci/setup-bus.c
@@ -300,6 +300,47 @@ static void assign_requested_resources_sorted(struct list_head *head,
300 } 300 }
301} 301}
302 302
303static unsigned long pci_fail_res_type_mask(struct list_head *fail_head)
304{
305 struct pci_dev_resource *fail_res;
306 unsigned long mask = 0;
307
308 /* check failed type */
309 list_for_each_entry(fail_res, fail_head, list)
310 mask |= fail_res->flags;
311
312 /*
313 * one pref failed resource will set IORESOURCE_MEM,
314 * as we can allocate pref in non-pref range.
315 * Will release all assigned non-pref sibling resources
316 * according to that bit.
317 */
318 return mask & (IORESOURCE_IO | IORESOURCE_MEM | IORESOURCE_PREFETCH);
319}
320
321static bool pci_need_to_release(unsigned long mask, struct resource *res)
322{
323 if (res->flags & IORESOURCE_IO)
324 return !!(mask & IORESOURCE_IO);
325
326 /* check pref at first */
327 if (res->flags & IORESOURCE_PREFETCH) {
328 if (mask & IORESOURCE_PREFETCH)
329 return true;
330 /* count pref if its parent is non-pref */
331 else if ((mask & IORESOURCE_MEM) &&
332 !(res->parent->flags & IORESOURCE_PREFETCH))
333 return true;
334 else
335 return false;
336 }
337
338 if (res->flags & IORESOURCE_MEM)
339 return !!(mask & IORESOURCE_MEM);
340
341 return false; /* should not get here */
342}
343
303static void __assign_resources_sorted(struct list_head *head, 344static void __assign_resources_sorted(struct list_head *head,
304 struct list_head *realloc_head, 345 struct list_head *realloc_head,
305 struct list_head *fail_head) 346 struct list_head *fail_head)
@@ -312,11 +353,24 @@ static void __assign_resources_sorted(struct list_head *head,
312 * if could do that, could get out early. 353 * if could do that, could get out early.
313 * if could not do that, we still try to assign requested at first, 354 * if could not do that, we still try to assign requested at first,
314 * then try to reassign add_size for some resources. 355 * then try to reassign add_size for some resources.
356 *
357 * Separate three resource type checking if we need to release
358 * assigned resource after requested + add_size try.
359 * 1. if there is io port assign fail, will release assigned
360 * io port.
361 * 2. if there is pref mmio assign fail, release assigned
362 * pref mmio.
363 * if assigned pref mmio's parent is non-pref mmio and there
364 * is non-pref mmio assign fail, will release that assigned
365 * pref mmio.
366 * 3. if there is non-pref mmio assign fail or pref mmio
367 * assigned fail, will release assigned non-pref mmio.
315 */ 368 */
316 LIST_HEAD(save_head); 369 LIST_HEAD(save_head);
317 LIST_HEAD(local_fail_head); 370 LIST_HEAD(local_fail_head);
318 struct pci_dev_resource *save_res; 371 struct pci_dev_resource *save_res;
319 struct pci_dev_resource *dev_res; 372 struct pci_dev_resource *dev_res, *tmp_res;
373 unsigned long fail_type;
320 374
321 /* Check if optional add_size is there */ 375 /* Check if optional add_size is there */
322 if (!realloc_head || list_empty(realloc_head)) 376 if (!realloc_head || list_empty(realloc_head))
@@ -348,6 +402,19 @@ static void __assign_resources_sorted(struct list_head *head,
348 return; 402 return;
349 } 403 }
350 404
405 /* check failed type */
406 fail_type = pci_fail_res_type_mask(&local_fail_head);
407 /* remove not need to be released assigned res from head list etc */
408 list_for_each_entry_safe(dev_res, tmp_res, head, list)
409 if (dev_res->res->parent &&
410 !pci_need_to_release(fail_type, dev_res->res)) {
411 /* remove it from realloc_head list */
412 remove_from_list(realloc_head, dev_res->res);
413 remove_from_list(&save_head, dev_res->res);
414 list_del(&dev_res->list);
415 kfree(dev_res);
416 }
417
351 free_list(&local_fail_head); 418 free_list(&local_fail_head);
352 /* Release assigned resource */ 419 /* Release assigned resource */
353 list_for_each_entry(dev_res, head, list) 420 list_for_each_entry(dev_res, head, list)
diff --git a/drivers/pinctrl/core.c b/drivers/pinctrl/core.c
index 5b272bfd261d..2a00239661b3 100644
--- a/drivers/pinctrl/core.c
+++ b/drivers/pinctrl/core.c
@@ -1193,6 +1193,7 @@ void pinctrl_unregister_map(struct pinctrl_map const *map)
1193 list_for_each_entry(maps_node, &pinctrl_maps, node) { 1193 list_for_each_entry(maps_node, &pinctrl_maps, node) {
1194 if (maps_node->maps == map) { 1194 if (maps_node->maps == map) {
1195 list_del(&maps_node->node); 1195 list_del(&maps_node->node);
1196 kfree(maps_node);
1196 mutex_unlock(&pinctrl_maps_mutex); 1197 mutex_unlock(&pinctrl_maps_mutex);
1197 return; 1198 return;
1198 } 1199 }
diff --git a/drivers/pinctrl/pinctrl-single.c b/drivers/pinctrl/pinctrl-single.c
index 6866548fab31..7323cca440b5 100644
--- a/drivers/pinctrl/pinctrl-single.c
+++ b/drivers/pinctrl/pinctrl-single.c
@@ -1483,6 +1483,7 @@ static int pcs_add_gpio_func(struct device_node *node, struct pcs_device *pcs)
1483 return ret; 1483 return ret;
1484} 1484}
1485 1485
1486#ifdef CONFIG_PM
1486static int pinctrl_single_suspend(struct platform_device *pdev, 1487static int pinctrl_single_suspend(struct platform_device *pdev,
1487 pm_message_t state) 1488 pm_message_t state)
1488{ 1489{
@@ -1505,6 +1506,7 @@ static int pinctrl_single_resume(struct platform_device *pdev)
1505 1506
1506 return pinctrl_force_default(pcs->pctl); 1507 return pinctrl_force_default(pcs->pctl);
1507} 1508}
1509#endif
1508 1510
1509static int pcs_probe(struct platform_device *pdev) 1511static int pcs_probe(struct platform_device *pdev)
1510{ 1512{
diff --git a/drivers/pinctrl/sh-pfc/pfc-sh73a0.c b/drivers/pinctrl/sh-pfc/pfc-sh73a0.c
index 7956df58d751..31f7d0e04aaa 100644
--- a/drivers/pinctrl/sh-pfc/pfc-sh73a0.c
+++ b/drivers/pinctrl/sh-pfc/pfc-sh73a0.c
@@ -3785,6 +3785,7 @@ static const struct regulator_desc sh73a0_vccq_mc0_desc = {
3785 3785
3786static struct regulator_consumer_supply sh73a0_vccq_mc0_consumers[] = { 3786static struct regulator_consumer_supply sh73a0_vccq_mc0_consumers[] = {
3787 REGULATOR_SUPPLY("vqmmc", "sh_mobile_sdhi.0"), 3787 REGULATOR_SUPPLY("vqmmc", "sh_mobile_sdhi.0"),
3788 REGULATOR_SUPPLY("vqmmc", "ee100000.sdhi"),
3788}; 3789};
3789 3790
3790static const struct regulator_init_data sh73a0_vccq_mc0_init_data = { 3791static const struct regulator_init_data sh73a0_vccq_mc0_init_data = {
diff --git a/drivers/pinctrl/sirf/pinctrl-atlas6.c b/drivers/pinctrl/sirf/pinctrl-atlas6.c
index 1fa39a444171..867c9681763c 100644
--- a/drivers/pinctrl/sirf/pinctrl-atlas6.c
+++ b/drivers/pinctrl/sirf/pinctrl-atlas6.c
@@ -496,7 +496,7 @@ static const unsigned sdmmc5_pins[] = { 24, 25, 26 };
496static const struct sirfsoc_muxmask usp0_muxmask[] = { 496static const struct sirfsoc_muxmask usp0_muxmask[] = {
497 { 497 {
498 .group = 1, 498 .group = 1,
499 .mask = BIT(19) | BIT(20) | BIT(21) | BIT(22), 499 .mask = BIT(19) | BIT(20) | BIT(21) | BIT(22) | BIT(23),
500 }, 500 },
501}; 501};
502 502
@@ -507,8 +507,21 @@ static const struct sirfsoc_padmux usp0_padmux = {
507 .funcval = 0, 507 .funcval = 0,
508}; 508};
509 509
510static const unsigned usp0_pins[] = { 51, 52, 53, 54 }; 510static const unsigned usp0_pins[] = { 51, 52, 53, 54, 55 };
511 511
512static const struct sirfsoc_muxmask usp0_uart_nostreamctrl_muxmask[] = {
513 {
514 .group = 1,
515 .mask = BIT(20) | BIT(21),
516 },
517};
518
519static const struct sirfsoc_padmux usp0_uart_nostreamctrl_padmux = {
520 .muxmask_counts = ARRAY_SIZE(usp0_uart_nostreamctrl_muxmask),
521 .muxmask = usp0_uart_nostreamctrl_muxmask,
522};
523
524static const unsigned usp0_uart_nostreamctrl_pins[] = { 52, 53 };
512static const struct sirfsoc_muxmask usp1_muxmask[] = { 525static const struct sirfsoc_muxmask usp1_muxmask[] = {
513 { 526 {
514 .group = 0, 527 .group = 0,
@@ -822,6 +835,8 @@ static const struct sirfsoc_pin_group sirfsoc_pin_groups[] = {
822 SIRFSOC_PIN_GROUP("uart2grp", uart2_pins), 835 SIRFSOC_PIN_GROUP("uart2grp", uart2_pins),
823 SIRFSOC_PIN_GROUP("uart2_nostreamctrlgrp", uart2_nostreamctrl_pins), 836 SIRFSOC_PIN_GROUP("uart2_nostreamctrlgrp", uart2_nostreamctrl_pins),
824 SIRFSOC_PIN_GROUP("usp0grp", usp0_pins), 837 SIRFSOC_PIN_GROUP("usp0grp", usp0_pins),
838 SIRFSOC_PIN_GROUP("usp0_uart_nostreamctrl_grp",
839 usp0_uart_nostreamctrl_pins),
825 SIRFSOC_PIN_GROUP("usp1grp", usp1_pins), 840 SIRFSOC_PIN_GROUP("usp1grp", usp1_pins),
826 SIRFSOC_PIN_GROUP("i2c0grp", i2c0_pins), 841 SIRFSOC_PIN_GROUP("i2c0grp", i2c0_pins),
827 SIRFSOC_PIN_GROUP("i2c1grp", i2c1_pins), 842 SIRFSOC_PIN_GROUP("i2c1grp", i2c1_pins),
@@ -862,6 +877,8 @@ static const char * const uart0grp[] = { "uart0grp" };
862static const char * const uart1grp[] = { "uart1grp" }; 877static const char * const uart1grp[] = { "uart1grp" };
863static const char * const uart2grp[] = { "uart2grp" }; 878static const char * const uart2grp[] = { "uart2grp" };
864static const char * const uart2_nostreamctrlgrp[] = { "uart2_nostreamctrlgrp" }; 879static const char * const uart2_nostreamctrlgrp[] = { "uart2_nostreamctrlgrp" };
880static const char * const usp0_uart_nostreamctrl_grp[] = {
881 "usp0_uart_nostreamctrl_grp" };
865static const char * const usp0grp[] = { "usp0grp" }; 882static const char * const usp0grp[] = { "usp0grp" };
866static const char * const usp1grp[] = { "usp1grp" }; 883static const char * const usp1grp[] = { "usp1grp" };
867static const char * const i2c0grp[] = { "i2c0grp" }; 884static const char * const i2c0grp[] = { "i2c0grp" };
@@ -904,6 +921,9 @@ static const struct sirfsoc_pmx_func sirfsoc_pmx_functions[] = {
904 SIRFSOC_PMX_FUNCTION("uart2", uart2grp, uart2_padmux), 921 SIRFSOC_PMX_FUNCTION("uart2", uart2grp, uart2_padmux),
905 SIRFSOC_PMX_FUNCTION("uart2_nostreamctrl", uart2_nostreamctrlgrp, uart2_nostreamctrl_padmux), 922 SIRFSOC_PMX_FUNCTION("uart2_nostreamctrl", uart2_nostreamctrlgrp, uart2_nostreamctrl_padmux),
906 SIRFSOC_PMX_FUNCTION("usp0", usp0grp, usp0_padmux), 923 SIRFSOC_PMX_FUNCTION("usp0", usp0grp, usp0_padmux),
924 SIRFSOC_PMX_FUNCTION("usp0_uart_nostreamctrl",
925 usp0_uart_nostreamctrl_grp,
926 usp0_uart_nostreamctrl_padmux),
907 SIRFSOC_PMX_FUNCTION("usp1", usp1grp, usp1_padmux), 927 SIRFSOC_PMX_FUNCTION("usp1", usp1grp, usp1_padmux),
908 SIRFSOC_PMX_FUNCTION("i2c0", i2c0grp, i2c0_padmux), 928 SIRFSOC_PMX_FUNCTION("i2c0", i2c0grp, i2c0_padmux),
909 SIRFSOC_PMX_FUNCTION("i2c1", i2c1grp, i2c1_padmux), 929 SIRFSOC_PMX_FUNCTION("i2c1", i2c1grp, i2c1_padmux),
diff --git a/drivers/pnp/pnpacpi/rsparser.c b/drivers/pnp/pnpacpi/rsparser.c
index 9847ab163829..167f3d00c916 100644
--- a/drivers/pnp/pnpacpi/rsparser.c
+++ b/drivers/pnp/pnpacpi/rsparser.c
@@ -180,7 +180,7 @@ static acpi_status pnpacpi_allocated_resource(struct acpi_resource *res,
180 struct pnp_dev *dev = data; 180 struct pnp_dev *dev = data;
181 struct acpi_resource_dma *dma; 181 struct acpi_resource_dma *dma;
182 struct acpi_resource_vendor_typed *vendor_typed; 182 struct acpi_resource_vendor_typed *vendor_typed;
183 struct resource r; 183 struct resource r = {0};
184 int i, flags; 184 int i, flags;
185 185
186 if (acpi_dev_resource_memory(res, &r) 186 if (acpi_dev_resource_memory(res, &r)
diff --git a/drivers/pnp/resource.c b/drivers/pnp/resource.c
index 3e6db1c1dc29..d95e101ffb43 100644
--- a/drivers/pnp/resource.c
+++ b/drivers/pnp/resource.c
@@ -515,6 +515,7 @@ struct pnp_resource *pnp_add_resource(struct pnp_dev *dev,
515 } 515 }
516 516
517 pnp_res->res = *res; 517 pnp_res->res = *res;
518 pnp_res->res.name = dev->name;
518 dev_dbg(&dev->dev, "%pR\n", res); 519 dev_dbg(&dev->dev, "%pR\n", res);
519 return pnp_res; 520 return pnp_res;
520} 521}
diff --git a/drivers/rapidio/rio.c b/drivers/rapidio/rio.c
index f4f30af2df68..2e8a20cac588 100644
--- a/drivers/rapidio/rio.c
+++ b/drivers/rapidio/rio.c
@@ -1715,11 +1715,13 @@ int rio_unregister_scan(int mport_id, struct rio_scan *scan_ops)
1715 (mport_id == RIO_MPORT_ANY && port->nscan == scan_ops)) 1715 (mport_id == RIO_MPORT_ANY && port->nscan == scan_ops))
1716 port->nscan = NULL; 1716 port->nscan = NULL;
1717 1717
1718 list_for_each_entry(scan, &rio_scans, node) 1718 list_for_each_entry(scan, &rio_scans, node) {
1719 if (scan->mport_id == mport_id) { 1719 if (scan->mport_id == mport_id) {
1720 list_del(&scan->node); 1720 list_del(&scan->node);
1721 kfree(scan); 1721 kfree(scan);
1722 break;
1722 } 1723 }
1724 }
1723 1725
1724 mutex_unlock(&rio_mport_list_lock); 1726 mutex_unlock(&rio_mport_list_lock);
1725 1727
diff --git a/drivers/rtc/rtc-twl.c b/drivers/rtc/rtc-twl.c
index 02faf3c4e0d5..c2e80d7ca5e2 100644
--- a/drivers/rtc/rtc-twl.c
+++ b/drivers/rtc/rtc-twl.c
@@ -524,6 +524,8 @@ static int twl_rtc_probe(struct platform_device *pdev)
524 if (ret < 0) 524 if (ret < 0)
525 goto out1; 525 goto out1;
526 526
527 device_init_wakeup(&pdev->dev, 1);
528
527 rtc = rtc_device_register(pdev->name, 529 rtc = rtc_device_register(pdev->name,
528 &pdev->dev, &twl_rtc_ops, THIS_MODULE); 530 &pdev->dev, &twl_rtc_ops, THIS_MODULE);
529 if (IS_ERR(rtc)) { 531 if (IS_ERR(rtc)) {
@@ -542,7 +544,6 @@ static int twl_rtc_probe(struct platform_device *pdev)
542 } 544 }
543 545
544 platform_set_drvdata(pdev, rtc); 546 platform_set_drvdata(pdev, rtc);
545 device_init_wakeup(&pdev->dev, 1);
546 return 0; 547 return 0;
547 548
548out2: 549out2:
diff --git a/drivers/s390/block/dasd.c b/drivers/s390/block/dasd.c
index 17150a778984..451bf99582ff 100644
--- a/drivers/s390/block/dasd.c
+++ b/drivers/s390/block/dasd.c
@@ -2392,6 +2392,12 @@ int dasd_sleep_on_immediatly(struct dasd_ccw_req *cqr)
2392 rc = cqr->intrc; 2392 rc = cqr->intrc;
2393 else 2393 else
2394 rc = -EIO; 2394 rc = -EIO;
2395
2396 /* kick tasklets */
2397 dasd_schedule_device_bh(device);
2398 if (device->block)
2399 dasd_schedule_block_bh(device->block);
2400
2395 return rc; 2401 return rc;
2396} 2402}
2397 2403
diff --git a/drivers/s390/cio/qdio_main.c b/drivers/s390/cio/qdio_main.c
index fb1c1e0483ed..8ed52aa49122 100644
--- a/drivers/s390/cio/qdio_main.c
+++ b/drivers/s390/cio/qdio_main.c
@@ -1497,7 +1497,7 @@ static inline int buf_in_between(int bufnr, int start, int count)
1497static int handle_inbound(struct qdio_q *q, unsigned int callflags, 1497static int handle_inbound(struct qdio_q *q, unsigned int callflags,
1498 int bufnr, int count) 1498 int bufnr, int count)
1499{ 1499{
1500 int used, diff; 1500 int diff;
1501 1501
1502 qperf_inc(q, inbound_call); 1502 qperf_inc(q, inbound_call);
1503 1503
@@ -1530,7 +1530,7 @@ static int handle_inbound(struct qdio_q *q, unsigned int callflags,
1530 1530
1531set: 1531set:
1532 count = set_buf_states(q, bufnr, SLSB_CU_INPUT_EMPTY, count); 1532 count = set_buf_states(q, bufnr, SLSB_CU_INPUT_EMPTY, count);
1533 used = atomic_add_return(count, &q->nr_buf_used) - count; 1533 atomic_add(count, &q->nr_buf_used);
1534 1534
1535 if (need_siga_in(q)) 1535 if (need_siga_in(q))
1536 return qdio_siga_input(q); 1536 return qdio_siga_input(q);
diff --git a/drivers/s390/crypto/ap_bus.c b/drivers/s390/crypto/ap_bus.c
index f446a7705c3b..d4174b82a1a9 100644
--- a/drivers/s390/crypto/ap_bus.c
+++ b/drivers/s390/crypto/ap_bus.c
@@ -71,6 +71,7 @@ MODULE_AUTHOR("IBM Corporation");
71MODULE_DESCRIPTION("Adjunct Processor Bus driver, " \ 71MODULE_DESCRIPTION("Adjunct Processor Bus driver, " \
72 "Copyright IBM Corp. 2006, 2012"); 72 "Copyright IBM Corp. 2006, 2012");
73MODULE_LICENSE("GPL"); 73MODULE_LICENSE("GPL");
74MODULE_ALIAS("z90crypt");
74 75
75/* 76/*
76 * Module parameter 77 * Module parameter
diff --git a/drivers/scsi/fnic/fnic.h b/drivers/scsi/fnic/fnic.h
index b6d1f92ed33c..c18c68150e9f 100644
--- a/drivers/scsi/fnic/fnic.h
+++ b/drivers/scsi/fnic/fnic.h
@@ -38,7 +38,7 @@
38 38
39#define DRV_NAME "fnic" 39#define DRV_NAME "fnic"
40#define DRV_DESCRIPTION "Cisco FCoE HBA Driver" 40#define DRV_DESCRIPTION "Cisco FCoE HBA Driver"
41#define DRV_VERSION "1.5.0.22" 41#define DRV_VERSION "1.5.0.23"
42#define PFX DRV_NAME ": " 42#define PFX DRV_NAME ": "
43#define DFX DRV_NAME "%d: " 43#define DFX DRV_NAME "%d: "
44 44
diff --git a/drivers/scsi/fnic/fnic_main.c b/drivers/scsi/fnic/fnic_main.c
index 5f09d1814d26..42e15ee6e1bb 100644
--- a/drivers/scsi/fnic/fnic_main.c
+++ b/drivers/scsi/fnic/fnic_main.c
@@ -642,19 +642,6 @@ static int fnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
642 INIT_WORK(&fnic->fip_frame_work, fnic_handle_fip_frame); 642 INIT_WORK(&fnic->fip_frame_work, fnic_handle_fip_frame);
643 INIT_WORK(&fnic->event_work, fnic_handle_event); 643 INIT_WORK(&fnic->event_work, fnic_handle_event);
644 skb_queue_head_init(&fnic->fip_frame_queue); 644 skb_queue_head_init(&fnic->fip_frame_queue);
645 spin_lock_irqsave(&fnic_list_lock, flags);
646 if (!fnic_fip_queue) {
647 fnic_fip_queue =
648 create_singlethread_workqueue("fnic_fip_q");
649 if (!fnic_fip_queue) {
650 spin_unlock_irqrestore(&fnic_list_lock, flags);
651 printk(KERN_ERR PFX "fnic FIP work queue "
652 "create failed\n");
653 err = -ENOMEM;
654 goto err_out_free_max_pool;
655 }
656 }
657 spin_unlock_irqrestore(&fnic_list_lock, flags);
658 INIT_LIST_HEAD(&fnic->evlist); 645 INIT_LIST_HEAD(&fnic->evlist);
659 INIT_LIST_HEAD(&fnic->vlans); 646 INIT_LIST_HEAD(&fnic->vlans);
660 } else { 647 } else {
@@ -960,6 +947,13 @@ static int __init fnic_init_module(void)
960 spin_lock_init(&fnic_list_lock); 947 spin_lock_init(&fnic_list_lock);
961 INIT_LIST_HEAD(&fnic_list); 948 INIT_LIST_HEAD(&fnic_list);
962 949
950 fnic_fip_queue = create_singlethread_workqueue("fnic_fip_q");
951 if (!fnic_fip_queue) {
952 printk(KERN_ERR PFX "fnic FIP work queue create failed\n");
953 err = -ENOMEM;
954 goto err_create_fip_workq;
955 }
956
963 fnic_fc_transport = fc_attach_transport(&fnic_fc_functions); 957 fnic_fc_transport = fc_attach_transport(&fnic_fc_functions);
964 if (!fnic_fc_transport) { 958 if (!fnic_fc_transport) {
965 printk(KERN_ERR PFX "fc_attach_transport error\n"); 959 printk(KERN_ERR PFX "fc_attach_transport error\n");
@@ -978,6 +972,8 @@ static int __init fnic_init_module(void)
978err_pci_register: 972err_pci_register:
979 fc_release_transport(fnic_fc_transport); 973 fc_release_transport(fnic_fc_transport);
980err_fc_transport: 974err_fc_transport:
975 destroy_workqueue(fnic_fip_queue);
976err_create_fip_workq:
981 destroy_workqueue(fnic_event_queue); 977 destroy_workqueue(fnic_event_queue);
982err_create_fnic_workq: 978err_create_fnic_workq:
983 kmem_cache_destroy(fnic_io_req_cache); 979 kmem_cache_destroy(fnic_io_req_cache);
diff --git a/drivers/scsi/isci/request.c b/drivers/scsi/isci/request.c
index 7b082157eb79..99d2930b18c8 100644
--- a/drivers/scsi/isci/request.c
+++ b/drivers/scsi/isci/request.c
@@ -185,7 +185,7 @@ static void sci_io_request_build_ssp_command_iu(struct isci_request *ireq)
185 cmd_iu->_r_c = 0; 185 cmd_iu->_r_c = 0;
186 186
187 sci_swab32_cpy(&cmd_iu->cdb, task->ssp_task.cmd->cmnd, 187 sci_swab32_cpy(&cmd_iu->cdb, task->ssp_task.cmd->cmnd,
188 task->ssp_task.cmd->cmd_len / sizeof(u32)); 188 (task->ssp_task.cmd->cmd_len+3) / sizeof(u32));
189} 189}
190 190
191static void sci_task_request_build_ssp_task_iu(struct isci_request *ireq) 191static void sci_task_request_build_ssp_task_iu(struct isci_request *ireq)
diff --git a/drivers/scsi/isci/task.c b/drivers/scsi/isci/task.c
index 9bb020ac089c..0d30ca849e8f 100644
--- a/drivers/scsi/isci/task.c
+++ b/drivers/scsi/isci/task.c
@@ -491,6 +491,7 @@ int isci_task_abort_task(struct sas_task *task)
491 struct isci_tmf tmf; 491 struct isci_tmf tmf;
492 int ret = TMF_RESP_FUNC_FAILED; 492 int ret = TMF_RESP_FUNC_FAILED;
493 unsigned long flags; 493 unsigned long flags;
494 int target_done_already = 0;
494 495
495 /* Get the isci_request reference from the task. Note that 496 /* Get the isci_request reference from the task. Note that
496 * this check does not depend on the pending request list 497 * this check does not depend on the pending request list
@@ -505,9 +506,11 @@ int isci_task_abort_task(struct sas_task *task)
505 /* If task is already done, the request isn't valid */ 506 /* If task is already done, the request isn't valid */
506 if (!(task->task_state_flags & SAS_TASK_STATE_DONE) && 507 if (!(task->task_state_flags & SAS_TASK_STATE_DONE) &&
507 (task->task_state_flags & SAS_TASK_AT_INITIATOR) && 508 (task->task_state_flags & SAS_TASK_AT_INITIATOR) &&
508 old_request) 509 old_request) {
509 idev = isci_get_device(task->dev->lldd_dev); 510 idev = isci_get_device(task->dev->lldd_dev);
510 511 target_done_already = test_bit(IREQ_COMPLETE_IN_TARGET,
512 &old_request->flags);
513 }
511 spin_unlock(&task->task_state_lock); 514 spin_unlock(&task->task_state_lock);
512 spin_unlock_irqrestore(&ihost->scic_lock, flags); 515 spin_unlock_irqrestore(&ihost->scic_lock, flags);
513 516
@@ -561,7 +564,7 @@ int isci_task_abort_task(struct sas_task *task)
561 564
562 if (task->task_proto == SAS_PROTOCOL_SMP || 565 if (task->task_proto == SAS_PROTOCOL_SMP ||
563 sas_protocol_ata(task->task_proto) || 566 sas_protocol_ata(task->task_proto) ||
564 test_bit(IREQ_COMPLETE_IN_TARGET, &old_request->flags) || 567 target_done_already ||
565 test_bit(IDEV_GONE, &idev->flags)) { 568 test_bit(IDEV_GONE, &idev->flags)) {
566 569
567 spin_unlock_irqrestore(&ihost->scic_lock, flags); 570 spin_unlock_irqrestore(&ihost->scic_lock, flags);
diff --git a/drivers/scsi/megaraid/megaraid_sas_base.c b/drivers/scsi/megaraid/megaraid_sas_base.c
index 0177295599e0..1f0ca68409d4 100644
--- a/drivers/scsi/megaraid/megaraid_sas_base.c
+++ b/drivers/scsi/megaraid/megaraid_sas_base.c
@@ -3547,11 +3547,21 @@ static int megasas_init_fw(struct megasas_instance *instance)
3547 break; 3547 break;
3548 } 3548 }
3549 3549
3550 /* 3550 if (megasas_transition_to_ready(instance, 0)) {
3551 * We expect the FW state to be READY 3551 atomic_set(&instance->fw_reset_no_pci_access, 1);
3552 */ 3552 instance->instancet->adp_reset
3553 if (megasas_transition_to_ready(instance, 0)) 3553 (instance, instance->reg_set);
3554 goto fail_ready_state; 3554 atomic_set(&instance->fw_reset_no_pci_access, 0);
3555 dev_info(&instance->pdev->dev,
3556 "megasas: FW restarted successfully from %s!\n",
3557 __func__);
3558
3559 /*waitting for about 30 second before retry*/
3560 ssleep(30);
3561
3562 if (megasas_transition_to_ready(instance, 0))
3563 goto fail_ready_state;
3564 }
3555 3565
3556 /* 3566 /*
3557 * MSI-X host index 0 is common for all adapter. 3567 * MSI-X host index 0 is common for all adapter.
diff --git a/drivers/scsi/mvsas/mv_sas.c b/drivers/scsi/mvsas/mv_sas.c
index f14665a6293d..6b1b4e91e53f 100644
--- a/drivers/scsi/mvsas/mv_sas.c
+++ b/drivers/scsi/mvsas/mv_sas.c
@@ -1857,11 +1857,16 @@ int mvs_slot_complete(struct mvs_info *mvi, u32 rx_desc, u32 flags)
1857 goto out; 1857 goto out;
1858 } 1858 }
1859 1859
1860 /* error info record present */ 1860 /*
1861 if (unlikely((rx_desc & RXQ_ERR) && (*(u64 *) slot->response))) { 1861 * error info record present; slot->response is 32 bit aligned but may
1862 * not be 64 bit aligned, so check for zero in two 32 bit reads
1863 */
1864 if (unlikely((rx_desc & RXQ_ERR)
1865 && (*((u32 *)slot->response)
1866 || *(((u32 *)slot->response) + 1)))) {
1862 mv_dprintk("port %d slot %d rx_desc %X has error info" 1867 mv_dprintk("port %d slot %d rx_desc %X has error info"
1863 "%016llX.\n", slot->port->sas_port.id, slot_idx, 1868 "%016llX.\n", slot->port->sas_port.id, slot_idx,
1864 rx_desc, (u64)(*(u64 *)slot->response)); 1869 rx_desc, get_unaligned_le64(slot->response));
1865 tstat->stat = mvs_slot_err(mvi, task, slot_idx); 1870 tstat->stat = mvs_slot_err(mvi, task, slot_idx);
1866 tstat->resp = SAS_TASK_COMPLETE; 1871 tstat->resp = SAS_TASK_COMPLETE;
1867 goto out; 1872 goto out;
diff --git a/drivers/scsi/mvsas/mv_sas.h b/drivers/scsi/mvsas/mv_sas.h
index 60e2fb7f2dca..d6b19dc80bee 100644
--- a/drivers/scsi/mvsas/mv_sas.h
+++ b/drivers/scsi/mvsas/mv_sas.h
@@ -39,6 +39,7 @@
39#include <linux/irq.h> 39#include <linux/irq.h>
40#include <linux/slab.h> 40#include <linux/slab.h>
41#include <linux/vmalloc.h> 41#include <linux/vmalloc.h>
42#include <asm/unaligned.h>
42#include <scsi/libsas.h> 43#include <scsi/libsas.h>
43#include <scsi/scsi.h> 44#include <scsi/scsi.h>
44#include <scsi/scsi_tcq.h> 45#include <scsi/scsi_tcq.h>
diff --git a/drivers/scsi/qla2xxx/qla_iocb.c b/drivers/scsi/qla2xxx/qla_iocb.c
index 42ef481db942..ef0a5481b9dd 100644
--- a/drivers/scsi/qla2xxx/qla_iocb.c
+++ b/drivers/scsi/qla2xxx/qla_iocb.c
@@ -419,6 +419,8 @@ qla2x00_start_scsi(srb_t *sp)
419 __constant_cpu_to_le16(CF_SIMPLE_TAG); 419 __constant_cpu_to_le16(CF_SIMPLE_TAG);
420 break; 420 break;
421 } 421 }
422 } else {
423 cmd_pkt->control_flags = __constant_cpu_to_le16(CF_SIMPLE_TAG);
422 } 424 }
423 425
424 /* Load SCSI command packet. */ 426 /* Load SCSI command packet. */
@@ -1307,11 +1309,11 @@ qla24xx_build_scsi_crc_2_iocbs(srb_t *sp, struct cmd_type_crc_2 *cmd_pkt,
1307 fcp_cmnd->task_attribute = TSK_ORDERED; 1309 fcp_cmnd->task_attribute = TSK_ORDERED;
1308 break; 1310 break;
1309 default: 1311 default:
1310 fcp_cmnd->task_attribute = 0; 1312 fcp_cmnd->task_attribute = TSK_SIMPLE;
1311 break; 1313 break;
1312 } 1314 }
1313 } else { 1315 } else {
1314 fcp_cmnd->task_attribute = 0; 1316 fcp_cmnd->task_attribute = TSK_SIMPLE;
1315 } 1317 }
1316 1318
1317 cmd_pkt->fcp_rsp_dseg_len = 0; /* Let response come in status iocb */ 1319 cmd_pkt->fcp_rsp_dseg_len = 0; /* Let response come in status iocb */
@@ -1525,7 +1527,12 @@ qla24xx_start_scsi(srb_t *sp)
1525 case ORDERED_QUEUE_TAG: 1527 case ORDERED_QUEUE_TAG:
1526 cmd_pkt->task = TSK_ORDERED; 1528 cmd_pkt->task = TSK_ORDERED;
1527 break; 1529 break;
1530 default:
1531 cmd_pkt->task = TSK_SIMPLE;
1532 break;
1528 } 1533 }
1534 } else {
1535 cmd_pkt->task = TSK_SIMPLE;
1529 } 1536 }
1530 1537
1531 /* Load SCSI command packet. */ 1538 /* Load SCSI command packet. */
diff --git a/drivers/scsi/scsi.c b/drivers/scsi/scsi.c
index 3b1ea34e1f5a..eaa808e6ba91 100644
--- a/drivers/scsi/scsi.c
+++ b/drivers/scsi/scsi.c
@@ -1031,6 +1031,9 @@ int scsi_get_vpd_page(struct scsi_device *sdev, u8 page, unsigned char *buf,
1031{ 1031{
1032 int i, result; 1032 int i, result;
1033 1033
1034 if (sdev->skip_vpd_pages)
1035 goto fail;
1036
1034 /* Ask for all the pages supported by this device */ 1037 /* Ask for all the pages supported by this device */
1035 result = scsi_vpd_inquiry(sdev, buf, 0, buf_len); 1038 result = scsi_vpd_inquiry(sdev, buf, 0, buf_len);
1036 if (result) 1039 if (result)
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
index 80f39b8b0223..86fcf2c313ad 100644
--- a/drivers/scsi/sd.c
+++ b/drivers/scsi/sd.c
@@ -838,10 +838,17 @@ static int scsi_setup_flush_cmnd(struct scsi_device *sdp, struct request *rq)
838 838
839static void sd_unprep_fn(struct request_queue *q, struct request *rq) 839static void sd_unprep_fn(struct request_queue *q, struct request *rq)
840{ 840{
841 struct scsi_cmnd *SCpnt = rq->special;
842
841 if (rq->cmd_flags & REQ_DISCARD) { 843 if (rq->cmd_flags & REQ_DISCARD) {
842 free_page((unsigned long)rq->buffer); 844 free_page((unsigned long)rq->buffer);
843 rq->buffer = NULL; 845 rq->buffer = NULL;
844 } 846 }
847 if (SCpnt->cmnd != rq->cmd) {
848 mempool_free(SCpnt->cmnd, sd_cdb_pool);
849 SCpnt->cmnd = NULL;
850 SCpnt->cmd_len = 0;
851 }
845} 852}
846 853
847/** 854/**
@@ -1720,21 +1727,6 @@ static int sd_done(struct scsi_cmnd *SCpnt)
1720 if (rq_data_dir(SCpnt->request) == READ && scsi_prot_sg_count(SCpnt)) 1727 if (rq_data_dir(SCpnt->request) == READ && scsi_prot_sg_count(SCpnt))
1721 sd_dif_complete(SCpnt, good_bytes); 1728 sd_dif_complete(SCpnt, good_bytes);
1722 1729
1723 if (scsi_host_dif_capable(sdkp->device->host, sdkp->protection_type)
1724 == SD_DIF_TYPE2_PROTECTION && SCpnt->cmnd != SCpnt->request->cmd) {
1725
1726 /* We have to print a failed command here as the
1727 * extended CDB gets freed before scsi_io_completion()
1728 * is called.
1729 */
1730 if (result)
1731 scsi_print_command(SCpnt);
1732
1733 mempool_free(SCpnt->cmnd, sd_cdb_pool);
1734 SCpnt->cmnd = NULL;
1735 SCpnt->cmd_len = 0;
1736 }
1737
1738 return good_bytes; 1730 return good_bytes;
1739} 1731}
1740 1732
diff --git a/drivers/scsi/virtio_scsi.c b/drivers/scsi/virtio_scsi.c
index 2168258fb2c3..74b88efde6ad 100644
--- a/drivers/scsi/virtio_scsi.c
+++ b/drivers/scsi/virtio_scsi.c
@@ -751,7 +751,7 @@ static void __virtscsi_set_affinity(struct virtio_scsi *vscsi, bool affinity)
751 751
752 vscsi->affinity_hint_set = true; 752 vscsi->affinity_hint_set = true;
753 } else { 753 } else {
754 for (i = 0; i < vscsi->num_queues - VIRTIO_SCSI_VQ_BASE; i++) 754 for (i = 0; i < vscsi->num_queues; i++)
755 virtqueue_set_affinity(vscsi->req_vqs[i].vq, -1); 755 virtqueue_set_affinity(vscsi->req_vqs[i].vq, -1);
756 756
757 vscsi->affinity_hint_set = false; 757 vscsi->affinity_hint_set = false;
diff --git a/drivers/spi/spi-altera.c b/drivers/spi/spi-altera.c
index 8a6bb37910da..81b9adb6e766 100644
--- a/drivers/spi/spi-altera.c
+++ b/drivers/spi/spi-altera.c
@@ -103,6 +103,16 @@ static void altera_spi_chipsel(struct spi_device *spi, int value)
103 } 103 }
104} 104}
105 105
106static int altera_spi_setupxfer(struct spi_device *spi, struct spi_transfer *t)
107{
108 return 0;
109}
110
111static int altera_spi_setup(struct spi_device *spi)
112{
113 return 0;
114}
115
106static inline unsigned int hw_txbyte(struct altera_spi *hw, int count) 116static inline unsigned int hw_txbyte(struct altera_spi *hw, int count)
107{ 117{
108 if (hw->tx) { 118 if (hw->tx) {
@@ -221,6 +231,7 @@ static int altera_spi_probe(struct platform_device *pdev)
221 master->bus_num = pdev->id; 231 master->bus_num = pdev->id;
222 master->num_chipselect = 16; 232 master->num_chipselect = 16;
223 master->mode_bits = SPI_CS_HIGH; 233 master->mode_bits = SPI_CS_HIGH;
234 master->setup = altera_spi_setup;
224 235
225 hw = spi_master_get_devdata(master); 236 hw = spi_master_get_devdata(master);
226 platform_set_drvdata(pdev, hw); 237 platform_set_drvdata(pdev, hw);
@@ -229,6 +240,7 @@ static int altera_spi_probe(struct platform_device *pdev)
229 hw->bitbang.master = spi_master_get(master); 240 hw->bitbang.master = spi_master_get(master);
230 if (!hw->bitbang.master) 241 if (!hw->bitbang.master)
231 return err; 242 return err;
243 hw->bitbang.setup_transfer = altera_spi_setupxfer;
232 hw->bitbang.chipselect = altera_spi_chipsel; 244 hw->bitbang.chipselect = altera_spi_chipsel;
233 hw->bitbang.txrx_bufs = altera_spi_txrx; 245 hw->bitbang.txrx_bufs = altera_spi_txrx;
234 246
diff --git a/drivers/spi/spi-davinci.c b/drivers/spi/spi-davinci.c
index 222d3e37fc28..707966bd5610 100644
--- a/drivers/spi/spi-davinci.c
+++ b/drivers/spi/spi-davinci.c
@@ -609,7 +609,7 @@ static int davinci_spi_bufs(struct spi_device *spi, struct spi_transfer *t)
609 else 609 else
610 buf = (void *)t->tx_buf; 610 buf = (void *)t->tx_buf;
611 t->tx_dma = dma_map_single(&spi->dev, buf, 611 t->tx_dma = dma_map_single(&spi->dev, buf,
612 t->len, DMA_FROM_DEVICE); 612 t->len, DMA_TO_DEVICE);
613 if (!t->tx_dma) { 613 if (!t->tx_dma) {
614 ret = -EFAULT; 614 ret = -EFAULT;
615 goto err_tx_map; 615 goto err_tx_map;
diff --git a/drivers/spi/spi-nuc900.c b/drivers/spi/spi-nuc900.c
index 2ad3d74ac021..150d85453c27 100644
--- a/drivers/spi/spi-nuc900.c
+++ b/drivers/spi/spi-nuc900.c
@@ -174,6 +174,17 @@ static void nuc900_spi_gobusy(struct nuc900_spi *hw)
174 spin_unlock_irqrestore(&hw->lock, flags); 174 spin_unlock_irqrestore(&hw->lock, flags);
175} 175}
176 176
177static int nuc900_spi_setupxfer(struct spi_device *spi,
178 struct spi_transfer *t)
179{
180 return 0;
181}
182
183static int nuc900_spi_setup(struct spi_device *spi)
184{
185 return 0;
186}
187
177static inline unsigned int hw_txbyte(struct nuc900_spi *hw, int count) 188static inline unsigned int hw_txbyte(struct nuc900_spi *hw, int count)
178{ 189{
179 return hw->tx ? hw->tx[count] : 0; 190 return hw->tx ? hw->tx[count] : 0;
@@ -366,8 +377,10 @@ static int nuc900_spi_probe(struct platform_device *pdev)
366 master->num_chipselect = hw->pdata->num_cs; 377 master->num_chipselect = hw->pdata->num_cs;
367 master->bus_num = hw->pdata->bus_num; 378 master->bus_num = hw->pdata->bus_num;
368 hw->bitbang.master = hw->master; 379 hw->bitbang.master = hw->master;
380 hw->bitbang.setup_transfer = nuc900_spi_setupxfer;
369 hw->bitbang.chipselect = nuc900_spi_chipsel; 381 hw->bitbang.chipselect = nuc900_spi_chipsel;
370 hw->bitbang.txrx_bufs = nuc900_spi_txrx; 382 hw->bitbang.txrx_bufs = nuc900_spi_txrx;
383 hw->bitbang.master->setup = nuc900_spi_setup;
371 384
372 hw->res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 385 hw->res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
373 if (hw->res == NULL) { 386 if (hw->res == NULL) {
diff --git a/drivers/spi/spi-s3c64xx.c b/drivers/spi/spi-s3c64xx.c
index eb53df27e7ea..63e2070c6c14 100644
--- a/drivers/spi/spi-s3c64xx.c
+++ b/drivers/spi/spi-s3c64xx.c
@@ -434,6 +434,9 @@ static int s3c64xx_spi_prepare_transfer(struct spi_master *spi)
434 dma_cap_mask_t mask; 434 dma_cap_mask_t mask;
435 int ret; 435 int ret;
436 436
437 if (is_polling(sdd))
438 return 0;
439
437 dma_cap_zero(mask); 440 dma_cap_zero(mask);
438 dma_cap_set(DMA_SLAVE, mask); 441 dma_cap_set(DMA_SLAVE, mask);
439 442
diff --git a/drivers/spi/spi-xilinx.c b/drivers/spi/spi-xilinx.c
index fb56fcfdf65e..09a942852593 100644
--- a/drivers/spi/spi-xilinx.c
+++ b/drivers/spi/spi-xilinx.c
@@ -233,6 +233,21 @@ static int xilinx_spi_setup_transfer(struct spi_device *spi,
233 return 0; 233 return 0;
234} 234}
235 235
236static int xilinx_spi_setup(struct spi_device *spi)
237{
238 /* always return 0, we can not check the number of bits.
239 * There are cases when SPI setup is called before any driver is
240 * there, in that case the SPI core defaults to 8 bits, which we
241 * do not support in some cases. But if we return an error, the
242 * SPI device would not be registered and no driver can get hold of it
243 * When the driver is there, it will call SPI setup again with the
244 * correct number of bits per transfer.
245 * If a driver setups with the wrong bit number, it will fail when
246 * it tries to do a transfer
247 */
248 return 0;
249}
250
236static void xilinx_spi_fill_tx_fifo(struct xilinx_spi *xspi) 251static void xilinx_spi_fill_tx_fifo(struct xilinx_spi *xspi)
237{ 252{
238 u8 sr; 253 u8 sr;
@@ -360,6 +375,7 @@ struct spi_master *xilinx_spi_init(struct device *dev, struct resource *mem,
360 xspi->bitbang.chipselect = xilinx_spi_chipselect; 375 xspi->bitbang.chipselect = xilinx_spi_chipselect;
361 xspi->bitbang.setup_transfer = xilinx_spi_setup_transfer; 376 xspi->bitbang.setup_transfer = xilinx_spi_setup_transfer;
362 xspi->bitbang.txrx_bufs = xilinx_spi_txrx_bufs; 377 xspi->bitbang.txrx_bufs = xilinx_spi_txrx_bufs;
378 xspi->bitbang.master->setup = xilinx_spi_setup;
363 init_completion(&xspi->done); 379 init_completion(&xspi->done);
364 380
365 if (!request_mem_region(mem->start, resource_size(mem), 381 if (!request_mem_region(mem->start, resource_size(mem),
diff --git a/drivers/staging/Kconfig b/drivers/staging/Kconfig
index 3227ebeae3f1..57d8b3444600 100644
--- a/drivers/staging/Kconfig
+++ b/drivers/staging/Kconfig
@@ -118,8 +118,6 @@ source "drivers/staging/ozwpan/Kconfig"
118 118
119source "drivers/staging/gdm72xx/Kconfig" 119source "drivers/staging/gdm72xx/Kconfig"
120 120
121source "drivers/staging/csr/Kconfig"
122
123source "drivers/staging/silicom/Kconfig" 121source "drivers/staging/silicom/Kconfig"
124 122
125source "drivers/staging/ced1401/Kconfig" 123source "drivers/staging/ced1401/Kconfig"
diff --git a/drivers/staging/Makefile b/drivers/staging/Makefile
index 4d79ebe2de06..429321f15105 100644
--- a/drivers/staging/Makefile
+++ b/drivers/staging/Makefile
@@ -52,7 +52,6 @@ obj-$(CONFIG_MFD_NVEC) += nvec/
52obj-$(CONFIG_ANDROID) += android/ 52obj-$(CONFIG_ANDROID) += android/
53obj-$(CONFIG_USB_WPAN_HCD) += ozwpan/ 53obj-$(CONFIG_USB_WPAN_HCD) += ozwpan/
54obj-$(CONFIG_WIMAX_GDM72XX) += gdm72xx/ 54obj-$(CONFIG_WIMAX_GDM72XX) += gdm72xx/
55obj-$(CONFIG_CSR_WIFI) += csr/
56obj-$(CONFIG_NET_VENDOR_SILICOM) += silicom/ 55obj-$(CONFIG_NET_VENDOR_SILICOM) += silicom/
57obj-$(CONFIG_CED1401) += ced1401/ 56obj-$(CONFIG_CED1401) += ced1401/
58obj-$(CONFIG_DRM_IMX) += imx-drm/ 57obj-$(CONFIG_DRM_IMX) += imx-drm/
diff --git a/drivers/staging/android/logger.c b/drivers/staging/android/logger.c
index 080abf2faf97..a8c344422a77 100644
--- a/drivers/staging/android/logger.c
+++ b/drivers/staging/android/logger.c
@@ -469,7 +469,7 @@ static ssize_t logger_aio_write(struct kiocb *iocb, const struct iovec *iov,
469 unsigned long nr_segs, loff_t ppos) 469 unsigned long nr_segs, loff_t ppos)
470{ 470{
471 struct logger_log *log = file_get_log(iocb->ki_filp); 471 struct logger_log *log = file_get_log(iocb->ki_filp);
472 size_t orig = log->w_off; 472 size_t orig;
473 struct logger_entry header; 473 struct logger_entry header;
474 struct timespec now; 474 struct timespec now;
475 ssize_t ret = 0; 475 ssize_t ret = 0;
@@ -490,6 +490,8 @@ static ssize_t logger_aio_write(struct kiocb *iocb, const struct iovec *iov,
490 490
491 mutex_lock(&log->mutex); 491 mutex_lock(&log->mutex);
492 492
493 orig = log->w_off;
494
493 /* 495 /*
494 * Fix up any readers, pulling them forward to the first readable 496 * Fix up any readers, pulling them forward to the first readable
495 * entry after (what will be) the new write offset. We do this now 497 * entry after (what will be) the new write offset. We do this now
diff --git a/drivers/staging/comedi/TODO b/drivers/staging/comedi/TODO
index b10f739b7e3e..fa8da9aada30 100644
--- a/drivers/staging/comedi/TODO
+++ b/drivers/staging/comedi/TODO
@@ -9,4 +9,4 @@ TODO:
9Please send patches to Greg Kroah-Hartman <greg@kroah.com> and 9Please send patches to Greg Kroah-Hartman <greg@kroah.com> and
10copy: 10copy:
11 Ian Abbott <abbotti@mev.co.uk> 11 Ian Abbott <abbotti@mev.co.uk>
12 Frank Mori Hess <fmhess@users.sourceforge.net> 12 H Hartley Sweeten <hsweeten@visionengravers.com>
diff --git a/drivers/staging/comedi/comedi_fops.c b/drivers/staging/comedi/comedi_fops.c
index 8647518259f6..f4a197b2d1fd 100644
--- a/drivers/staging/comedi/comedi_fops.c
+++ b/drivers/staging/comedi/comedi_fops.c
@@ -1413,22 +1413,19 @@ static int do_cmd_ioctl(struct comedi_device *dev,
1413 DPRINTK("subdevice busy\n"); 1413 DPRINTK("subdevice busy\n");
1414 return -EBUSY; 1414 return -EBUSY;
1415 } 1415 }
1416 s->busy = file;
1417 1416
1418 /* make sure channel/gain list isn't too long */ 1417 /* make sure channel/gain list isn't too long */
1419 if (cmd.chanlist_len > s->len_chanlist) { 1418 if (cmd.chanlist_len > s->len_chanlist) {
1420 DPRINTK("channel/gain list too long %u > %d\n", 1419 DPRINTK("channel/gain list too long %u > %d\n",
1421 cmd.chanlist_len, s->len_chanlist); 1420 cmd.chanlist_len, s->len_chanlist);
1422 ret = -EINVAL; 1421 return -EINVAL;
1423 goto cleanup;
1424 } 1422 }
1425 1423
1426 /* make sure channel/gain list isn't too short */ 1424 /* make sure channel/gain list isn't too short */
1427 if (cmd.chanlist_len < 1) { 1425 if (cmd.chanlist_len < 1) {
1428 DPRINTK("channel/gain list too short %u < 1\n", 1426 DPRINTK("channel/gain list too short %u < 1\n",
1429 cmd.chanlist_len); 1427 cmd.chanlist_len);
1430 ret = -EINVAL; 1428 return -EINVAL;
1431 goto cleanup;
1432 } 1429 }
1433 1430
1434 async->cmd = cmd; 1431 async->cmd = cmd;
@@ -1438,8 +1435,7 @@ static int do_cmd_ioctl(struct comedi_device *dev,
1438 kmalloc(async->cmd.chanlist_len * sizeof(int), GFP_KERNEL); 1435 kmalloc(async->cmd.chanlist_len * sizeof(int), GFP_KERNEL);
1439 if (!async->cmd.chanlist) { 1436 if (!async->cmd.chanlist) {
1440 DPRINTK("allocation failed\n"); 1437 DPRINTK("allocation failed\n");
1441 ret = -ENOMEM; 1438 return -ENOMEM;
1442 goto cleanup;
1443 } 1439 }
1444 1440
1445 if (copy_from_user(async->cmd.chanlist, user_chanlist, 1441 if (copy_from_user(async->cmd.chanlist, user_chanlist,
@@ -1491,6 +1487,9 @@ static int do_cmd_ioctl(struct comedi_device *dev,
1491 1487
1492 comedi_set_subdevice_runflags(s, ~0, SRF_USER | SRF_RUNNING); 1488 comedi_set_subdevice_runflags(s, ~0, SRF_USER | SRF_RUNNING);
1493 1489
1490 /* set s->busy _after_ setting SRF_RUNNING flag to avoid race with
1491 * comedi_read() or comedi_write() */
1492 s->busy = file;
1494 ret = s->do_cmd(dev, s); 1493 ret = s->do_cmd(dev, s);
1495 if (ret == 0) 1494 if (ret == 0)
1496 return 0; 1495 return 0;
@@ -1705,6 +1704,7 @@ static int do_cancel_ioctl(struct comedi_device *dev, unsigned int arg,
1705 void *file) 1704 void *file)
1706{ 1705{
1707 struct comedi_subdevice *s; 1706 struct comedi_subdevice *s;
1707 int ret;
1708 1708
1709 if (arg >= dev->n_subdevices) 1709 if (arg >= dev->n_subdevices)
1710 return -EINVAL; 1710 return -EINVAL;
@@ -1721,7 +1721,11 @@ static int do_cancel_ioctl(struct comedi_device *dev, unsigned int arg,
1721 if (s->busy != file) 1721 if (s->busy != file)
1722 return -EBUSY; 1722 return -EBUSY;
1723 1723
1724 return do_cancel(dev, s); 1724 ret = do_cancel(dev, s);
1725 if (comedi_get_subdevice_runflags(s) & SRF_USER)
1726 wake_up_interruptible(&s->async->wait_head);
1727
1728 return ret;
1725} 1729}
1726 1730
1727/* 1731/*
@@ -2053,11 +2057,13 @@ static ssize_t comedi_write(struct file *file, const char __user *buf,
2053 2057
2054 if (!comedi_is_subdevice_running(s)) { 2058 if (!comedi_is_subdevice_running(s)) {
2055 if (count == 0) { 2059 if (count == 0) {
2060 mutex_lock(&dev->mutex);
2056 if (comedi_is_subdevice_in_error(s)) 2061 if (comedi_is_subdevice_in_error(s))
2057 retval = -EPIPE; 2062 retval = -EPIPE;
2058 else 2063 else
2059 retval = 0; 2064 retval = 0;
2060 do_become_nonbusy(dev, s); 2065 do_become_nonbusy(dev, s);
2066 mutex_unlock(&dev->mutex);
2061 } 2067 }
2062 break; 2068 break;
2063 } 2069 }
@@ -2156,11 +2162,13 @@ static ssize_t comedi_read(struct file *file, char __user *buf, size_t nbytes,
2156 2162
2157 if (n == 0) { 2163 if (n == 0) {
2158 if (!comedi_is_subdevice_running(s)) { 2164 if (!comedi_is_subdevice_running(s)) {
2165 mutex_lock(&dev->mutex);
2159 do_become_nonbusy(dev, s); 2166 do_become_nonbusy(dev, s);
2160 if (comedi_is_subdevice_in_error(s)) 2167 if (comedi_is_subdevice_in_error(s))
2161 retval = -EPIPE; 2168 retval = -EPIPE;
2162 else 2169 else
2163 retval = 0; 2170 retval = 0;
2171 mutex_unlock(&dev->mutex);
2164 break; 2172 break;
2165 } 2173 }
2166 if (file->f_flags & O_NONBLOCK) { 2174 if (file->f_flags & O_NONBLOCK) {
@@ -2198,9 +2206,11 @@ static ssize_t comedi_read(struct file *file, char __user *buf, size_t nbytes,
2198 buf += n; 2206 buf += n;
2199 break; /* makes device work like a pipe */ 2207 break; /* makes device work like a pipe */
2200 } 2208 }
2201 if (comedi_is_subdevice_idle(s) && 2209 if (comedi_is_subdevice_idle(s)) {
2202 async->buf_read_count - async->buf_write_count == 0) { 2210 mutex_lock(&dev->mutex);
2203 do_become_nonbusy(dev, s); 2211 if (async->buf_read_count - async->buf_write_count == 0)
2212 do_become_nonbusy(dev, s);
2213 mutex_unlock(&dev->mutex);
2204 } 2214 }
2205 set_current_state(TASK_RUNNING); 2215 set_current_state(TASK_RUNNING);
2206 remove_wait_queue(&async->wait_head, &wait); 2216 remove_wait_queue(&async->wait_head, &wait);
diff --git a/drivers/staging/csr/Kconfig b/drivers/staging/csr/Kconfig
deleted file mode 100644
index ad2a1096e920..000000000000
--- a/drivers/staging/csr/Kconfig
+++ /dev/null
@@ -1,9 +0,0 @@
1config CSR_WIFI
2 tristate "CSR wireless driver"
3 depends on MMC && CFG80211_WEXT && INET
4 select WIRELESS_EXT
5 select WEXT_PRIV
6 help
7 Driver for the CSR wireless SDIO device.
8
9 If unsure, select N.
diff --git a/drivers/staging/csr/LICENSE.txt b/drivers/staging/csr/LICENSE.txt
deleted file mode 100644
index 364853e5fedc..000000000000
--- a/drivers/staging/csr/LICENSE.txt
+++ /dev/null
@@ -1,39 +0,0 @@
1Permission is hereby granted, free of charge, to any person obtaining
2a copy of this software and associated documentation files (the
3"Software"), to deal in the Software without restriction, including
4without limitation the rights to use, copy, modify, merge, publish,
5distribute, sublicense, and/or sell copies of the Software, and to
6permit persons to whom the Software is furnished to do so, subject to
7the following conditions:
8
9The above copyright notice and this permission notice shall be
10included in all copies or substantial portions of the Software.
11
12Except as contained in this notice, the names of above-listed
13copyright holders and the names of any contributors shall not be used
14in advertising or otherwise to promote the sale, use or other dealings
15in this Software without prior written authorization.
16
17THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
18EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
19MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
20NONINFRINGEMENT. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR
21CONTRIBUTORS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
22WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT
23OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
24THE SOFTWARE.
25
26Alternatively, this software may be distributed under the terms of the
27GNU General Public License ("GPL") version 2 as published
28by the Free Software Foundation.
29
30As a special exception, if other files instantiate templates or use
31macros or inline functions from this file, or you compile this file
32and link it with other works to produce a work based on this file,
33this file does not by itself cause the resulting work to be covered by
34the GNU General Public License. However the source code for this file
35must still be made available in accordance with section (3) of the GNU
36General Public License.
37
38This exception does not invalidate any other reasons why a work based
39on this file might be covered by the GNU General Public License.
diff --git a/drivers/staging/csr/Makefile b/drivers/staging/csr/Makefile
deleted file mode 100644
index dbd135a8b177..000000000000
--- a/drivers/staging/csr/Makefile
+++ /dev/null
@@ -1,73 +0,0 @@
1ccflags-y := -DCSR_SME_USERSPACE -DCSR_SUPPORT_SME -DREMOTE_SYS_SAP -DCSR_WIFI_SECURITY_WAPI_ENABLE -DENABLE_SHUTDOWN -DUNIFI_DEBUG
2ccflags-y += -DSDIO_EXPORTS_STRUCT_DEVICE -DCSR_WIFI_SUPPORT_MMC_DRIVER -DCSR_WIFI_SINGLE_FUNCTION -DCSR_WIFI_SPLIT_PATCH
3ccflags-y += -DCSR_SUPPORT_WEXT -DREMOTE_SYS_SAP -DREMOTE_MGT_SAP -DCSR_WIFI_SECURITY_WAPI_ENABLE -DCSR_WIFI_SECURITY_WAPI_QOSCTRL_MIC_WORKAROUND -DENABLE_SHUTDOWN -DCSR_WIFI_NME_ENABLE -DCSR_WIFI_AP_ENABLE -DCSR_SUPPORT_WEXT_AP -DCSR_WIFI_REQUEUE_PACKET_TO_HAL
4
5obj-$(CONFIG_CSR_WIFI) += csr_wifi.o
6obj-$(CONFIG_CSR_WIFI) += csr_helper.o
7
8csr_wifi-y := bh.o \
9 data_tx.o \
10 drv.o \
11 firmware.o \
12 inet.o \
13 init_hw.o \
14 io.o \
15 monitor.o \
16 netdev.o \
17 os.o \
18 putest.o \
19 sdio_events.o \
20 sdio_mmc.o \
21 sdio_stubs.o \
22 sme_blocking.o \
23 ul_int.o \
24 unifi_dbg.o \
25 unifi_event.o \
26 unifi_pdu_processing.o \
27 unifi_sme.o \
28 csr_wifi_hip_card_sdio.o \
29 csr_wifi_hip_card_sdio_intr.o \
30 csr_wifi_hip_card_sdio_mem.o \
31 csr_wifi_hip_chiphelper.o \
32 csr_wifi_hip_download.o \
33 csr_wifi_hip_dump.o \
34 csr_wifi_hip_packing.o \
35 csr_wifi_hip_send.o \
36 csr_wifi_hip_signals.o \
37 csr_wifi_hip_ta_sampling.o \
38 csr_wifi_hip_udi.o \
39 csr_wifi_hip_unifi_signal_names.o \
40 csr_wifi_hip_xbv.o \
41 csr_wifi_nme_ap_converter_init.o \
42 csr_wifi_nme_ap_free_downstream_contents.o \
43 csr_wifi_nme_ap_free_upstream_contents.o \
44 csr_wifi_nme_ap_serialize.o \
45 csr_wifi_nme_ap_sef.o \
46 csr_wifi_router_ctrl_sef.o \
47 csr_wifi_router_sef.o \
48 csr_wifi_router_transport.o \
49 csr_wifi_sme_sef.o \
50 csr_wifi_sme_converter_init.o \
51 csr_wifi_sme_free_downstream_contents.o \
52 csr_wifi_sme_free_upstream_contents.o \
53 csr_wifi_sme_serialize.o \
54 csr_wifi_router_ctrl_converter_init.o \
55 csr_wifi_router_ctrl_free_downstream_contents.o \
56 csr_wifi_router_ctrl_free_upstream_contents.o \
57 csr_wifi_router_ctrl_serialize.o \
58 csr_wifi_router_converter_init.o \
59 csr_wifi_router_free_downstream_contents.o \
60 csr_wifi_router_free_upstream_contents.o \
61 csr_wifi_router_serialize.o \
62 sme_mgt.o \
63 sme_sys.o \
64 sme_userspace.o \
65 sme_wext.o \
66 wext_events.o
67
68csr_helper-y := csr_time.o \
69 csr_util.o \
70 csr_framework_ext.o \
71 csr_wifi_serialize_primitive_types.o \
72 csr_serialize_primitive_types.o \
73 csr_msgconv.o
diff --git a/drivers/staging/csr/bh.c b/drivers/staging/csr/bh.c
deleted file mode 100644
index d795852ccb1c..000000000000
--- a/drivers/staging/csr/bh.c
+++ /dev/null
@@ -1,404 +0,0 @@
1/*
2 * ---------------------------------------------------------------------------
3 * FILE: bh.c
4 *
5 * PURPOSE:
6 * Provides an implementation for the driver bottom-half.
7 * It is part of the porting exercise in Linux.
8 *
9 * Copyright (C) 2005-2009 by Cambridge Silicon Radio Ltd.
10 *
11 * Refer to LICENSE.txt included with this source code for details on
12 * the license terms.
13 *
14 * ---------------------------------------------------------------------------
15 */
16#include "csr_wifi_hip_unifi.h"
17#include "unifi_priv.h"
18#include <linux/sched/rt.h>
19
20/*
21 * ---------------------------------------------------------------------------
22 * uf_start_thread
23 *
24 * Helper function to start a new thread.
25 *
26 * Arguments:
27 * priv Pointer to OS driver structure for the device.
28 * thread Pointer to the thread object
29 * func The thread function
30 *
31 * Returns:
32 * 0 on success or else a Linux error code.
33 * ---------------------------------------------------------------------------
34 */
35int uf_start_thread(unifi_priv_t *priv,
36 struct uf_thread *thread, int (*func)(void *))
37{
38 if (thread->thread_task != NULL) {
39 unifi_error(priv, "%s thread already started\n", thread->name);
40 return 0;
41 }
42
43 /* Start the kernel thread that handles all h/w accesses. */
44 thread->thread_task = kthread_run(func, priv, "%s", thread->name);
45 if (IS_ERR(thread->thread_task))
46 return PTR_ERR(thread->thread_task);
47
48 /* Module parameter overides the thread priority */
49 if (bh_priority != -1) {
50 if (bh_priority >= 0 && bh_priority <= MAX_RT_PRIO) {
51 struct sched_param param;
52 priv->bh_thread.prio = bh_priority;
53 unifi_trace(priv, UDBG1,
54 "%s thread (RT) priority = %d\n",
55 thread->name, bh_priority);
56 param.sched_priority = bh_priority;
57 sched_setscheduler(thread->thread_task,
58 SCHED_FIFO, &param);
59 } else if (bh_priority > MAX_RT_PRIO &&
60 bh_priority <= MAX_PRIO) {
61 priv->bh_thread.prio = bh_priority;
62 unifi_trace(priv, UDBG1, "%s thread priority = %d\n",
63 thread->name,
64 PRIO_TO_NICE(bh_priority));
65 set_user_nice(thread->thread_task,
66 PRIO_TO_NICE(bh_priority));
67 } else {
68 priv->bh_thread.prio = DEFAULT_PRIO;
69 unifi_warning(priv,
70 "%s thread unsupported (%d) priority\n",
71 thread->name, bh_priority);
72 }
73 } else
74 priv->bh_thread.prio = DEFAULT_PRIO;
75 unifi_trace(priv, UDBG2, "Started %s thread\n", thread->name);
76
77 return 0;
78} /* uf_start_thread() */
79
80
81/*
82 * ---------------------------------------------------------------------------
83 * uf_stop_thread
84 *
85 * Helper function to stop a thread.
86 *
87 * Arguments:
88 * priv Pointer to OS driver structure for the device.
89 * thread Pointer to the thread object
90 *
91 * Returns:
92 *
93 * ---------------------------------------------------------------------------
94 */
95void uf_stop_thread(unifi_priv_t *priv, struct uf_thread *thread)
96{
97 if (!thread->thread_task) {
98 unifi_notice(priv, "%s thread is already stopped\n",
99 thread->name);
100 return;
101 }
102
103 unifi_trace(priv, UDBG2, "Stopping %s thread\n", thread->name);
104
105 kthread_stop(thread->thread_task);
106 thread->thread_task = NULL;
107
108} /* uf_stop_thread() */
109
110
111
112/*
113 * ---------------------------------------------------------------------------
114 * uf_wait_for_thread_to_stop
115 *
116 * Helper function to wait until a thread is stopped.
117 *
118 * Arguments:
119 * priv Pointer to OS driver structure for the device.
120 *
121 * Returns:
122 *
123 * ---------------------------------------------------------------------------
124 */
125void
126uf_wait_for_thread_to_stop(unifi_priv_t *priv, struct uf_thread *thread)
127{
128 /*
129 * kthread_stop() cannot handle the thread exiting while
130 * kthread_should_stop() is false, so sleep until kthread_stop()
131 * wakes us up
132 */
133 unifi_trace(priv, UDBG2, "%s waiting for the stop signal.\n",
134 thread->name);
135 set_current_state(TASK_INTERRUPTIBLE);
136 if (!kthread_should_stop()) {
137 unifi_trace(priv, UDBG2, "%s schedule....\n", thread->name);
138 schedule();
139 }
140
141 thread->thread_task = NULL;
142 unifi_trace(priv, UDBG2, "%s exiting....\n", thread->name);
143} /* uf_wait_for_thread_to_stop() */
144
145
146/*
147 * ---------------------------------------------------------------------------
148 * handle_bh_error
149 *
150 * This function reports an error returned from the HIP core bottom-half.
151 * Normally, implemented during the porting exercise, passing the error
152 * to the SME using unifi_sys_wifi_off_ind().
153 * The SME will try to reset the device and go through
154 * the initialisation of the UniFi.
155 *
156 * Arguments:
157 * priv Pointer to OS driver structure for the device.
158 *
159 * Returns:
160 * None.
161 * ---------------------------------------------------------------------------
162 */
163static void
164handle_bh_error(unifi_priv_t *priv)
165{
166 netInterface_priv_t *interfacePriv;
167 u8 conf_param = CONFIG_IND_ERROR;
168 u8 interfaceTag;
169
170
171 /* Block unifi_run_bh() until the error has been handled. */
172 priv->bh_thread.block_thread = 1;
173
174 /* Consider UniFi to be uninitialised */
175 priv->init_progress = UNIFI_INIT_NONE;
176
177 /* Stop the network traffic */
178 for (interfaceTag = 0;
179 interfaceTag < CSR_WIFI_NUM_INTERFACES; interfaceTag++) {
180 interfacePriv = priv->interfacePriv[interfaceTag];
181 if (interfacePriv->netdev_registered)
182 netif_carrier_off(priv->netdev[interfaceTag]);
183 }
184
185#ifdef CSR_NATIVE_LINUX
186 /* Force any client waiting on an mlme_wait_for_reply() to abort. */
187 uf_abort_mlme(priv);
188
189 /* Cancel any pending workqueue tasks */
190 flush_workqueue(priv->unifi_workqueue);
191
192#endif /* CSR_NATIVE_LINUX */
193
194 unifi_error(priv,
195 "handle_bh_error: fatal error is reported to the SME.\n");
196 /* Notify the clients (SME or unifi_manager) for the error. */
197 ul_log_config_ind(priv, &conf_param, sizeof(u8));
198
199} /* handle_bh_error() */
200
201
202
203/*
204 * ---------------------------------------------------------------------------
205 * bh_thread_function
206 *
207 * All hardware access happens in this thread.
208 * This means there is no need for locks on the hardware and we don't need
209 * to worry about reentrancy with the SDIO library.
210 * Provides and example implementation on how to call unifi_bh(), which
211 * is part of the HIP core API.
212 *
213 * It processes the events generated by unifi_run_bh() to serialise calls
214 * to unifi_bh(). It also demonstrates how the timeout parameter passed in
215 * and returned from unifi_bh() needs to be handled.
216 *
217 * Arguments:
218 * arg Pointer to OS driver structure for the device.
219 *
220 * Returns:
221 * None.
222 *
223 * Notes:
224 * When the bottom half of the driver needs to process signals, events,
225 * or simply the host status (i.e sleep mode), it invokes unifi_run_bh().
226 * Since we need all SDIO transaction to be in a single thread, the
227 * unifi_run_bh() will wake up this thread to process it.
228 *
229 * ---------------------------------------------------------------------------
230 */
231static int bh_thread_function(void *arg)
232{
233 unifi_priv_t *priv = (unifi_priv_t *)arg;
234 CsrResult csrResult;
235 long ret;
236 u32 timeout, t;
237 struct uf_thread *this_thread;
238
239 unifi_trace(priv, UDBG2, "bh_thread_function starting\n");
240
241 this_thread = &priv->bh_thread;
242
243 t = timeout = 0;
244 while (!kthread_should_stop()) {
245 /*
246 * wait until an error occurs,
247 * or we need to process something.
248 */
249 unifi_trace(priv, UDBG3, "bh_thread goes to sleep.\n");
250
251 if (timeout > 0) {
252 /* Convert t in ms to jiffies */
253 t = msecs_to_jiffies(timeout);
254 ret = wait_event_interruptible_timeout(
255 this_thread->wakeup_q,
256 (this_thread->wakeup_flag && !this_thread->block_thread) ||
257 kthread_should_stop(),
258 t);
259 timeout = (ret > 0) ? jiffies_to_msecs(ret) : 0;
260 } else {
261 ret = wait_event_interruptible(this_thread->wakeup_q,
262 (this_thread->wakeup_flag && !this_thread->block_thread) ||
263 kthread_should_stop());
264 }
265
266 if (kthread_should_stop()) {
267 unifi_trace(priv, UDBG2,
268 "bh_thread: signalled to exit\n");
269 break;
270 }
271
272 if (ret < 0) {
273 unifi_notice(priv,
274 "bh_thread: wait_event returned %d, thread will exit\n",
275 ret);
276 uf_wait_for_thread_to_stop(priv, this_thread);
277 break;
278 }
279
280 this_thread->wakeup_flag = 0;
281
282 unifi_trace(priv, UDBG3, "bh_thread calls unifi_bh().\n");
283
284 CsrSdioClaim(priv->sdio);
285 csrResult = unifi_bh(priv->card, &timeout);
286 if (csrResult != CSR_RESULT_SUCCESS) {
287 if (csrResult == CSR_WIFI_HIP_RESULT_NO_DEVICE) {
288 CsrSdioRelease(priv->sdio);
289 uf_wait_for_thread_to_stop(priv, this_thread);
290 break;
291 }
292 /* Errors must be delivered to the error task */
293 handle_bh_error(priv);
294 }
295 CsrSdioRelease(priv->sdio);
296 }
297
298 /*
299 * I would normally try to call csr_sdio_remove_irq() here to make sure
300 * that we do not get any interrupts while this thread is not running.
301 * However, the MMC/SDIO driver tries to kill its' interrupt thread.
302 * The kernel threads implementation does not allow to kill threads
303 * from a signalled to stop thread.
304 * So, instead call csr_sdio_linux_remove_irq() always after calling
305 * uf_stop_thread() to kill this thread.
306 */
307
308 unifi_trace(priv, UDBG2, "bh_thread exiting....\n");
309 return 0;
310} /* bh_thread_function() */
311
312
313/*
314 * ---------------------------------------------------------------------------
315 * uf_init_bh
316 *
317 * Helper function to start the bottom half of the driver.
318 * All we need to do here is start the I/O bh thread.
319 *
320 * Arguments:
321 * priv Pointer to OS driver structure for the device.
322 *
323 * Returns:
324 * 0 on success or else a Linux error code.
325 * ---------------------------------------------------------------------------
326 */
327int
328uf_init_bh(unifi_priv_t *priv)
329{
330 int r;
331
332 /* Enable mlme interface. */
333 priv->io_aborted = 0;
334
335
336 /* Start the BH thread */
337 r = uf_start_thread(priv, &priv->bh_thread, bh_thread_function);
338 if (r) {
339 unifi_error(priv,
340 "uf_init_bh: failed to start the BH thread.\n");
341 return r;
342 }
343
344 /* Allow interrupts */
345 r = csr_sdio_linux_install_irq(priv->sdio);
346 if (r) {
347 unifi_error(priv,
348 "uf_init_bh: failed to install the IRQ.\n");
349
350 uf_stop_thread(priv, &priv->bh_thread);
351 }
352
353 return r;
354} /* uf_init_bh() */
355
356
357/*
358 * ---------------------------------------------------------------------------
359 * unifi_run_bh
360 *
361 * Part of the HIP core lib API, implemented in the porting exercise.
362 * The bottom half of the driver calls this function when
363 * it wants to process anything that requires access to unifi.
364 * We need to call unifi_bh() which in this implementation is done
365 * by waking up the I/O thread.
366 *
367 * Arguments:
368 * ospriv Pointer to OS driver structure for the device.
369 *
370 * Returns:
371 * 0 on success or else a Linux error code.
372 *
373 * Notes:
374 * ---------------------------------------------------------------------------
375 */
376CsrResult unifi_run_bh(void *ospriv)
377{
378 unifi_priv_t *priv = ospriv;
379
380 /*
381 * If an error has occurred, we discard silently all messages from the bh
382 * until the error has been processed and the unifi has been
383 * reinitialised.
384 */
385 if (priv->bh_thread.block_thread == 1) {
386 unifi_trace(priv, UDBG3, "unifi_run_bh: discard message.\n");
387 /*
388 * Do not try to acknowledge a pending interrupt here.
389 * This function is called by unifi_send_signal()
390 * which in turn can be running in an atomic or 'disabled irq'
391 * level if a signal is sent from a workqueue task
392 * (i.e multicass addresses set). We can not hold the SDIO lock
393 * because it might sleep.
394 */
395 return CSR_RESULT_FAILURE;
396 }
397
398 priv->bh_thread.wakeup_flag = 1;
399 /* wake up I/O thread */
400 wake_up_interruptible(&priv->bh_thread.wakeup_q);
401
402 return CSR_RESULT_SUCCESS;
403} /* unifi_run_bh() */
404
diff --git a/drivers/staging/csr/csr_framework_ext.c b/drivers/staging/csr/csr_framework_ext.c
deleted file mode 100644
index 98122bce1427..000000000000
--- a/drivers/staging/csr/csr_framework_ext.c
+++ /dev/null
@@ -1,40 +0,0 @@
1/*****************************************************************************
2
3 (c) Cambridge Silicon Radio Limited 2010
4 All rights reserved and confidential information of CSR
5
6 Refer to LICENSE.txt included with this source for details
7 on the license terms.
8
9*****************************************************************************/
10
11#include <linux/kernel.h>
12#include <linux/kthread.h>
13#include <linux/module.h>
14#include <linux/freezer.h>
15#include <linux/semaphore.h>
16#include <linux/slab.h>
17#include <linux/bitops.h>
18
19#include "csr_framework_ext.h"
20
21/*----------------------------------------------------------------------------*
22 * NAME
23 * CsrThreadSleep
24 *
25 * DESCRIPTION
26 * Sleep for a given period.
27 *
28 * RETURNS
29 * void
30 *
31 *----------------------------------------------------------------------------*/
32void CsrThreadSleep(u16 sleepTimeInMs)
33{
34 unsigned long t;
35
36 /* Convert t in ms to jiffies and round up */
37 t = ((sleepTimeInMs * HZ) + 999) / 1000;
38 schedule_timeout_uninterruptible(t);
39}
40EXPORT_SYMBOL_GPL(CsrThreadSleep);
diff --git a/drivers/staging/csr/csr_framework_ext.h b/drivers/staging/csr/csr_framework_ext.h
deleted file mode 100644
index 6d26ac6173b0..000000000000
--- a/drivers/staging/csr/csr_framework_ext.h
+++ /dev/null
@@ -1,35 +0,0 @@
1#ifndef CSR_FRAMEWORK_EXT_H__
2#define CSR_FRAMEWORK_EXT_H__
3/*****************************************************************************
4
5 (c) Cambridge Silicon Radio Limited 2010
6 All rights reserved and confidential information of CSR
7
8 Refer to LICENSE.txt included with this source for details
9 on the license terms.
10
11*****************************************************************************/
12
13#include "csr_result.h"
14#include "csr_framework_ext_types.h"
15
16/* Result codes */
17#define CSR_FE_RESULT_NO_MORE_EVENTS ((CsrResult) 0x0001)
18#define CSR_FE_RESULT_INVALID_POINTER ((CsrResult) 0x0002)
19#define CSR_FE_RESULT_INVALID_HANDLE ((CsrResult) 0x0003)
20#define CSR_FE_RESULT_NO_MORE_MUTEXES ((CsrResult) 0x0004)
21#define CSR_FE_RESULT_TIMEOUT ((CsrResult) 0x0005)
22#define CSR_FE_RESULT_NO_MORE_THREADS ((CsrResult) 0x0006)
23
24/* Thread priorities */
25#define CSR_THREAD_PRIORITY_HIGHEST ((u16) 0)
26#define CSR_THREAD_PRIORITY_HIGH ((u16) 1)
27#define CSR_THREAD_PRIORITY_NORMAL ((u16) 2)
28#define CSR_THREAD_PRIORITY_LOW ((u16) 3)
29#define CSR_THREAD_PRIORITY_LOWEST ((u16) 4)
30
31#define CSR_EVENT_WAIT_INFINITE ((u16) 0xFFFF)
32
33void CsrThreadSleep(u16 sleepTimeInMs);
34
35#endif
diff --git a/drivers/staging/csr/csr_framework_ext_types.h b/drivers/staging/csr/csr_framework_ext_types.h
deleted file mode 100644
index 575598cf69b2..000000000000
--- a/drivers/staging/csr/csr_framework_ext_types.h
+++ /dev/null
@@ -1,30 +0,0 @@
1#ifndef CSR_FRAMEWORK_EXT_TYPES_H__
2#define CSR_FRAMEWORK_EXT_TYPES_H__
3/*****************************************************************************
4
5 (c) Cambridge Silicon Radio Limited 2010
6 All rights reserved and confidential information of CSR
7
8 Refer to LICENSE.txt included with this source for details
9 on the license terms.
10
11*****************************************************************************/
12
13#ifdef __KERNEL__
14#include <linux/kthread.h>
15#include <linux/semaphore.h>
16#else
17#include <pthread.h>
18#endif
19
20#ifdef __KERNEL__
21
22typedef struct semaphore CsrMutexHandle;
23
24#else /* __KERNEL __ */
25
26typedef pthread_mutex_t CsrMutexHandle;
27
28#endif /* __KERNEL__ */
29
30#endif
diff --git a/drivers/staging/csr/csr_log.h b/drivers/staging/csr/csr_log.h
deleted file mode 100644
index 982941043ddc..000000000000
--- a/drivers/staging/csr/csr_log.h
+++ /dev/null
@@ -1,223 +0,0 @@
1#ifndef CSR_LOG_H__
2#define CSR_LOG_H__
3/*****************************************************************************
4
5 (c) Cambridge Silicon Radio Limited 2010
6 All rights reserved and confidential information of CSR
7
8 Refer to LICENSE.txt included with this source for details
9 on the license terms.
10
11*****************************************************************************/
12
13#include "csr_sched.h"
14#include "csr_prim_defs.h"
15#include "csr_msgconv.h"
16
17/*
18 * Log filtering
19 */
20
21/*----------------------------------------------------*/
22/* Filtering on environment specific log levels */
23/*----------------------------------------------------*/
24typedef u32 CsrLogLevelEnvironment;
25#define CSR_LOG_LEVEL_ENVIRONMENT_OFF ((CsrLogLevelEnvironment) 0x00000000) /* No environment data/events are logged */
26#define CSR_LOG_LEVEL_ENVIRONMENT_BCI_ACL ((CsrLogLevelEnvironment) 0x00000001) /* BlueCore Channel Interface HCI Acl data are logged */
27#define CSR_LOG_LEVEL_ENVIRONMENT_BCI_HCI ((CsrLogLevelEnvironment) 0x00000002) /* BlueCore Channel Interface HCI Cmd/Evt data are logged */
28#define CSR_LOG_LEVEL_ENVIRONMENT_BCI_SCO ((CsrLogLevelEnvironment) 0x00000004) /* BlueCore Channel Interface HCI Sco data are logged */
29#define CSR_LOG_LEVEL_ENVIRONMENT_BCI_VENDOR ((CsrLogLevelEnvironment) 0x00000008) /* BlueCore Channel Interface HCI Vendor specific data are logged (This includes BCCMD, HQ, VM etc) */
30#define CSR_LOG_LEVEL_ENVIRONMENT_TRANSPORTS ((CsrLogLevelEnvironment) 0x00000010) /* Transport protocol data is logged (This includes transport protocols like BCSP, H4 etc.) */
31#define CSR_LOG_LEVEL_ENVIRONMENT_BGINT_REG ((CsrLogLevelEnvironment) 0x00000020) /* Background Interrupt registration events are logged */
32#define CSR_LOG_LEVEL_ENVIRONMENT_BGINT_UNREG ((CsrLogLevelEnvironment) 0x00000040) /* Background Interrupt unregistration events are logged */
33#define CSR_LOG_LEVEL_ENVIRONMENT_BGINT_SET ((CsrLogLevelEnvironment) 0x00000080) /* Background Interrupt set events are logged */
34#define CSR_LOG_LEVEL_ENVIRONMENT_BGINT_START ((CsrLogLevelEnvironment) 0x00000100) /* Background Interrupt start events are logged */
35#define CSR_LOG_LEVEL_ENVIRONMENT_BGINT_DONE ((CsrLogLevelEnvironment) 0x00000200) /* Background Interrupt done events are logged */
36#define CSR_LOG_LEVEL_ENVIRONMENT_PROTO ((CsrLogLevelEnvironment) 0x00000400) /* Transport protocol events are logged */
37#define CSR_LOG_LEVEL_ENVIRONMENT_PROTO_LOC ((CsrLogLevelEnvironment) 0x00000800) /* The Location where the transport protocol event occurred are logged NB: This is a supplement to CSR_LOG_LEVEL_ENVIRONMENT_PROTO, it has no effect without it */
38/* The bit masks between here are reserved for future usage */
39#define CSR_LOG_LEVEL_ENVIRONMENT_ALL ((CsrLogLevelEnvironment) 0xFFFFFFFF) /* All possible environment data/events are logged WARNING: By using this define the application also accepts future possible environment data/events in the logs */
40
41/*----------------------------------------------------*/
42/* Filtering on task specific log levels */
43/*----------------------------------------------------*/
44typedef u32 CsrLogLevelTask;
45#define CSR_LOG_LEVEL_TASK_OFF ((CsrLogLevelTask) 0x00000000) /* No events are logged for this task */
46#define CSR_LOG_LEVEL_TASK_TEXT ((CsrLogLevelTask) 0x00000001) /* Text strings printed by a task are logged NB: This bit does not affect the CSR_LOG_TEXT_LEVEL interface. This has to be configured separately */
47#define CSR_LOG_LEVEL_TASK_TEXT_LOC ((CsrLogLevelTask) 0x00000002) /* The locaction where the text string call occurred are logged. NB: This is a supplement to CSR_LOG_LEVEL_TASK_TEXT, it has no effect without it */
48#define CSR_LOG_LEVEL_TASK_STATE ((CsrLogLevelTask) 0x00000004) /* FSM state transitions in a task are logged */
49#define CSR_LOG_LEVEL_TASK_STATE_NAME ((CsrLogLevelTask) 0x00000008) /* The name of each state in a FSM state transition are logged. NB: This is a supplement to CSR_LOG_LEVEL_TASK_STATE, it has no effect without it */
50#define CSR_LOG_LEVEL_TASK_STATE_LOC ((CsrLogLevelTask) 0x00000010) /* The location where the FSM state transition occurred are logged. NB: This is a supplement to CSR_LOG_LEVEL_TASK_STATE, it has no effect without it */
51#define CSR_LOG_LEVEL_TASK_TASK_SWITCH ((CsrLogLevelTask) 0x00000020) /* Activation and deactiation of a task are logged */
52#define CSR_LOG_LEVEL_TASK_MESSAGE_PUT ((CsrLogLevelTask) 0x00000080) /* Message put operations are logged */
53#define CSR_LOG_LEVEL_TASK_MESSAGE_PUT_LOC ((CsrLogLevelTask) 0x00000100) /* The location where a message was sent are logged. NB: This is a supplement to CSR_LOG_LEVEL_TASK_MESSAGE_PUT, it has no effect without it */
54#define CSR_LOG_LEVEL_TASK_MESSAGE_GET ((CsrLogLevelTask) 0x00000200) /* Message get operations are logged */
55#define CSR_LOG_LEVEL_TASK_MESSAGE_QUEUE_PUSH ((CsrLogLevelTask) 0x00000400) /* Message push operations are logged */
56#define CSR_LOG_LEVEL_TASK_MESSAGE_QUEUE_POP ((CsrLogLevelTask) 0x00000800) /* Message pop operations are logged */
57#define CSR_LOG_LEVEL_TASK_PRIM_ONLY_TYPE ((CsrLogLevelTask) 0x00001000) /* Only the type of primitives in messages are logged. By default the entire primitive is serialized and logged */
58#define CSR_LOG_LEVEL_TASK_PRIM_APPLY_LIMIT ((CsrLogLevelTask) 0x00002000) /* An upper limit (defined by CSR_LOG_PRIM_SIZE_UPPER_LIMIT) is applied to how much of a primitive in a message are logged. NB: This limit is only applied if CSR_LOG_LEVEL_TASK_PRIM_ONLY_TYPE is _not_ defined */
59#define CSR_LOG_LEVEL_TASK_TIMER_IN ((CsrLogLevelTask) 0x00004000) /* TimedEventIn events are logged */
60#define CSR_LOG_LEVEL_TASK_TIMER_IN_LOC ((CsrLogLevelTask) 0x00008000) /* The location where a timer was started are logged. NB: This is a supplement to CSR_LOG_LEVEL_TASK_TIMER_IN, it has no effect without it */
61#define CSR_LOG_LEVEL_TASK_TIMER_CANCEL ((CsrLogLevelTask) 0x00010000) /* TimedEventCancel events are logged */
62#define CSR_LOG_LEVEL_TASK_TIMER_CANCEL_LOC ((CsrLogLevelTask) 0x00020000) /* The location where a timer was cancelled are logged. NB: This is a supplement to CSR_LOG_LEVEL_TASK_TIMER_CANCEL, it has no effect without it */
63#define CSR_LOG_LEVEL_TASK_TIMER_FIRE ((CsrLogLevelTask) 0x00040000) /* TimedEventFire events are logged */
64#define CSR_LOG_LEVEL_TASK_TIMER_DONE ((CsrLogLevelTask) 0x00080000) /* TimedEventDone events are logged */
65/* The bit masks between here are reserved for future usage */
66#define CSR_LOG_LEVEL_TASK_ALL ((CsrLogLevelTask) 0xFFFFFFFF & ~(CSR_LOG_LEVEL_TASK_PRIM_ONLY_TYPE | CSR_LOG_LEVEL_TASK_PRIM_APPLY_LIMIT)) /* All info possible to log for a task are logged. WARNING: By using this define the application also accepts future possible task data/events in the logs */
67
68u8 CsrLogEnvironmentIsFiltered(CsrLogLevelEnvironment level);
69CsrLogLevelTask CsrLogTaskFilterGet(CsrSchedQid taskId);
70u8 CsrLogTaskIsFiltered(CsrSchedQid taskId, CsrLogLevelTask level);
71
72/*
73 * Logging stuff
74 */
75#define CSR_LOG_STRINGIFY_REAL(a) (#a)
76#define CSR_LOG_STRINGIFY(a) CSR_LOG_STRINGIFY_REAL(a)
77
78typedef struct {
79 u16 primitiveType;
80 const char *primitiveName;
81 CsrMsgConvMsgEntry *messageConv; /* Private - do not use */
82} CsrLogPrimitiveInformation;
83
84typedef struct {
85 const char *techVer;
86 u32 primitiveInfoCount;
87 CsrLogPrimitiveInformation *primitiveInfo;
88} CsrLogTechInformation;
89
90/*---------------------------------*/
91/* Tech logging */
92/*---------------------------------*/
93typedef u8 bitmask8_t;
94typedef u16 bitmask16_t;
95typedef u32 bitmask32_t;
96
97#ifdef CSR_LOG_ENABLE
98#ifdef CSR_LOG_INCLUDE_FILE_NAME_AND_LINE_NUMBER
99/* DEPRECATED - replaced by csr_log_text.h */
100#define CSR_LOG_TEXT(text) \
101 do { \
102 if (!CsrLogTaskIsFiltered(CsrSchedTaskQueueGet(), CSR_LOG_LEVEL_TASK_TEXT)) { \
103 CsrLogTaskText(text, __LINE__, __FILE__); \
104 } \
105 } while (0)
106#else
107/* DEPRECATED - replaced by csr_log_text.h */
108#define CSR_LOG_TEXT(text) \
109 do { \
110 if (!CsrLogTaskIsFiltered(CsrSchedTaskQueueGet(), CSR_LOG_LEVEL_TASK_TEXT)) { \
111 CsrLogTaskText(text, 0, NULL); \
112 } \
113 } while (0)
114#endif
115#else
116#define CSR_LOG_TEXT(text)
117#endif
118
119/* DEPRECATED - replaced by csr_log_text.h */
120void CsrLogTaskText(const char *text,
121 u32 line,
122 const char *file);
123
124#define CSR_LOG_STATE_TRANSITION_MASK_FSM_NAME (0x001)
125#define CSR_LOG_STATE_TRANSITION_MASK_NEXT_STATE (0x002)
126#define CSR_LOG_STATE_TRANSITION_MASK_NEXT_STATE_STR (0x004)
127#define CSR_LOG_STATE_TRANSITION_MASK_PREV_STATE (0x008)
128#define CSR_LOG_STATE_TRANSITION_MASK_PREV_STATE_STR (0x010)
129#define CSR_LOG_STATE_TRANSITION_MASK_EVENT (0x020)
130#define CSR_LOG_STATE_TRANSITION_MASK_EVENT_STR (0x040)
131
132/* DEPRECATED - replaced by csr_log_text.h */
133void CsrLogStateTransition(bitmask16_t mask,
134 u32 identifier,
135 const char *fsm_name,
136 u32 prev_state,
137 const char *prev_state_str,
138 u32 in_event,
139 const char *in_event_str,
140 u32 next_state,
141 const char *next_state_str,
142 u32 line,
143 const char *file);
144
145/*---------------------------------*/
146/* BSP logging */
147/*---------------------------------*/
148void CsrLogSchedInit(u8 thread_id);
149void CsrLogSchedDeinit(u8 thread_id);
150
151void CsrLogSchedStart(u8 thread_id);
152void CsrLogSchedStop(u8 thread_id);
153
154void CsrLogInitTask(u8 thread_id, CsrSchedQid tskid, const char *tskName);
155void CsrLogDeinitTask(u16 task_id);
156
157void CsrLogActivate(CsrSchedQid tskid);
158void CsrLogDeactivate(CsrSchedQid tskid);
159
160#define SYNERGY_SERIALIZER_TYPE_DUMP (0x000)
161#define SYNERGY_SERIALIZER_TYPE_SER (0x001)
162
163void CsrLogMessagePut(u32 line,
164 const char *file,
165 CsrSchedQid src_task_id,
166 CsrSchedQid dst_taskid,
167 CsrSchedMsgId msg_id,
168 u16 prim_type,
169 const void *msg);
170
171void CsrLogMessageGet(CsrSchedQid src_task_id,
172 CsrSchedQid dst_taskid,
173 u8 get_res,
174 CsrSchedMsgId msg_id,
175 u16 prim_type,
176 const void *msg);
177
178void CsrLogTimedEventIn(u32 line,
179 const char *file,
180 CsrSchedQid task_id,
181 CsrSchedTid tid,
182 u32 requested_delay,
183 u16 fniarg,
184 const void *fnvarg);
185
186void CsrLogTimedEventFire(CsrSchedQid task_id,
187 CsrSchedTid tid);
188
189void CsrLogTimedEventDone(CsrSchedQid task_id,
190 CsrSchedTid tid);
191
192void CsrLogTimedEventCancel(u32 line,
193 const char *file,
194 CsrSchedQid task_id,
195 CsrSchedTid tid,
196 u8 cancel_res);
197
198void CsrLogBgintRegister(u8 thread_id,
199 CsrSchedBgint irq,
200 const char *callback,
201 const void *ptr);
202void CsrLogBgintUnregister(CsrSchedBgint irq);
203void CsrLogBgintSet(CsrSchedBgint irq);
204void CsrLogBgintServiceStart(CsrSchedBgint irq);
205void CsrLogBgintServiceDone(CsrSchedBgint irq);
206
207void CsrLogExceptionStateEvent(u16 prim_type,
208 CsrPrim msg_type,
209 u16 state,
210 u32 line,
211 const char *file);
212void CsrLogExceptionGeneral(u16 prim_type,
213 u16 state,
214 const char *text,
215 u32 line,
216 const char *file);
217void CsrLogExceptionWarning(u16 prim_type,
218 u16 state,
219 const char *text,
220 u32 line,
221 const char *file);
222
223#endif
diff --git a/drivers/staging/csr/csr_log_configure.h b/drivers/staging/csr/csr_log_configure.h
deleted file mode 100644
index 283647cf9702..000000000000
--- a/drivers/staging/csr/csr_log_configure.h
+++ /dev/null
@@ -1,39 +0,0 @@
1#ifndef CSR_LOG_CONFIGURE_H__
2#define CSR_LOG_CONFIGURE_H__
3/*****************************************************************************
4
5 (c) Cambridge Silicon Radio Limited 2010
6 All rights reserved and confidential information of CSR
7
8 Refer to LICENSE.txt included with this source for details
9 on the license terms.
10
11 *****************************************************************************/
12
13#include "csr_log.h"
14
15/*--------------------------------------------*/
16/* Filtering on log text warning levels */
17/*--------------------------------------------*/
18typedef u32 CsrLogLevelText;
19#define CSR_LOG_LEVEL_TEXT_OFF ((CsrLogLevelText) 0x0000)
20
21#define CSR_LOG_LEVEL_TEXT_CRITICAL ((CsrLogLevelText) 0x0001)
22#define CSR_LOG_LEVEL_TEXT_ERROR ((CsrLogLevelText) 0x0002)
23#define CSR_LOG_LEVEL_TEXT_WARNING ((CsrLogLevelText) 0x0004)
24#define CSR_LOG_LEVEL_TEXT_INFO ((CsrLogLevelText) 0x0008)
25#define CSR_LOG_LEVEL_TEXT_DEBUG ((CsrLogLevelText) 0x0010)
26
27#define CSR_LOG_LEVEL_TEXT_ALL ((CsrLogLevelText) 0xFFFF)
28
29/* The log text interface is used by both scheduler tasks and components outside the scheduler context.
30 * Therefore a CsrLogTextTaskId is introduced. It is effectively considered as two u16's. The lower
31 * 16 bits corresponds one2one with the scheduler queueId's (CsrSchedQid) and as such these bits can not be used
32 * by components outside scheduler tasks. The upper 16 bits are allocated for use of components outside the
33 * scheduler like drivers etc. Components in this range is defined independently by each technology. To avoid
34 * clashes the technologies are only allowed to assign values within the same restrictive range as allies to
35 * primitive identifiers. eg. for the framework components outside the scheduler is only allowed to assign
36 * taskId's in the range 0x0600xxxx to 0x06FFxxxx. And so on for other technologies. */
37typedef u32 CsrLogTextTaskId;
38
39#endif
diff --git a/drivers/staging/csr/csr_log_text.h b/drivers/staging/csr/csr_log_text.h
deleted file mode 100644
index cfcf64aa6225..000000000000
--- a/drivers/staging/csr/csr_log_text.h
+++ /dev/null
@@ -1,124 +0,0 @@
1#ifndef CSR_LOG_TEXT_H__
2#define CSR_LOG_TEXT_H__
3/*****************************************************************************
4
5 (c) Cambridge Silicon Radio Limited 2010
6 All rights reserved and confidential information of CSR
7
8 Refer to LICENSE.txt included with this source for details
9 on the license terms.
10
11*****************************************************************************/
12
13#include "csr_log_configure.h"
14
15typedef struct CsrLogSubOrigin
16{
17 u16 subOriginNumber; /* Id of the given SubOrigin */
18 const char *subOriginName; /* Prefix Text for this SubOrigin */
19} CsrLogSubOrigin;
20
21/* Register a task which is going to use the CSR_LOG_TEXT_XXX interface */
22#ifdef CSR_LOG_ENABLE
23void CsrLogTextRegister(CsrLogTextTaskId taskId, const char *taskName, u16 subOriginsLength, const CsrLogSubOrigin *subOrigins);
24#else
25#define CsrLogTextRegister(taskId, taskName, subOriginsLength, subOrigins)
26#endif
27
28/* CRITICAL: Conditions that are threatening to the integrity/stability of the
29 system as a whole. */
30#if defined(CSR_LOG_ENABLE) && !defined(CSR_LOG_LEVEL_TEXT_CRITICAL_DISABLE)
31void CsrLogTextCritical(CsrLogTextTaskId taskId, u16 subOrigin, const char *formatString, ...);
32void CsrLogTextBufferCritical(CsrLogTextTaskId taskId, u16 subOrigin, size_t bufferLength, const void *buffer, const char *formatString, ...);
33#define CSR_LOG_TEXT_CRITICAL(taskId_subOrigin_formatString_varargs) CsrLogTextCritical taskId_subOrigin_formatString_varargs
34#define CSR_LOG_TEXT_CONDITIONAL_CRITICAL(condition, logtextargs) {if (condition) {CSR_LOG_TEXT_CRITICAL(logtextargs);}}
35#define CSR_LOG_TEXT_BUFFER_CRITICAL(taskId_subOrigin_length_buffer_formatString_varargs) CsrLogTextBufferCritical taskId_subOrigin_length_buffer_formatString_varargs
36#define CSR_LOG_TEXT_BUFFER_CONDITIONAL_CRITICAL(condition, logtextbufferargs) {if (condition) {CSR_LOG_TEXT_BUFFER_CRITICAL(logtextbufferargs);}}
37#else
38#define CSR_LOG_TEXT_CRITICAL(taskId_subOrigin_formatString_varargs)
39#define CSR_LOG_TEXT_CONDITIONAL_CRITICAL(condition, logtextargs)
40#define CSR_LOG_TEXT_BUFFER_CRITICAL(taskId_subOrigin_length_buffer_formatString_varargs)
41#define CSR_LOG_TEXT_BUFFER_CONDITIONAL_CRITICAL(condition, logtextbufferargs)
42#endif
43
44/* ERROR: Malfunction of a component rendering it unable to operate correctly,
45 causing lack of functionality but not loss of system integrity/stability. */
46#if defined(CSR_LOG_ENABLE) && !defined(CSR_LOG_LEVEL_TEXT_ERROR_DISABLE)
47void CsrLogTextError(CsrLogTextTaskId taskId, u16 subOrigin, const char *formatString, ...);
48void CsrLogTextBufferError(CsrLogTextTaskId taskId, u16 subOrigin, size_t bufferLength, const void *buffer, const char *formatString, ...);
49#define CSR_LOG_TEXT_ERROR(taskId_subOrigin_formatString_varargs) CsrLogTextError taskId_subOrigin_formatString_varargs
50#define CSR_LOG_TEXT_CONDITIONAL_ERROR(condition, logtextargs) {if (condition) {CSR_LOG_TEXT_ERROR(logtextargs);}}
51#define CSR_LOG_TEXT_BUFFER_ERROR(taskId_subOrigin_length_buffer_formatString_varargs) CsrLogTextBufferError taskId_subOrigin_length_buffer_formatString_varargs
52#define CSR_LOG_TEXT_BUFFER_CONDITIONAL_ERROR(condition, logtextbufferargs) {if (condition) {CSR_LOG_TEXT_BUFFER_ERROR(logtextbufferargs);}}
53#else
54#define CSR_LOG_TEXT_ERROR(taskId_subOrigin_formatString_varargs)
55#define CSR_LOG_TEXT_CONDITIONAL_ERROR(condition, logtextargs)
56#define CSR_LOG_TEXT_BUFFER_ERROR(taskId_subOrigin_length_buffer_formatString_varargs)
57#define CSR_LOG_TEXT_BUFFER_CONDITIONAL_ERROR(condition, logtextbufferargs)
58#endif
59
60/* WARNING: Conditions that are unexpected and indicative of possible problems
61 or violations of specifications, where the result of such deviations does not
62 lead to malfunction of the component. */
63#if defined(CSR_LOG_ENABLE) && !defined(CSR_LOG_LEVEL_TEXT_WARNING_DISABLE)
64void CsrLogTextWarning(CsrLogTextTaskId taskId, u16 subOrigin, const char *formatString, ...);
65void CsrLogTextBufferWarning(CsrLogTextTaskId taskId, u16 subOrigin, size_t bufferLength, const void *buffer, const char *formatString, ...);
66#define CSR_LOG_TEXT_WARNING(taskId_subOrigin_formatString_varargs) CsrLogTextWarning taskId_subOrigin_formatString_varargs
67#define CSR_LOG_TEXT_CONDITIONAL_WARNING(condition, logtextargs) {if (condition) {CSR_LOG_TEXT_WARNING(logtextargs);}}
68#define CSR_LOG_TEXT_BUFFER_WARNING(taskId_subOrigin_length_buffer_formatString_varargs) CsrLogTextBufferWarning taskId_subOrigin_length_buffer_formatString_varargs
69#define CSR_LOG_TEXT_BUFFER_CONDITIONAL_WARNING(condition, logtextbufferargs) {if (condition) {CSR_LOG_TEXT_BUFFER_WARNING(logtextbufferargs);}}
70#else
71#define CSR_LOG_TEXT_WARNING(taskId_subOrigin_formatString_varargs)
72#define CSR_LOG_TEXT_CONDITIONAL_WARNING(condition, logtextargs)
73#define CSR_LOG_TEXT_BUFFER_WARNING(taskId_subOrigin_length_buffer_formatString_varargs)
74#define CSR_LOG_TEXT_BUFFER_CONDITIONAL_WARNING(condition, logtextbufferargs)
75#endif
76
77/* INFO: Important events that may aid in determining the conditions under which
78 the more severe conditions are encountered. */
79#if defined(CSR_LOG_ENABLE) && !defined(CSR_LOG_LEVEL_TEXT_INFO_DISABLE)
80void CsrLogTextInfo(CsrLogTextTaskId taskId, u16 subOrigin, const char *formatString, ...);
81void CsrLogTextBufferInfo(CsrLogTextTaskId taskId, u16 subOrigin, size_t bufferLength, const void *buffer, const char *formatString, ...);
82#define CSR_LOG_TEXT_INFO(taskId_subOrigin_formatString_varargs) CsrLogTextInfo taskId_subOrigin_formatString_varargs
83#define CSR_LOG_TEXT_CONDITIONAL_INFO(condition, logtextargs) {if (condition) {CSR_LOG_TEXT_INFO(logtextargs);}}
84#define CSR_LOG_TEXT_BUFFER_INFO(taskId_subOrigin_length_buffer_formatString_varargs) CsrLogTextBufferInfo taskId_subOrigin_length_buffer_formatString_varargs
85#define CSR_LOG_TEXT_BUFFER_CONDITIONAL_INFO(condition, logtextbufferargs) {if (condition) {CSR_LOG_TEXT_BUFFER_INFO(logtextbufferargs);}}
86#else
87#define CSR_LOG_TEXT_INFO(taskId_subOrigin_formatString_varargs)
88#define CSR_LOG_TEXT_CONDITIONAL_INFO(condition, logtextargs)
89#define CSR_LOG_TEXT_BUFFER_INFO(taskId_subOrigin_length_buffer_formatString_varargs)
90#define CSR_LOG_TEXT_BUFFER_CONDITIONAL_INFO(condition, logtextbufferargs)
91#endif
92
93/* DEBUG: Similar to INFO, but dedicated to events that occur more frequently. */
94#if defined(CSR_LOG_ENABLE) && !defined(CSR_LOG_LEVEL_TEXT_DEBUG_DISABLE)
95void CsrLogTextDebug(CsrLogTextTaskId taskId, u16 subOrigin, const char *formatString, ...);
96void CsrLogTextBufferDebug(CsrLogTextTaskId taskId, u16 subOrigin, size_t bufferLength, const void *buffer, const char *formatString, ...);
97#define CSR_LOG_TEXT_DEBUG(taskId_subOrigin_formatString_varargs) CsrLogTextDebug taskId_subOrigin_formatString_varargs
98#define CSR_LOG_TEXT_CONDITIONAL_DEBUG(condition, logtextargs) {if (condition) {CSR_LOG_TEXT_DEBUG(logtextargs);}}
99#define CSR_LOG_TEXT_BUFFER_DEBUG(taskId_subOrigin_length_buffer_formatString_varargs) CsrLogTextBufferDebug taskId_subOrigin_length_buffer_formatString_varargs
100#define CSR_LOG_TEXT_BUFFER_CONDITIONAL_DEBUG(condition, logtextbufferargs) {if (condition) {CSR_LOG_TEXT_BUFFER_DEBUG(logtextbufferargs);}}
101#else
102#define CSR_LOG_TEXT_DEBUG(taskId_subOrigin_formatString_varargs)
103#define CSR_LOG_TEXT_CONDITIONAL_DEBUG(condition, logtextargs)
104#define CSR_LOG_TEXT_BUFFER_DEBUG(taskId_subOrigin_length_buffer_formatString_varargs)
105#define CSR_LOG_TEXT_BUFFER_CONDITIONAL_DEBUG(condition, logtextbufferargs)
106#endif
107
108/* CSR_LOG_TEXT_ASSERT (CRITICAL) */
109#ifdef CSR_LOG_ENABLE
110#define CSR_LOG_TEXT_ASSERT(origin, suborigin, condition) \
111 {if (!(condition)) {CSR_LOG_TEXT_CRITICAL((origin, suborigin, "Assertion \"%s\" failed at %s:%u", #condition, __FILE__, __LINE__));}}
112#else
113#define CSR_LOG_TEXT_ASSERT(origin, suborigin, condition)
114#endif
115
116/* CSR_LOG_TEXT_UNHANDLED_PRIM (CRITICAL) */
117#ifdef CSR_LOG_ENABLE
118#define CSR_LOG_TEXT_UNHANDLED_PRIMITIVE(origin, suborigin, primClass, primType) \
119 CSR_LOG_TEXT_CRITICAL((origin, suborigin, "Unhandled primitive 0x%04X:0x%04X at %s:%u", primClass, primType, __FILE__, __LINE__))
120#else
121#define CSR_LOG_TEXT_UNHANDLED_PRIMITIVE(origin, suborigin, primClass, primType)
122#endif
123
124#endif
diff --git a/drivers/staging/csr/csr_macro.h b/drivers/staging/csr/csr_macro.h
deleted file mode 100644
index c47f1d91b6fa..000000000000
--- a/drivers/staging/csr/csr_macro.h
+++ /dev/null
@@ -1,39 +0,0 @@
1#ifndef CSR_MACRO_H__
2#define CSR_MACRO_H__
3/*****************************************************************************
4
5 (c) Cambridge Silicon Radio Limited 2010
6 All rights reserved and confidential information of CSR
7
8 Refer to LICENSE.txt included with this source for details
9 on the license terms.
10
11*****************************************************************************/
12
13#include <linux/types.h>
14
15#define FALSE (0)
16#define TRUE (1)
17
18/*------------------------------------------------------------------*/
19/* Endian conversion */
20/*------------------------------------------------------------------*/
21#define CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr) (((u16) ((u8 *) (ptr))[0]) | ((u16) ((u8 *) (ptr))[1]) << 8)
22#define CSR_GET_UINT32_FROM_LITTLE_ENDIAN(ptr) (((u32) ((u8 *) (ptr))[0]) | ((u32) ((u8 *) (ptr))[1]) << 8 | \
23 ((u32) ((u8 *) (ptr))[2]) << 16 | ((u32) ((u8 *) (ptr))[3]) << 24)
24#define CSR_COPY_UINT16_TO_LITTLE_ENDIAN(uint, ptr) ((u8 *) (ptr))[0] = ((u8) ((uint) & 0x00FF)); \
25 ((u8 *) (ptr))[1] = ((u8) ((uint) >> 8))
26#define CSR_COPY_UINT32_TO_LITTLE_ENDIAN(uint, ptr) ((u8 *) (ptr))[0] = ((u8) ((uint) & 0x000000FF)); \
27 ((u8 *) (ptr))[1] = ((u8) (((uint) >> 8) & 0x000000FF)); \
28 ((u8 *) (ptr))[2] = ((u8) (((uint) >> 16) & 0x000000FF)); \
29 ((u8 *) (ptr))[3] = ((u8) (((uint) >> 24) & 0x000000FF))
30
31/*------------------------------------------------------------------*/
32/* Misc */
33/*------------------------------------------------------------------*/
34/* Use this macro on unused local variables that cannot be removed (such as
35 unused function parameters). This will quell warnings from certain compilers
36 and static code analysis tools like Lint and Valgrind. */
37#define CSR_UNUSED(x) ((void) (x))
38
39#endif
diff --git a/drivers/staging/csr/csr_msg_transport.h b/drivers/staging/csr/csr_msg_transport.h
deleted file mode 100644
index 8d88e7836567..000000000000
--- a/drivers/staging/csr/csr_msg_transport.h
+++ /dev/null
@@ -1,17 +0,0 @@
1#ifndef CSR_MSG_TRANSPORT_H__
2#define CSR_MSG_TRANSPORT_H__
3/*****************************************************************************
4
5 (c) Cambridge Silicon Radio Limited 2010
6 All rights reserved and confidential information of CSR
7
8 Refer to LICENSE.txt included with this source for details
9 on the license terms.
10
11*****************************************************************************/
12
13#ifndef CsrMsgTransport
14#define CsrMsgTransport CsrSchedMessagePut
15#endif
16
17#endif /* CSR_MSG_TRANSPORT */
diff --git a/drivers/staging/csr/csr_msgconv.c b/drivers/staging/csr/csr_msgconv.c
deleted file mode 100644
index db5e845e60f5..000000000000
--- a/drivers/staging/csr/csr_msgconv.c
+++ /dev/null
@@ -1,291 +0,0 @@
1/*****************************************************************************
2
3 (c) Cambridge Silicon Radio Limited 2010
4 All rights reserved and confidential information of CSR
5
6 Refer to LICENSE.txt included with this source for details
7 on the license terms.
8
9*****************************************************************************/
10
11#include <linux/module.h>
12#include <linux/types.h>
13#include <linux/slab.h>
14#include "csr_sched.h"
15#include "csr_msgconv.h"
16#include "csr_macro.h"
17
18static CsrMsgConvEntry *converter;
19
20CsrMsgConvPrimEntry *CsrMsgConvFind(u16 primType)
21{
22 CsrMsgConvPrimEntry *ptr = NULL;
23
24 if (converter)
25 {
26 ptr = converter->profile_converters;
27 while (ptr)
28 {
29 if (ptr->primType == primType)
30 {
31 break;
32 }
33 else
34 {
35 ptr = ptr->next;
36 }
37 }
38 }
39
40 return ptr;
41}
42
43static const CsrMsgConvMsgEntry *find_msg_converter(CsrMsgConvPrimEntry *ptr, u16 msgType)
44{
45 const CsrMsgConvMsgEntry *cv = ptr->conv;
46 if (ptr->lookupFunc)
47 {
48 return (const CsrMsgConvMsgEntry *) ptr->lookupFunc((CsrMsgConvMsgEntry *) cv, msgType);
49 }
50
51 while (cv)
52 {
53 if (cv->serFunc == NULL)
54 {
55 /* We've reached the end of the chain */
56 cv = NULL;
57 break;
58 }
59
60 if (cv->msgType == msgType)
61 {
62 break;
63 }
64 else
65 {
66 cv++;
67 }
68 }
69
70 return cv;
71}
72
73static void *deserialize_data(u16 primType,
74 size_t length,
75 u8 *data)
76{
77 CsrMsgConvPrimEntry *ptr;
78 u8 *ret;
79
80 ptr = CsrMsgConvFind(primType);
81
82 if (ptr)
83 {
84 const CsrMsgConvMsgEntry *cv;
85 u16 msgId = 0;
86 size_t offset = 0;
87 CsrUint16Des(&msgId, data, &offset);
88
89 cv = find_msg_converter(ptr, msgId);
90 if (cv)
91 {
92 ret = cv->deserFunc(data, length);
93 }
94 else
95 {
96 ret = NULL;
97 }
98 }
99 else
100 {
101 ret = NULL;
102 }
103
104 return ret;
105}
106
107static size_t sizeof_message(u16 primType, void *msg)
108{
109 CsrMsgConvPrimEntry *ptr = CsrMsgConvFind(primType);
110 size_t ret;
111
112 if (ptr)
113 {
114 const CsrMsgConvMsgEntry *cv;
115 u16 msgId = *(u16 *) msg;
116
117 cv = find_msg_converter(ptr, msgId);
118 if (cv)
119 {
120 ret = cv->sizeofFunc(msg);
121 }
122 else
123 {
124 ret = 0;
125 }
126 }
127 else
128 {
129 ret = 0;
130 }
131
132 return ret;
133}
134
135static u8 free_message(u16 primType, u8 *data)
136{
137 CsrMsgConvPrimEntry *ptr;
138 u8 ret;
139
140 ptr = CsrMsgConvFind(primType);
141
142 if (ptr)
143 {
144 const CsrMsgConvMsgEntry *cv;
145 u16 msgId = *(u16 *) data;
146
147 cv = find_msg_converter(ptr, msgId);
148 if (cv)
149 {
150 cv->freeFunc(data);
151 ret = TRUE;
152 }
153 else
154 {
155 ret = FALSE;
156 }
157 }
158 else
159 {
160 ret = FALSE;
161 }
162
163 return ret;
164}
165
166static u8 *serialize_message(u16 primType,
167 void *msg,
168 size_t *length,
169 u8 *buffer)
170{
171 CsrMsgConvPrimEntry *ptr;
172 u8 *ret;
173
174 ptr = CsrMsgConvFind(primType);
175
176 *length = 0;
177
178 if (ptr)
179 {
180 const CsrMsgConvMsgEntry *cv;
181
182 cv = find_msg_converter(ptr, *(u16 *) msg);
183 if (cv)
184 {
185 ret = cv->serFunc(buffer, length, msg);
186 }
187 else
188 {
189 ret = NULL;
190 }
191 }
192 else
193 {
194 ret = NULL;
195 }
196
197 return ret;
198}
199
200size_t CsrMsgConvSizeof(u16 primType, void *msg)
201{
202 return sizeof_message(primType, msg);
203}
204
205u8 *CsrMsgConvSerialize(u8 *buffer, size_t maxBufferOffset, size_t *offset, u16 primType, void *msg)
206{
207 if (converter)
208 {
209 size_t serializedLength;
210 u8 *bufSerialized;
211 u8 *bufOffset = &buffer[*offset];
212 bufSerialized = converter->serialize_message(primType, msg, &serializedLength, bufOffset);
213 *offset += serializedLength;
214 return bufSerialized;
215 }
216 else
217 {
218 return NULL;
219 }
220}
221
222/* Insert profile converter at head of converter list. */
223void CsrMsgConvInsert(u16 primType, const CsrMsgConvMsgEntry *ce)
224{
225 CsrMsgConvPrimEntry *pc;
226 pc = CsrMsgConvFind(primType);
227
228 if (pc)
229 {
230 /* Already registered. Do nothing */
231 }
232 else
233 {
234 pc = kmalloc(sizeof(*pc), GFP_KERNEL);
235 pc->primType = primType;
236 pc->conv = ce;
237 pc->lookupFunc = NULL;
238 pc->next = converter->profile_converters;
239 converter->profile_converters = pc;
240 }
241}
242EXPORT_SYMBOL_GPL(CsrMsgConvInsert);
243
244CsrMsgConvMsgEntry *CsrMsgConvFindEntry(u16 primType, u16 msgType)
245{
246 CsrMsgConvPrimEntry *ptr = CsrMsgConvFind(primType);
247 if (ptr)
248 {
249 return (CsrMsgConvMsgEntry *) find_msg_converter(ptr, msgType);
250 }
251 return NULL;
252}
253EXPORT_SYMBOL_GPL(CsrMsgConvFindEntry);
254
255CsrMsgConvMsgEntry *CsrMsgConvFindEntryByMsg(u16 primType, const void *msg)
256{
257 CsrMsgConvPrimEntry *ptr = CsrMsgConvFind(primType);
258 if (ptr && msg)
259 {
260 u16 msgType = *((u16 *) msg);
261 return (CsrMsgConvMsgEntry *) find_msg_converter(ptr, msgType);
262 }
263 return NULL;
264}
265
266void CsrMsgConvCustomLookupRegister(u16 primType, CsrMsgCustomLookupFunc *lookupFunc)
267{
268 CsrMsgConvPrimEntry *ptr = CsrMsgConvFind(primType);
269 if (ptr)
270 {
271 ptr->lookupFunc = lookupFunc;
272 }
273}
274EXPORT_SYMBOL_GPL(CsrMsgConvCustomLookupRegister);
275
276CsrMsgConvEntry *CsrMsgConvInit(void)
277{
278 if (!converter)
279 {
280 converter = kmalloc(sizeof(CsrMsgConvEntry), GFP_KERNEL);
281
282 converter->profile_converters = NULL;
283 converter->free_message = free_message;
284 converter->sizeof_message = sizeof_message;
285 converter->serialize_message = serialize_message;
286 converter->deserialize_data = deserialize_data;
287 }
288
289 return converter;
290}
291EXPORT_SYMBOL_GPL(CsrMsgConvInit);
diff --git a/drivers/staging/csr/csr_msgconv.h b/drivers/staging/csr/csr_msgconv.h
deleted file mode 100644
index 7e4dd388ae37..000000000000
--- a/drivers/staging/csr/csr_msgconv.h
+++ /dev/null
@@ -1,78 +0,0 @@
1#ifndef CSR_MSGCONV_H__
2#define CSR_MSGCONV_H__
3
4/*****************************************************************************
5
6 (c) Cambridge Silicon Radio Limited 2010
7 All rights reserved and confidential information of CSR
8
9 Refer to LICENSE.txt included with this source for details
10 on the license terms.
11
12*****************************************************************************/
13
14#include <linux/types.h>
15#include "csr_prim_defs.h"
16#include "csr_sched.h"
17
18typedef size_t (CsrMsgSizeofFunc)(void *msg);
19typedef u8 *(CsrMsgSerializeFunc)(u8 *buffer, size_t *length, void *msg);
20typedef void (CsrMsgFreeFunc)(void *msg);
21typedef void *(CsrMsgDeserializeFunc)(u8 *buffer, size_t length);
22
23/* Converter entry for one message type */
24typedef struct CsrMsgConvMsgEntry
25{
26 u16 msgType;
27 CsrMsgSizeofFunc *sizeofFunc;
28 CsrMsgSerializeFunc *serFunc;
29 CsrMsgDeserializeFunc *deserFunc;
30 CsrMsgFreeFunc *freeFunc;
31} CsrMsgConvMsgEntry;
32
33/* Optional lookup function */
34typedef CsrMsgConvMsgEntry *(CsrMsgCustomLookupFunc)(CsrMsgConvMsgEntry *ce, u16 msgType);
35
36/* All converter entries for one specific primitive */
37typedef struct CsrMsgConvPrimEntry
38{
39 u16 primType;
40 const CsrMsgConvMsgEntry *conv;
41 CsrMsgCustomLookupFunc *lookupFunc;
42 struct CsrMsgConvPrimEntry *next;
43} CsrMsgConvPrimEntry;
44
45typedef struct
46{
47 CsrMsgConvPrimEntry *profile_converters;
48 void *(*deserialize_data)(u16 primType, size_t length, u8 * data);
49 u8 (*free_message)(u16 primType, u8 *data);
50 size_t (*sizeof_message)(u16 primType, void *msg);
51 u8 *(*serialize_message)(u16 primType, void *msg,
52 size_t * length,
53 u8 * buffer);
54} CsrMsgConvEntry;
55
56size_t CsrMsgConvSizeof(u16 primType, void *msg);
57u8 *CsrMsgConvSerialize(u8 *buffer, size_t maxBufferOffset, size_t *offset, u16 primType, void *msg);
58void CsrMsgConvCustomLookupRegister(u16 primType, CsrMsgCustomLookupFunc *lookupFunc);
59void CsrMsgConvInsert(u16 primType, const CsrMsgConvMsgEntry *ce);
60CsrMsgConvPrimEntry *CsrMsgConvFind(u16 primType);
61CsrMsgConvMsgEntry *CsrMsgConvFindEntry(u16 primType, u16 msgType);
62CsrMsgConvMsgEntry *CsrMsgConvFindEntryByMsg(u16 primType, const void *msg);
63CsrMsgConvEntry *CsrMsgConvInit(void);
64
65/* Prototypes for primitive type serializers */
66void CsrUint8Ser(u8 *buffer, size_t *offset, u8 value);
67void CsrUint16Ser(u8 *buffer, size_t *offset, u16 value);
68void CsrUint32Ser(u8 *buffer, size_t *offset, u32 value);
69void CsrMemCpySer(u8 *buffer, size_t *offset, const void *value, size_t length);
70void CsrCharStringSer(u8 *buffer, size_t *offset, const char *value);
71
72void CsrUint8Des(u8 *value, u8 *buffer, size_t *offset);
73void CsrUint16Des(u16 *value, u8 *buffer, size_t *offset);
74void CsrUint32Des(u32 *value, u8 *buffer, size_t *offset);
75void CsrMemCpyDes(void *value, u8 *buffer, size_t *offset, size_t length);
76void CsrCharStringDes(char **value, u8 *buffer, size_t *offset);
77
78#endif
diff --git a/drivers/staging/csr/csr_prim_defs.h b/drivers/staging/csr/csr_prim_defs.h
deleted file mode 100644
index 81a1eaac30d9..000000000000
--- a/drivers/staging/csr/csr_prim_defs.h
+++ /dev/null
@@ -1,55 +0,0 @@
1#ifndef CSR_PRIM_DEFS_H__
2#define CSR_PRIM_DEFS_H__
3/*****************************************************************************
4
5 (c) Cambridge Silicon Radio Limited 2010
6 All rights reserved and confidential information of CSR
7
8 Refer to LICENSE.txt included with this source for details
9 on the license terms.
10
11*****************************************************************************/
12
13/************************************************************************************
14 * Segmentation of primitives in upstream and downstream segment
15 ************************************************************************************/
16typedef u16 CsrPrim;
17#define CSR_PRIM_UPSTREAM ((CsrPrim) (0x8000))
18
19/************************************************************************************
20 * Primitive definitions for Synergy framework
21 ************************************************************************************/
22#define CSR_SYNERGY_EVENT_CLASS_BASE ((u16) (0x0600))
23
24#define CSR_HCI_PRIM ((u16) (0x0000 | CSR_SYNERGY_EVENT_CLASS_BASE))
25#define CSR_BCCMD_PRIM ((u16) (0x0001 | CSR_SYNERGY_EVENT_CLASS_BASE))
26#define CSR_HQ_PRIM ((u16) (0x0002 | CSR_SYNERGY_EVENT_CLASS_BASE))
27#define CSR_VM_PRIM ((u16) (0x0003 | CSR_SYNERGY_EVENT_CLASS_BASE))
28#define CSR_TM_BLUECORE_PRIM ((u16) (0x0004 | CSR_SYNERGY_EVENT_CLASS_BASE))
29#define CSR_FP_PRIM ((u16) (0x0005 | CSR_SYNERGY_EVENT_CLASS_BASE))
30#define CSR_IP_SOCKET_PRIM ((u16) (0x0006 | CSR_SYNERGY_EVENT_CLASS_BASE))
31#define CSR_IP_ETHER_PRIM ((u16) (0x0007 | CSR_SYNERGY_EVENT_CLASS_BASE))
32#define CSR_IP_IFCONFIG_PRIM ((u16) (0x0008 | CSR_SYNERGY_EVENT_CLASS_BASE))
33#define CSR_IP_INTERNAL_PRIM ((u16) (0x0009 | CSR_SYNERGY_EVENT_CLASS_BASE))
34#define CSR_FSAL_PRIM ((u16) (0x000A | CSR_SYNERGY_EVENT_CLASS_BASE))
35#define CSR_DATA_STORE_PRIM ((u16) (0x000B | CSR_SYNERGY_EVENT_CLASS_BASE))
36#define CSR_AM_PRIM ((u16) (0x000C | CSR_SYNERGY_EVENT_CLASS_BASE))
37#define CSR_TLS_PRIM ((u16) (0x000D | CSR_SYNERGY_EVENT_CLASS_BASE))
38#define CSR_DHCP_SERVER_PRIM ((u16) (0x000E | CSR_SYNERGY_EVENT_CLASS_BASE))
39#define CSR_TFTP_PRIM ((u16) (0x000F | CSR_SYNERGY_EVENT_CLASS_BASE))
40#define CSR_DSPM_PRIM ((u16) (0x0010 | CSR_SYNERGY_EVENT_CLASS_BASE))
41#define CSR_TLS_INTERNAL_PRIM ((u16) (0x0011 | CSR_SYNERGY_EVENT_CLASS_BASE))
42
43#define NUMBER_OF_CSR_FW_EVENTS (CSR_DSPM_PRIM - CSR_SYNERGY_EVENT_CLASS_BASE + 1)
44
45#define CSR_SYNERGY_EVENT_CLASS_MISC_BASE ((u16) (0x06A0))
46
47#define CSR_UI_PRIM ((u16) (0x0000 | CSR_SYNERGY_EVENT_CLASS_MISC_BASE))
48#define CSR_APP_PRIM ((u16) (0x0001 | CSR_SYNERGY_EVENT_CLASS_MISC_BASE))
49#define CSR_SDIO_PROBE_PRIM ((u16) (0x0002 | CSR_SYNERGY_EVENT_CLASS_MISC_BASE))
50
51#define NUMBER_OF_CSR_FW_MISC_EVENTS (CSR_SDIO_PROBE_PRIM - CSR_SYNERGY_EVENT_CLASS_MISC_BASE + 1)
52
53#define CSR_ENV_PRIM ((u16) (0x00FF | CSR_SYNERGY_EVENT_CLASS_MISC_BASE))
54
55#endif /* CSR_PRIM_DEFS_H__ */
diff --git a/drivers/staging/csr/csr_result.h b/drivers/staging/csr/csr_result.h
deleted file mode 100644
index cbb607d943c7..000000000000
--- a/drivers/staging/csr/csr_result.h
+++ /dev/null
@@ -1,17 +0,0 @@
1#ifndef CSR_RESULT_H__
2#define CSR_RESULT_H__
3/*****************************************************************************
4
5 (c) Cambridge Silicon Radio Limited 2010
6 All rights reserved and confidential information of CSR
7
8 Refer to LICENSE.txt included with this source for details
9 on the license terms.
10
11*****************************************************************************/
12
13typedef u16 CsrResult;
14#define CSR_RESULT_SUCCESS ((CsrResult) 0x0000)
15#define CSR_RESULT_FAILURE ((CsrResult) 0xFFFF)
16
17#endif
diff --git a/drivers/staging/csr/csr_sched.h b/drivers/staging/csr/csr_sched.h
deleted file mode 100644
index c7d672c59f5b..000000000000
--- a/drivers/staging/csr/csr_sched.h
+++ /dev/null
@@ -1,85 +0,0 @@
1#ifndef CSR_SCHED_H__
2#define CSR_SCHED_H__
3/*****************************************************************************
4
5 (c) Cambridge Silicon Radio Limited 2010
6 All rights reserved and confidential information of CSR
7
8 Refer to LICENSE.txt included with this source for details
9 on the license terms.
10
11*****************************************************************************/
12#include <linux/types.h>
13#include "csr_time.h"
14
15/* An identifier issued by the scheduler. */
16typedef u32 CsrSchedIdentifier;
17
18/* A task identifier */
19typedef u16 CsrSchedTaskId;
20
21/* A queue identifier */
22typedef u16 CsrSchedQid;
23
24/* A message identifier */
25typedef CsrSchedIdentifier CsrSchedMsgId;
26
27/* A timer event identifier */
28typedef CsrSchedIdentifier CsrSchedTid;
29#define CSR_SCHED_TID_INVALID ((CsrSchedTid) 0)
30
31/* Time constants. */
32#define CSR_SCHED_TIME_MAX (0xFFFFFFFF)
33#define CSR_SCHED_MILLISECOND (1000)
34#define CSR_SCHED_SECOND (1000 * CSR_SCHED_MILLISECOND)
35#define CSR_SCHED_MINUTE (60 * CSR_SCHED_SECOND)
36
37/* Queue and primitive that identifies the environment */
38#define CSR_SCHED_TASK_ID 0xFFFF
39#define CSR_SCHED_PRIM (CSR_SCHED_TASK_ID)
40#define CSR_SCHED_EXCLUDED_MODULE_QUEUE 0xFFFF
41
42/*
43 * Background interrupt definitions
44 */
45typedef u16 CsrSchedBgint;
46#define CSR_SCHED_BGINT_INVALID ((CsrSchedBgint) 0xFFFF)
47
48/*----------------------------------------------------------------------------*
49 * NAME
50 * CsrSchedMessagePut
51 *
52 * DESCRIPTION
53 * Sends a message consisting of the integer "mi" and the void * pointer
54 * "mv" to the message queue "q".
55 *
56 * "mi" and "mv" are neither inspected nor changed by the scheduler - the
57 * task that owns "q" is expected to make sense of the values. "mv" may
58 * be null.
59 *
60 * NOTE
61 * If "mv" is not null then it will typically be a chunk of kmalloc()ed
62 * memory, though there is no need for it to be so. Tasks should normally
63 * obey the convention that when a message built with kmalloc()ed memory
64 * is given to CsrSchedMessagePut() then ownership of the memory is ceded to the
65 * scheduler - and eventually to the recipient task. I.e., the receiver of
66 * the message will be expected to kfree() the message storage.
67 *
68 * RETURNS
69 * void.
70 *
71 *----------------------------------------------------------------------------*/
72#if defined(CSR_LOG_ENABLE) && defined(CSR_LOG_INCLUDE_FILE_NAME_AND_LINE_NUMBER)
73void CsrSchedMessagePutStringLog(CsrSchedQid q,
74 u16 mi,
75 void *mv,
76 u32 line,
77 const char *file);
78#define CsrSchedMessagePut(q, mi, mv) CsrSchedMessagePutStringLog((q), (mi), (mv), __LINE__, __FILE__)
79#else
80void CsrSchedMessagePut(CsrSchedQid q,
81 u16 mi,
82 void *mv);
83#endif
84
85#endif
diff --git a/drivers/staging/csr/csr_sdio.h b/drivers/staging/csr/csr_sdio.h
deleted file mode 100644
index 0971d135abf6..000000000000
--- a/drivers/staging/csr/csr_sdio.h
+++ /dev/null
@@ -1,723 +0,0 @@
1#ifndef CSR_SDIO_H__
2#define CSR_SDIO_H__
3/*****************************************************************************
4
5 (c) Cambridge Silicon Radio Limited 2010
6 All rights reserved and confidential information of CSR
7
8 Refer to LICENSE.txt included with this source for details
9 on the license terms.
10
11*****************************************************************************/
12
13#include "csr_result.h"
14
15/* Result Codes */
16#define CSR_SDIO_RESULT_INVALID_VALUE ((CsrResult) 1) /* Invalid argument value */
17#define CSR_SDIO_RESULT_NO_DEVICE ((CsrResult) 2) /* The specified device is no longer present */
18#define CSR_SDIO_RESULT_CRC_ERROR ((CsrResult) 3) /* The transmitted/received data or command response contained a CRC error */
19#define CSR_SDIO_RESULT_TIMEOUT ((CsrResult) 4) /* No command response or data received from device, or function enable/disable did not succeed within timeout period */
20#define CSR_SDIO_RESULT_NOT_RESET ((CsrResult) 5) /* The device was not reset */
21
22/* Features (for use in features member of CsrSdioFunction) */
23#define CSR_SDIO_FEATURE_BYTE_MODE 0x00000001 /* Transfer sizes do not have to be a multiple of block size */
24#define CSR_SDIO_FEATURE_DMA_CAPABLE_MEM_REQUIRED 0x00000002 /* Bulk operations require DMA friendly memory */
25
26/* CsrSdioFunctionId wildcards (for use in CsrSdioFunctionId members) */
27#define CSR_SDIO_ANY_MANF_ID 0xFFFF
28#define CSR_SDIO_ANY_CARD_ID 0xFFFF
29#define CSR_SDIO_ANY_SDIO_FUNCTION 0xFF
30#define CSR_SDIO_ANY_SDIO_INTERFACE 0xFF
31
32/*----------------------------------------------------------------------------*
33 * NAME
34 * CsrSdioFunctionId
35 *
36 * DESCRIPTION
37 * This structure describes one or more functions of a device, based on
38 * four qualitative measures. The CsrSdioFunctionId wildcard defines can be
39 * used for making the CsrSdioFunctionId match more than one function.
40 *
41 * MEMBERS
42 * manfId - Vendor ID (or CSR_SDIO_ANY_MANF_ID).
43 * cardId - Device ID (or CSR_SDIO_ANY_CARD_ID).
44 * sdioFunction - SDIO Function number (or CSR_SDIO_ANY_SDIO_FUNCTION).
45 * sdioInterface - SDIO Standard Interface Code (or CSR_SDIO_ANY_SDIO_INTERFACE)
46 *
47 *----------------------------------------------------------------------------*/
48typedef struct
49{
50 u16 manfId; /* Vendor ID to match or CSR_SDIO_ANY_MANF_ID */
51 u16 cardId; /* Device ID to match or CSR_SDIO_ANY_CARD_ID */
52 u8 sdioFunction; /* SDIO Function number to match or CSR_SDIO_ANY_SDIO_FUNCTION */
53 u8 sdioInterface; /* SDIO Standard Interface Code to match or CSR_SDIO_ANY_SDIO_INTERFACE */
54} CsrSdioFunctionId;
55
56/*----------------------------------------------------------------------------*
57 * NAME
58 * CsrSdioFunction
59 *
60 * DESCRIPTION
61 * This structure represents a single function on a device.
62 *
63 * MEMBERS
64 * sdioId - A CsrSdioFunctionId describing this particular function. The
65 * subfield shall not contain any CsrSdioFunctionId wildcards. The
66 * subfields shall describe the specific single function
67 * represented by this structure.
68 * blockSize - Actual configured block size, or 0 if unconfigured.
69 * features - Bit mask with any of CSR_SDIO_FEATURE_* set.
70 * device - Handle of device containing the function. If two functions have
71 * the same device handle, they reside on the same device.
72 * driverData - For use by the Function Driver. The SDIO Driver shall not
73 * attempt to dereference the pointer.
74 * priv - For use by the SDIO Driver. The Function Driver shall not attempt
75 * to dereference the pointer.
76 *
77 *
78 *----------------------------------------------------------------------------*/
79typedef struct
80{
81 CsrSdioFunctionId sdioId;
82 u16 blockSize; /* Actual configured block size, or 0 if unconfigured */
83 u32 features; /* Bit mask with any of CSR_SDIO_FEATURE_* set */
84 void *device; /* Handle of device containing the function */
85 void *driverData; /* For use by the Function Driver */
86 void *priv; /* For use by the SDIO Driver */
87} CsrSdioFunction;
88
89/*----------------------------------------------------------------------------*
90 * NAME
91 * CsrSdioInsertedCallback, CsrSdioRemovedCallback
92 *
93 * DESCRIPTION
94 * CsrSdioInsertedCallback is called when a function becomes available to
95 * a registered Function Driver that supports the function.
96 * CsrSdioRemovedCallback is called when a function is no longer available
97 * to a Function Driver, either because the device has been removed, or the
98 * Function Driver has been unregistered.
99 *
100 * NOTE: These functions are implemented by the Function Driver, and are
101 * passed as function pointers in the CsrSdioFunctionDriver struct.
102 *
103 * PARAMETERS
104 * function - Pointer to struct representing the function.
105 *
106 *----------------------------------------------------------------------------*/
107typedef void (*CsrSdioInsertedCallback)(CsrSdioFunction *function);
108typedef void (*CsrSdioRemovedCallback)(CsrSdioFunction *function);
109
110/*----------------------------------------------------------------------------*
111 * NAME
112 * CsrSdioInterruptDsrCallback, CsrSdioInterruptCallback
113 *
114 * DESCRIPTION
115 * CsrSdioInterruptCallback is called when an interrupt occurs on the
116 * the device associated with the specified function.
117 *
118 * NOTE: These functions are implemented by the Function Driver, and are
119 * passed as function pointers in the CsrSdioFunctionDriver struct.
120 *
121 * PARAMETERS
122 * function - Pointer to struct representing the function.
123 *
124 * RETURNS (only CsrSdioInterruptCallback)
125 * A pointer to a CsrSdioInterruptDsrCallback function.
126 *
127 *----------------------------------------------------------------------------*/
128typedef void (*CsrSdioInterruptDsrCallback)(CsrSdioFunction *function);
129typedef CsrSdioInterruptDsrCallback (*CsrSdioInterruptCallback)(CsrSdioFunction *function);
130
131/*----------------------------------------------------------------------------*
132 * NAME
133 * CsrSdioSuspendCallback, CsrSdioResumeCallback
134 *
135 * DESCRIPTION
136 * CsrSdioSuspendCallback is called when the system is preparing to go
137 * into a suspended state. CsrSdioResumeCallback is called when the system
138 * has entered an active state again.
139 *
140 * NOTE: These functions are implemented by the Function Driver, and are
141 * passed as function pointers in the CsrSdioFunctionDriver struct.
142 *
143 * PARAMETERS
144 * function - Pointer to struct representing the function.
145 *
146 *----------------------------------------------------------------------------*/
147typedef void (*CsrSdioSuspendCallback)(CsrSdioFunction *function);
148typedef void (*CsrSdioResumeCallback)(CsrSdioFunction *function);
149
150/*----------------------------------------------------------------------------*
151 * NAME
152 * CsrSdioAsyncCallback, CsrSdioAsyncDsrCallback
153 *
154 * DESCRIPTION
155 * CsrSdioAsyncCallback is called when an asynchronous operation completes.
156 *
157 * NOTE: These functions are implemented by the Function Driver, and are
158 * passed as function pointers in the function calls that initiate
159 * the operation.
160 *
161 * PARAMETERS
162 * function - Pointer to struct representing the function.
163 * result - The result of the operation that completed. See the description
164 * of the initiating function for possible result values.
165 *
166 * RETURNS (only CsrSdioAsyncCallback)
167 * A pointer to a CsrSdioAsyncDsrCallback function.
168 *
169 *----------------------------------------------------------------------------*/
170typedef void (*CsrSdioAsyncDsrCallback)(CsrSdioFunction *function, CsrResult result);
171typedef CsrSdioAsyncDsrCallback (*CsrSdioAsyncCallback)(CsrSdioFunction *function, CsrResult result);
172
173/*----------------------------------------------------------------------------*
174 * NAME
175 * CsrSdioFunctionDriver
176 *
177 * DESCRIPTION
178 * Structure representing a Function Driver.
179 *
180 * MEMBERS
181 * inserted - Callback, see description of CsrSdioInsertedCallback.
182 * removed - Callback, see description of CsrSdioRemovedCallback.
183 * intr - Callback, see description of CsrSdioInterruptCallback.
184 * suspend - Callback, see description of CsrSdioSuspendCallback.
185 * resume - Callback, see description of CsrSdioResumeCallback.
186 * ids - Array of CsrSdioFunctionId describing one or more functions that
187 * are supported by the Function Driver.
188 * idsCount - Length of the ids array.
189 * priv - For use by the SDIO Driver. The Function Driver may initialise
190 * it to NULL, but shall otherwise not access the pointer or attempt
191 * to dereference it.
192 *
193 *----------------------------------------------------------------------------*/
194typedef struct
195{
196 CsrSdioInsertedCallback inserted;
197 CsrSdioRemovedCallback removed;
198 CsrSdioInterruptCallback intr;
199 CsrSdioSuspendCallback suspend;
200 CsrSdioResumeCallback resume;
201 CsrSdioFunctionId *ids;
202 u8 idsCount;
203 void *priv; /* For use by the SDIO Driver */
204} CsrSdioFunctionDriver;
205
206/*----------------------------------------------------------------------------*
207 * NAME
208 * CsrSdioFunctionDriverRegister
209 *
210 * DESCRIPTION
211 * Register a Function Driver.
212 *
213 * PARAMETERS
214 * functionDriver - Pointer to struct describing the Function Driver.
215 *
216 * RETURNS
217 * CSR_RESULT_SUCCESS - The Function Driver was successfully
218 * registered.
219 * CSR_RESULT_FAILURE - Unable to register the function driver,
220 * because of an unspecified/unknown error. The
221 * Function Driver has not been registered.
222 * CSR_SDIO_RESULT_INVALID_VALUE - The specified Function Driver pointer
223 * does not point at a valid Function
224 * Driver structure, or some of the members
225 * contain invalid entries.
226 *
227 *----------------------------------------------------------------------------*/
228CsrResult CsrSdioFunctionDriverRegister(CsrSdioFunctionDriver *functionDriver);
229
230/*----------------------------------------------------------------------------*
231 * NAME
232 * CsrSdioFunctionDriverUnregister
233 *
234 * DESCRIPTION
235 * Unregister a previously registered Function Driver.
236 *
237 * PARAMETERS
238 * functionDriver - pointer to struct describing the Function Driver.
239 *
240 *----------------------------------------------------------------------------*/
241void CsrSdioFunctionDriverUnregister(CsrSdioFunctionDriver *functionDriver);
242
243/*----------------------------------------------------------------------------*
244 * NAME
245 * CsrSdioFunctionEnable, CsrSdioFunctionDisable
246 *
247 * DESCRIPTION
248 * Enable/disable the specified function by setting/clearing the
249 * corresponding bit in the I/O Enable register in function 0, and then
250 * periodically reading the related bit in the I/O Ready register until it
251 * is set/clear, limited by an implementation defined timeout.
252 *
253 * PARAMETERS
254 * function - Pointer to struct representing the function.
255 *
256 * RETURNS
257 * CSR_RESULT_SUCCESS - The specified function was enabled/disabled.
258 * CSR_RESULT_FAILURE - Unspecified/unknown error.
259 * CSR_SDIO_RESULT_NO_DEVICE - The device does not exist anymore.
260 * CSR_SDIO_RESULT_CRC_ERROR - A CRC error occurred. The state of the
261 * related bit in the I/O Enable register is
262 * undefined.
263 * CSR_SDIO_RESULT_TIMEOUT - No response from the device, or the related
264 * bit in the I/O ready register was not
265 * set/cleared within the timeout period.
266 *
267 * NOTE: If the SDIO R5 response is available, and either of the
268 * FUNCTION_NUMBER or OUT_OF_RANGE bits are set,
269 * CSR_SDIO_RESULT_INVALID_VALUE shall be returned. If the ERROR bit
270 * is set (but none of FUNCTION_NUMBER or OUT_OF_RANGE),
271 * CSR_RESULT_FAILURE shall be returned. The ILLEGAL_COMMAND and
272 * COM_CRC_ERROR bits shall be ignored.
273 *
274 * If the CSPI response is available, and any of the
275 * FUNCTION_DISABLED or CLOCK_DISABLED bits are set,
276 * CSR_SDIO_RESULT_INVALID_VALUE will be returned.
277 *
278 *----------------------------------------------------------------------------*/
279CsrResult CsrSdioFunctionEnable(CsrSdioFunction *function);
280CsrResult CsrSdioFunctionDisable(CsrSdioFunction *function);
281
282/*----------------------------------------------------------------------------*
283 * NAME
284 * CsrSdioInterruptEnable, CsrSdioInterruptDisable
285 *
286 * DESCRIPTION
287 * Enable/disable the interrupt for the specified function by
288 * setting/clearing the corresponding bit in the INT Enable register in
289 * function 0.
290 *
291 * PARAMETERS
292 * function - Pointer to struct representing the function.
293 *
294 * RETURNS
295 * CSR_RESULT_SUCCESS - The specified function was enabled/disabled.
296 * CSR_RESULT_FAILURE - Unspecified/unknown error.
297 * CSR_SDIO_RESULT_NO_DEVICE - The device does not exist anymore.
298 * CSR_SDIO_RESULT_CRC_ERROR - A CRC error occurred. The state of the
299 * related bit in the INT Enable register is
300 * unchanged.
301 * CSR_SDIO_RESULT_INVALID_VALUE - The specified function cannot be
302 * enabled/disabled, because it either
303 * does not exist or it is not possible to
304 * individually enable/disable functions.
305 * CSR_SDIO_RESULT_TIMEOUT - No response from the device.
306 *
307 * NOTE: If the SDIO R5 response is available, and either of the
308 * FUNCTION_NUMBER or OUT_OF_RANGE bits are set,
309 * CSR_SDIO_RESULT_INVALID_VALUE shall be returned. If the ERROR bit
310 * is set (but none of FUNCTION_NUMBER or OUT_OF_RANGE),
311 * CSR_RESULT_FAILURE shall be returned. The ILLEGAL_COMMAND and
312 * COM_CRC_ERROR bits shall be ignored.
313 *
314 * If the CSPI response is available, and any of the
315 * FUNCTION_DISABLED or CLOCK_DISABLED bits are set,
316 * CSR_SDIO_RESULT_INVALID_VALUE will be returned.
317 *
318 *----------------------------------------------------------------------------*/
319CsrResult CsrSdioInterruptEnable(CsrSdioFunction *function);
320CsrResult CsrSdioInterruptDisable(CsrSdioFunction *function);
321
322/*----------------------------------------------------------------------------*
323 * NAME
324 * CsrSdioInterruptAcknowledge
325 *
326 * DESCRIPTION
327 * Acknowledge that a signalled interrupt has been handled. Shall only
328 * be called once, and exactly once for each signalled interrupt to the
329 * corresponding function.
330 *
331 * PARAMETERS
332 * function - Pointer to struct representing the function to which the
333 * event was signalled.
334 *
335 *----------------------------------------------------------------------------*/
336void CsrSdioInterruptAcknowledge(CsrSdioFunction *function);
337
338/*----------------------------------------------------------------------------*
339 * NAME
340 * CsrSdioInsertedAcknowledge, CsrSdioRemovedAcknowledge
341 *
342 * DESCRIPTION
343 * Acknowledge that a signalled inserted/removed event has been handled.
344 * Shall only be called once, and exactly once for each signalled event to
345 * the corresponding function.
346 *
347 * PARAMETERS
348 * function - Pointer to struct representing the function to which the
349 * inserted/removed event was signalled.
350 * result (CsrSdioInsertedAcknowledge only)
351 * CSR_RESULT_SUCCESS - The Function Driver has accepted the
352 * function, and the function is attached to
353 * the Function Driver until the
354 * CsrSdioRemovedCallback is called and
355 * acknowledged.
356 * CSR_RESULT_FAILURE - Unable to accept the function. The
357 * function is not attached to the Function
358 * Driver, and it may be passed to another
359 * Function Driver which supports the
360 * function.
361 *
362 *----------------------------------------------------------------------------*/
363void CsrSdioInsertedAcknowledge(CsrSdioFunction *function, CsrResult result);
364void CsrSdioRemovedAcknowledge(CsrSdioFunction *function);
365
366/*----------------------------------------------------------------------------*
367 * NAME
368 * CsrSdioSuspendAcknowledge, CsrSdioResumeAcknowledge
369 *
370 * DESCRIPTION
371 * Acknowledge that a signalled suspend/resume event has been handled. Shall only
372 * be called once, and exactly once for each signalled event to the
373 * corresponding function.
374 *
375 * PARAMETERS
376 * function - Pointer to struct representing the function to which the
377 * event was signalled.
378 * result
379 * CSR_RESULT_SUCCESS - Successfully suspended/resumed.
380 * CSR_RESULT_FAILURE - Unspecified/unknown error.
381 *
382 *----------------------------------------------------------------------------*/
383void CsrSdioSuspendAcknowledge(CsrSdioFunction *function, CsrResult result);
384void CsrSdioResumeAcknowledge(CsrSdioFunction *function, CsrResult result);
385
386/*----------------------------------------------------------------------------*
387 * NAME
388 * CsrSdioBlockSizeSet
389 *
390 * DESCRIPTION
391 * Set the block size to use for the function. The actual configured block
392 * size shall be the minimum of:
393 * 1) Maximum block size supported by the function.
394 * 2) Maximum block size supported by the host controller.
395 * 3) The block size specified by the blockSize argument.
396 *
397 * When this function returns, the actual configured block size is
398 * available in the blockSize member of the function struct.
399 *
400 * PARAMETERS
401 * function - Pointer to struct representing the function.
402 * blockSize - Block size to use for the function. Valid range is 1 to
403 * 2048.
404 *
405 * RETURNS
406 * CSR_RESULT_SUCCESS - The block size register on the chip
407 * was updated.
408 * CSR_RESULT_FAILURE - Unspecified/unknown error.
409 * CSR_SDIO_RESULT_INVALID_VALUE - One or more arguments were invalid.
410 * CSR_SDIO_RESULT_NO_DEVICE - The device does not exist anymore.
411 * CSR_SDIO_RESULT_CRC_ERROR - A CRC error occurred. The configured block
412 * size is undefined.
413 * CSR_SDIO_RESULT_TIMEOUT - No response from the device.
414 *
415 * NOTE: If the SDIO R5 response is available, and the FUNCTION_NUMBER
416 * bit is set, CSR_SDIO_RESULT_INVALID_VALUE shall be returned.
417 * If the ERROR bit is set (but not FUNCTION_NUMBER),
418 * CSR_RESULT_FAILURE shall be returned. The ILLEGAL_COMMAND and
419 * COM_CRC_ERROR bits shall be ignored.
420 *
421 * If the CSPI response is available, and any of the
422 * FUNCTION_DISABLED or CLOCK_DISABLED bits are set,
423 * CSR_SDIO_RESULT_INVALID_VALUE will be returned.
424 *
425 * NOTE: Setting the block size requires two individual operations. The
426 * implementation shall ignore the OUT_OF_RANGE bit of the SDIO R5
427 * response for the first operation, as the partially configured
428 * block size may be out of range, even if the final block size
429 * (after the second operation) is in the valid range.
430 *
431 *----------------------------------------------------------------------------*/
432CsrResult CsrSdioBlockSizeSet(CsrSdioFunction *function, u16 blockSize);
433
434/*----------------------------------------------------------------------------*
435 * NAME
436 * CsrSdioMaxBusClockFrequencySet
437 *
438 * DESCRIPTION
439 * Set the maximum clock frequency to use for the device associated with
440 * the specified function. The actual configured clock frequency for the
441 * device shall be the minimum of:
442 * 1) Maximum clock frequency supported by the device.
443 * 2) Maximum clock frequency supported by the host controller.
444 * 3) Maximum clock frequency specified for any function on the same
445 * device.
446 *
447 * If the clock frequency exceeds 25MHz, it is the responsibility of the
448 * SDIO driver to enable high speed mode on the device, using the standard
449 * defined procedure, before increasing the frequency beyond the limit.
450 *
451 * Note that the clock frequency configured affects all functions on the
452 * same device.
453 *
454 * PARAMETERS
455 * function - Pointer to struct representing the function.
456 * maxFrequency - The maximum clock frequency for the function in Hertz.
457 *
458 * RETURNS
459 * CSR_RESULT_SUCCESS - The maximum clock frequency was successfully
460 * set for the function.
461 * CSR_RESULT_FAILURE - Unspecified/unknown error.
462 * CSR_SDIO_RESULT_INVALID_VALUE - One or more arguments were invalid.
463 * CSR_SDIO_RESULT_NO_DEVICE - The device does not exist anymore.
464 *
465 * NOTE: If the SDIO R5 response is available, and the FUNCTION_NUMBER
466 * bit is set, CSR_SDIO_RESULT_INVALID_VALUE shall be returned.
467 * If the ERROR bit is set (but not FUNCTION_NUMBER),
468 * CSR_RESULT_FAILURE shall be returned. The ILLEGAL_COMMAND and
469 * COM_CRC_ERROR bits shall be ignored.
470 *
471 * If the CSPI response is available, and any of the
472 * FUNCTION_DISABLED or CLOCK_DISABLED bits are set,
473 * CSR_SDIO_RESULT_INVALID_VALUE will be returned.
474 *
475 *
476 *----------------------------------------------------------------------------*/
477CsrResult CsrSdioMaxBusClockFrequencySet(CsrSdioFunction *function, u32 maxFrequency);
478
479/*----------------------------------------------------------------------------*
480 * NAME
481 * CsrSdioRead8, CsrSdioWrite8, CsrSdioRead8Async, CsrSdioWrite8Async
482 *
483 * DESCRIPTION
484 * Read/write an 8bit value from/to the specified register address.
485 *
486 * PARAMETERS
487 * function - Pointer to struct representing the function.
488 * address - Register address within the function.
489 * data - The data to read/write.
490 * callback - The function to call on operation completion.
491 *
492 * RETURNS
493 * CSR_RESULT_SUCCESS - The data was successfully read/written.
494 * CSR_RESULT_FAILURE - Unspecified/unknown error.
495 * CSR_SDIO_RESULT_INVALID_VALUE - One or more arguments were invalid.
496 * CSR_SDIO_RESULT_NO_DEVICE - The device does not exist anymore.
497 * CSR_SDIO_RESULT_CRC_ERROR - A CRC error occurred. No data read/written.
498 * CSR_SDIO_RESULT_TIMEOUT - No response from the device.
499 *
500 * NOTE: If the SDIO R5 response is available, and either of the
501 * FUNCTION_NUMBER or OUT_OF_RANGE bits are set,
502 * CSR_SDIO_RESULT_INVALID_VALUE shall be returned. If the ERROR bit
503 * is set (but none of FUNCTION_NUMBER or OUT_OF_RANGE),
504 * CSR_RESULT_FAILURE shall be returned. The ILLEGAL_COMMAND and
505 * COM_CRC_ERROR bits shall be ignored.
506 *
507 * If the CSPI response is available, and any of the
508 * FUNCTION_DISABLED or CLOCK_DISABLED bits are set,
509 * CSR_SDIO_RESULT_INVALID_VALUE will be returned.
510 *
511 * NOTE: The CsrSdioRead8Async and CsrSdioWrite8Async functions return
512 * immediately, and the supplied callback function is called when the
513 * operation is complete. The result value is given as an argument to
514 * the callback function.
515 *
516 *----------------------------------------------------------------------------*/
517CsrResult CsrSdioRead8(CsrSdioFunction *function, u32 address, u8 *data);
518CsrResult CsrSdioWrite8(CsrSdioFunction *function, u32 address, u8 data);
519void CsrSdioRead8Async(CsrSdioFunction *function, u32 address, u8 *data, CsrSdioAsyncCallback callback);
520void CsrSdioWrite8Async(CsrSdioFunction *function, u32 address, u8 data, CsrSdioAsyncCallback callback);
521
522/*----------------------------------------------------------------------------*
523 * NAME
524 * CsrSdioRead16, CsrSdioWrite16, CsrSdioRead16Async, CsrSdioWrite16Async
525 *
526 * DESCRIPTION
527 * Read/write a 16bit value from/to the specified register address.
528 *
529 * PARAMETERS
530 * function - Pointer to struct representing the function.
531 * address - Register address within the function.
532 * data - The data to read/write.
533 * callback - The function to call on operation completion.
534 *
535 * RETURNS
536 * CSR_RESULT_SUCCESS - The data was successfully read/written.
537 * CSR_RESULT_FAILURE - Unspecified/unknown error.
538 * CSR_SDIO_RESULT_INVALID_VALUE - One or more arguments were invalid.
539 * CSR_SDIO_RESULT_NO_DEVICE - The device does not exist anymore.
540 * CSR_SDIO_RESULT_CRC_ERROR - A CRC error occurred. Data may have been
541 * partially read/written.
542 * CSR_SDIO_RESULT_TIMEOUT - No response from the device.
543 *
544 * NOTE: If the SDIO R5 response is available, and either of the
545 * FUNCTION_NUMBER or OUT_OF_RANGE bits are set,
546 * CSR_SDIO_RESULT_INVALID_VALUE shall be returned. If the ERROR bit
547 * is set (but none of FUNCTION_NUMBER or OUT_OF_RANGE),
548 * CSR_RESULT_FAILURE shall be returned. The ILLEGAL_COMMAND and
549 * COM_CRC_ERROR bits shall be ignored.
550 *
551 * If the CSPI response is available, and any of the
552 * FUNCTION_DISABLED or CLOCK_DISABLED bits are set,
553 * CSR_SDIO_RESULT_INVALID_VALUE will be returned.
554 *
555 * NOTE: The CsrSdioRead16Async and CsrSdioWrite16Async functions return
556 * immediately, and the supplied callback function is called when the
557 * operation is complete. The result value is given as an argument to
558 * the callback function.
559 *
560 *----------------------------------------------------------------------------*/
561CsrResult CsrSdioRead16(CsrSdioFunction *function, u32 address, u16 *data);
562CsrResult CsrSdioWrite16(CsrSdioFunction *function, u32 address, u16 data);
563void CsrSdioRead16Async(CsrSdioFunction *function, u32 address, u16 *data, CsrSdioAsyncCallback callback);
564void CsrSdioWrite16Async(CsrSdioFunction *function, u32 address, u16 data, CsrSdioAsyncCallback callback);
565
566/*----------------------------------------------------------------------------*
567 * NAME
568 * CsrSdioF0Read8, CsrSdioF0Write8, CsrSdioF0Read8Async,
569 * CsrSdioF0Write8Async
570 *
571 * DESCRIPTION
572 * Read/write an 8bit value from/to the specified register address in
573 * function 0.
574 *
575 * PARAMETERS
576 * function - Pointer to struct representing the function.
577 * address - Register address within the function.
578 * data - The data to read/write.
579 * callback - The function to call on operation completion.
580 *
581 * RETURNS
582 * CSR_RESULT_SUCCESS - The data was successfully read/written.
583 * CSR_RESULT_FAILURE - Unspecified/unknown error.
584 * CSR_SDIO_RESULT_INVALID_VALUE - One or more arguments were invalid.
585 * CSR_SDIO_RESULT_NO_DEVICE - The device does not exist anymore.
586 * CSR_SDIO_RESULT_CRC_ERROR - A CRC error occurred. No data read/written.
587 * CSR_SDIO_RESULT_TIMEOUT - No response from the device.
588 *
589 * NOTE: If the SDIO R5 response is available, and either of the
590 * FUNCTION_NUMBER or OUT_OF_RANGE bits are set,
591 * CSR_SDIO_RESULT_INVALID_VALUE shall be returned. If the ERROR bit
592 * is set (but none of FUNCTION_NUMBER or OUT_OF_RANGE),
593 * CSR_RESULT_FAILURE shall be returned. The ILLEGAL_COMMAND and
594 * COM_CRC_ERROR bits shall be ignored.
595 *
596 * If the CSPI response is available, and any of the
597 * FUNCTION_DISABLED or CLOCK_DISABLED bits are set,
598 * CSR_SDIO_RESULT_INVALID_VALUE will be returned.
599 *
600 * NOTE: The CsrSdioF0Read8Async and CsrSdioF0Write8Async functions return
601 * immediately, and the supplied callback function is called when the
602 * operation is complete. The result value is given as an argument to
603 * the callback function.
604 *
605 *----------------------------------------------------------------------------*/
606CsrResult CsrSdioF0Read8(CsrSdioFunction *function, u32 address, u8 *data);
607CsrResult CsrSdioF0Write8(CsrSdioFunction *function, u32 address, u8 data);
608void CsrSdioF0Read8Async(CsrSdioFunction *function, u32 address, u8 *data, CsrSdioAsyncCallback callback);
609void CsrSdioF0Write8Async(CsrSdioFunction *function, u32 address, u8 data, CsrSdioAsyncCallback callback);
610
611/*----------------------------------------------------------------------------*
612 * NAME
613 * CsrSdioRead, CsrSdioWrite, CsrSdioReadAsync, CsrSdioWriteAsync
614 *
615 * DESCRIPTION
616 * Read/write a specified number of bytes from/to the specified register
617 * address.
618 *
619 * PARAMETERS
620 * function - Pointer to struct representing the function.
621 * address - Register address within the function.
622 * data - The data to read/write.
623 * length - Number of bytes to read/write.
624 * callback - The function to call on operation completion.
625 *
626 * RETURNS
627 * CSR_RESULT_SUCCESS - The data was successfully read/written.
628 * CSR_RESULT_FAILURE - Unspecified/unknown error.
629 * CSR_SDIO_RESULT_INVALID_VALUE - One or more arguments were invalid.
630 * CSR_SDIO_RESULT_NO_DEVICE - The device does not exist anymore.
631 * CSR_SDIO_RESULT_CRC_ERROR - A CRC error occurred. Data may have been
632 * partially read/written.
633 * CSR_SDIO_RESULT_TIMEOUT - No response from the device.
634 *
635 * NOTE: If the SDIO R5 response is available, and either of the
636 * FUNCTION_NUMBER or OUT_OF_RANGE bits are set,
637 * CSR_SDIO_RESULT_INVALID_VALUE shall be returned. If the ERROR bit
638 * is set (but none of FUNCTION_NUMBER or OUT_OF_RANGE),
639 * CSR_RESULT_FAILURE shall be returned. The ILLEGAL_COMMAND and
640 * COM_CRC_ERROR bits shall be ignored.
641 *
642 * If the CSPI response is available, and any of the
643 * FUNCTION_DISABLED or CLOCK_DISABLED bits are set,
644 * CSR_SDIO_RESULT_INVALID_VALUE will be returned.
645 *
646 * NOTE: The CsrSdioReadAsync and CsrSdioWriteAsync functions return
647 * immediately, and the supplied callback function is called when the
648 * operation is complete. The result value is given as an argument to
649 * the callback function.
650 *
651 *----------------------------------------------------------------------------*/
652CsrResult CsrSdioRead(CsrSdioFunction *function, u32 address, void *data, u32 length);
653CsrResult CsrSdioWrite(CsrSdioFunction *function, u32 address, const void *data, u32 length);
654void CsrSdioReadAsync(CsrSdioFunction *function, u32 address, void *data, u32 length, CsrSdioAsyncCallback callback);
655void CsrSdioWriteAsync(CsrSdioFunction *function, u32 address, const void *data, u32 length, CsrSdioAsyncCallback callback);
656
657/*----------------------------------------------------------------------------*
658 * NAME
659 * CsrSdioPowerOn, CsrSdioPowerOff
660 *
661 * DESCRIPTION
662 * Power on/off the device.
663 *
664 * PARAMETERS
665 * function - Pointer to struct representing the function that resides on
666 * the device to power on/off.
667 *
668 * RETURNS (only CsrSdioPowerOn)
669 * CSR_RESULT_SUCCESS - Power was successfully reapplied and the device
670 * has been reinitialised.
671 * CSR_RESULT_FAILURE - Unspecified/unknown error.
672 * CSR_SDIO_RESULT_NO_DEVICE - The device does not exist anymore.
673 * CSR_SDIO_RESULT_CRC_ERROR - A CRC error occurred during reinitialisation.
674 * CSR_SDIO_RESULT_TIMEOUT - No response from the device during
675 * reinitialisation.
676 * CSR_SDIO_RESULT_NOT_RESET - The power was not removed by the
677 * CsrSdioPowerOff call. The state of the
678 * device is unchanged.
679 *
680 *----------------------------------------------------------------------------*/
681CsrResult CsrSdioPowerOn(CsrSdioFunction *function);
682void CsrSdioPowerOff(CsrSdioFunction *function);
683
684/*----------------------------------------------------------------------------*
685 * NAME
686 * CsrSdioHardReset
687 *
688 * DESCRIPTION
689 * Perform a hardware reset of the device.
690 *
691 * PARAMETERS
692 * function - Pointer to struct representing the function that resides on
693 * the device to hard reset.
694 *
695 * RETURNS
696 * CSR_RESULT_SUCCESS - Reset was successfully performed and the device
697 * has been reinitialised.
698 * CSR_RESULT_FAILURE - Unspecified/unknown error.
699 * CSR_SDIO_RESULT_NO_DEVICE - The device does not exist anymore.
700 * CSR_SDIO_RESULT_CRC_ERROR - A CRC error occurred during reinitialisation.
701 * CSR_SDIO_RESULT_TIMEOUT - No response from the device during
702 * reinitialisation.
703 * CSR_SDIO_RESULT_NOT_RESET - The reset was not applied because it is not
704 * supported. The state of the device is
705 * unchanged.
706 *
707 *----------------------------------------------------------------------------*/
708CsrResult CsrSdioHardReset(CsrSdioFunction *function);
709
710/*----------------------------------------------------------------------------*
711 * NAME
712 * CsrSdioFunctionActive, CsrSdioFunctionIdle
713 *
714 * DESCRIPTION
715 * Mark the function as active/idle. NOTE(review): description was missing in the original; semantics inferred from the names only — confirm intended behavior (likely a power-management hint to the SDIO driver).
716 * PARAMETERS
717 * function - Pointer to struct representing the function.
718 *
719 *----------------------------------------------------------------------------*/
720void CsrSdioFunctionActive(CsrSdioFunction *function);
721void CsrSdioFunctionIdle(CsrSdioFunction *function);
722
723#endif
diff --git a/drivers/staging/csr/csr_serialize_primitive_types.c b/drivers/staging/csr/csr_serialize_primitive_types.c
deleted file mode 100644
index 9713b9afef64..000000000000
--- a/drivers/staging/csr/csr_serialize_primitive_types.c
+++ /dev/null
@@ -1,100 +0,0 @@
1/*****************************************************************************
2
3 (c) Cambridge Silicon Radio Limited 2010
4 All rights reserved and confidential information of CSR
5
6 Refer to LICENSE.txt included with this source for details
7 on the license terms.
8
9*****************************************************************************/
10
11#include <linux/module.h>
12#include <linux/slab.h>
13#include "csr_prim_defs.h"
14#include "csr_msgconv.h"
15#include "csr_macro.h"
16
17void CsrUint8Des(u8 *value, u8 *buffer, size_t *offset)
18{
19 *value = buffer[*offset];
20 *offset += sizeof(*value);
21}
22EXPORT_SYMBOL_GPL(CsrUint8Des);
23
24void CsrUint16Des(u16 *value, u8 *buffer, size_t *offset)
25{
26 *value = (buffer[*offset + 0] << 0) |
27 (buffer[*offset + 1] << 8);
28 *offset += sizeof(*value);
29}
30EXPORT_SYMBOL_GPL(CsrUint16Des);
31
32void CsrUint32Des(u32 *value, u8 *buffer, size_t *offset)
33{
34 *value = (buffer[*offset + 0] << 0) |
35 (buffer[*offset + 1] << 8) |
36 (buffer[*offset + 2] << 16) |
37 (buffer[*offset + 3] << 24);
38 *offset += sizeof(*value);
39}
40EXPORT_SYMBOL_GPL(CsrUint32Des);
41
42void CsrMemCpyDes(void *value, u8 *buffer, size_t *offset, size_t length)
43{
44 memcpy(value, &buffer[*offset], length);
45 *offset += length;
46}
47EXPORT_SYMBOL_GPL(CsrMemCpyDes);
48
49void CsrCharStringDes(char **value, u8 *buffer, size_t *offset)
50{
51 *value = kstrdup((char *) &buffer[*offset], GFP_KERNEL);
52 *offset += strlen(*value) + 1;
53}
54EXPORT_SYMBOL_GPL(CsrCharStringDes);
55
56void CsrUint8Ser(u8 *buffer, size_t *offset, u8 value)
57{
58 buffer[*offset] = value;
59 *offset += sizeof(value);
60}
61EXPORT_SYMBOL_GPL(CsrUint8Ser);
62
63void CsrUint16Ser(u8 *buffer, size_t *offset, u16 value)
64{
65 buffer[*offset + 0] = (u8) ((value >> 0) & 0xFF);
66 buffer[*offset + 1] = (u8) ((value >> 8) & 0xFF);
67 *offset += sizeof(value);
68}
69EXPORT_SYMBOL_GPL(CsrUint16Ser);
70
71void CsrUint32Ser(u8 *buffer, size_t *offset, u32 value)
72{
73 buffer[*offset + 0] = (u8) ((value >> 0) & 0xFF);
74 buffer[*offset + 1] = (u8) ((value >> 8) & 0xFF);
75 buffer[*offset + 2] = (u8) ((value >> 16) & 0xFF);
76 buffer[*offset + 3] = (u8) ((value >> 24) & 0xFF);
77 *offset += sizeof(value);
78}
79EXPORT_SYMBOL_GPL(CsrUint32Ser);
80
81void CsrMemCpySer(u8 *buffer, size_t *offset, const void *value, size_t length)
82{
83 memcpy(&buffer[*offset], value, length);
84 *offset += length;
85}
86EXPORT_SYMBOL_GPL(CsrMemCpySer);
87
88void CsrCharStringSer(u8 *buffer, size_t *offset, const char *value)
89{
90 if (value)
91 {
92 strcpy(((char *) &buffer[*offset]), value);
93 *offset += strlen(value) + 1;
94 }
95 else
96 {
97 CsrUint8Ser(buffer, offset, 0);
98 }
99}
100EXPORT_SYMBOL_GPL(CsrCharStringSer);
diff --git a/drivers/staging/csr/csr_time.c b/drivers/staging/csr/csr_time.c
deleted file mode 100644
index 01179e46f47d..000000000000
--- a/drivers/staging/csr/csr_time.c
+++ /dev/null
@@ -1,33 +0,0 @@
1/*****************************************************************************
2
3 (c) Cambridge Silicon Radio Limited 2010
4 All rights reserved and confidential information of CSR
5
6 Refer to LICENSE.txt included with this source for details
7 on the license terms.
8
9*****************************************************************************/
10
11#include <linux/kernel.h>
12#include <linux/time.h>
13#include <linux/module.h>
14
15#include "csr_time.h"
16
17u32 CsrTimeGet(u32 *high)
18{
19 struct timespec ts;
20 u64 time;
21 u32 low;
22
23 ts = current_kernel_time();
24 time = (u64) ts.tv_sec * 1000000 + ts.tv_nsec / 1000;
25
26 if (high != NULL)
27 *high = (u32) ((time >> 32) & 0xFFFFFFFF);
28
29 low = (u32) (time & 0xFFFFFFFF);
30
31 return low;
32}
33EXPORT_SYMBOL_GPL(CsrTimeGet);
diff --git a/drivers/staging/csr/csr_time.h b/drivers/staging/csr/csr_time.h
deleted file mode 100644
index fc29e8e5e478..000000000000
--- a/drivers/staging/csr/csr_time.h
+++ /dev/null
@@ -1,76 +0,0 @@
1#ifndef CSR_TIME_H__
2#define CSR_TIME_H__
3/*****************************************************************************
4
5(c) Cambridge Silicon Radio Limited 2010
6All rights reserved and confidential information of CSR
7
8Refer to LICENSE.txt included with this source for details
9on the license terms.
10
11*****************************************************************************/
12
13#include <linux/types.h>
14
15/*******************************************************************************
16
17NAME
18 CsrTimeGet
19
20DESCRIPTION
21 Returns the current system time in a low and a high part. The low part
22 is expressed in microseconds. The high part is incremented when the low
23 part wraps to provide an extended range.
24
25 The caller may provide a NULL pointer as the high parameter.
26 In this case the function just returns the low part and ignores the
27 high parameter.
28
29 Although the time is expressed in microseconds the actual resolution is
30 platform dependent and can be less. It is recommended that the
31 resolution is at least 10 milliseconds.
32
33PARAMETERS
34 high - Pointer to variable that will receive the high part of the
35 current system time. Passing NULL is valid.
36
37RETURNS
38 Low part of current system time in microseconds.
39
40*******************************************************************************/
41u32 CsrTimeGet(u32 *high);
42
43
44/*------------------------------------------------------------------*/
45/* CsrTime Macros */
46/*------------------------------------------------------------------*/
47
48/*----------------------------------------------------------------------------*
49 * NAME
50 * CsrTimeAdd
51 *
52 * DESCRIPTION
53 * Add two time values. Adding the numbers can overflow the range of a
54 * CsrTime, so the user must be cautious.
55 *
56 * RETURNS
57 * CsrTime - the sum of "t1" and "t2".
58 *
59 *----------------------------------------------------------------------------*/
60#define CsrTimeAdd(t1, t2) ((t1) + (t2))
61
62/*----------------------------------------------------------------------------*
63 * NAME
64 * CsrTimeSub
65 *
66 * DESCRIPTION
67 * Subtract two time values. Subtracting the numbers can provoke an
68 * underflow, so the user must be cautious.
69 *
70 * RETURNS
71 * CsrTime - "t1" - "t2".
72 *
73 *----------------------------------------------------------------------------*/
74#define CsrTimeSub(t1, t2) ((s32) (t1) - (s32) (t2))
75
76#endif
diff --git a/drivers/staging/csr/csr_util.c b/drivers/staging/csr/csr_util.c
deleted file mode 100644
index c3aa9d509e5c..000000000000
--- a/drivers/staging/csr/csr_util.c
+++ /dev/null
@@ -1,15 +0,0 @@
1/*****************************************************************************
2
3 (c) Cambridge Silicon Radio Limited 2010
4 All rights reserved and confidential information of CSR
5
6 Refer to LICENSE.txt included with this source for details
7 on the license terms.
8
9*****************************************************************************/
10
11#include <linux/module.h>
12
13MODULE_DESCRIPTION("CSR Operating System Kernel Abstraction");
14MODULE_AUTHOR("Cambridge Silicon Radio Ltd.");
15MODULE_LICENSE("GPL and additional rights");
diff --git a/drivers/staging/csr/csr_wifi_common.h b/drivers/staging/csr/csr_wifi_common.h
deleted file mode 100644
index efc43a525a3d..000000000000
--- a/drivers/staging/csr/csr_wifi_common.h
+++ /dev/null
@@ -1,101 +0,0 @@
1/*****************************************************************************
2
3 (c) Cambridge Silicon Radio Limited 2011
4 All rights reserved and confidential information of CSR
5
6 Refer to LICENSE.txt included with this source for details
7 on the license terms.
8
9*****************************************************************************/
10
11#ifndef CSR_WIFI_COMMON_H__
12#define CSR_WIFI_COMMON_H__
13
14#include <linux/types.h>
15#include "csr_result.h"
16
17/* MAC address */
18typedef struct
19{
20 u8 a[6];
21} CsrWifiMacAddress;
22
23/* IPv4 address */
24typedef struct
25{
26 u8 a[4];
27} CsrWifiIp4Address;
28
29/* IPv6 address */
30typedef struct
31{
32 u8 a[16];
33} CsrWifiIp6Address;
34
35typedef struct
36{
37 u8 ssid[32];
38 u8 length;
39} CsrWifiSsid;
40
41/*******************************************************************************
42
43 DESCRIPTION
44 Result values used on the Wifi Interfaces
45
46 VALUES
47 CSR_RESULT_SUCCESS
48 - The request/procedure succeeded
49 CSR_RESULT_FAILURE
50 - The request/procedure did not succeed because of an error
51 CSR_WIFI_RESULT_NOT_FOUND
52 - The request did not succeed because some resource was not
53 found.
54 CSR_WIFI_RESULT_TIMED_OUT
55 - The request/procedure did not succeed because of a time out
56 CSR_WIFI_RESULT_CANCELLED
57 - The request was canceled due to another conflicting
58 request that was issued before this one was completed
59 CSR_WIFI_RESULT_INVALID_PARAMETER
60 - The request/procedure did not succeed because it had an
61 invalid parameter
62 CSR_WIFI_RESULT_NO_ROOM
63 - The request did not succeed due to a lack of resources,
64 e.g. out of memory problem.
65 CSR_WIFI_RESULT_UNSUPPORTED
66 - The request/procedure did not succeed because the feature
67 is not supported yet
68 CSR_WIFI_RESULT_UNAVAILABLE
69 - The request cannot be processed at this time
70 CSR_WIFI_RESULT_WIFI_OFF
71 - The requested action is not available because Wi-Fi is
72 currently off
73 CSR_WIFI_RESULT_SECURITY_ERROR
74 - The request/procedure did not succeed because of a security
75 error
76 CSR_WIFI_RESULT_MIB_SET_FAILURE
77 - MIB Set Failure: either the MIB OID to be written to does
78 not exist or the MIB Value is invalid.
79 CSR_WIFI_RESULT_INVALID_INTERFACE_TAG
80 - The supplied Interface Tag is not valid.
81 CSR_WIFI_RESULT_P2P_NOA_CONFIG_CONFLICT
82 - The new NOA configuration conflicts with the existing NOA configuration
83 hence not accepted"
84*******************************************************************************/
85#define CSR_WIFI_RESULT_NOT_FOUND ((CsrResult) 0x0001)
86#define CSR_WIFI_RESULT_TIMED_OUT ((CsrResult) 0x0002)
87#define CSR_WIFI_RESULT_CANCELLED ((CsrResult) 0x0003)
88#define CSR_WIFI_RESULT_INVALID_PARAMETER ((CsrResult) 0x0004)
89#define CSR_WIFI_RESULT_NO_ROOM ((CsrResult) 0x0005)
90#define CSR_WIFI_RESULT_UNSUPPORTED ((CsrResult) 0x0006)
91#define CSR_WIFI_RESULT_UNAVAILABLE ((CsrResult) 0x0007)
92#define CSR_WIFI_RESULT_WIFI_OFF ((CsrResult) 0x0008)
93#define CSR_WIFI_RESULT_SECURITY_ERROR ((CsrResult) 0x0009)
94#define CSR_WIFI_RESULT_MIB_SET_FAILURE ((CsrResult) 0x000A)
95#define CSR_WIFI_RESULT_INVALID_INTERFACE_TAG ((CsrResult) 0x000B)
96#define CSR_WIFI_RESULT_P2P_NOA_CONFIG_CONFLICT ((CsrResult) 0x000C)
97
98#define CSR_WIFI_VERSION "5.1.0.0"
99
100#endif
101
diff --git a/drivers/staging/csr/csr_wifi_fsm.h b/drivers/staging/csr/csr_wifi_fsm.h
deleted file mode 100644
index fc5c5aa6a3c4..000000000000
--- a/drivers/staging/csr/csr_wifi_fsm.h
+++ /dev/null
@@ -1,240 +0,0 @@
1/*****************************************************************************
2
3 (c) Cambridge Silicon Radio Limited 2011
4 All rights reserved and confidential information of CSR
5
6 Refer to LICENSE.txt included with this source for details
7 on the license terms.
8
9*****************************************************************************/
10
11#ifndef CSR_WIFI_FSM_H
12#define CSR_WIFI_FSM_H
13
14#include "csr_prim_defs.h"
15#include "csr_log_text.h"
16#include "csr_wifi_fsm_event.h"
17
18/* including this file for CsrWifiInterfaceMode*/
19#include "csr_wifi_common.h"
20
21#define CSR_WIFI_FSM_ENV (0xFFFF)
22
23/**
24 * @brief
25 * Toplevel FSM context data
26 *
27 * @par Description
28 * Holds ALL FSM static and dynamic data for a FSM
29 */
30typedef struct CsrWifiFsmContext CsrWifiFsmContext;
31
32/**
33 * @brief
34 * FSM External Wakeup CallbackFunction Pointer
35 *
36 * @par Description
37 * Defines the external wakeup function for the FSM
38 * to call when an external event is injected into the systen
39 *
40 * @param[in] context : External context
41 *
42 * @return
43 * void
44 */
45typedef void (*CsrWifiFsmExternalWakupCallbackPtr)(void *context);
46
47/**
48 * @brief
49 * Initialises a top level FSM context
50 *
51 * @par Description
52 * Initialises the FSM Context to an initial state and allocates
53 * space for "maxProcesses" number of instances
54 *
55 * @param[in] osaContext : OSA context
56 * @param[in] applicationContext : Internal fsm application context
57 * @param[in] externalContext : External context
58 * @param[in] maxProcesses : Max processes to allocate room for
59 *
60 * @return
61 * CsrWifiFsmContext* fsm context
62 */
63extern CsrWifiFsmContext* CsrWifiFsmInit(void *applicationContext, void *externalContext, u16 maxProcesses, CsrLogTextTaskId loggingTaskId);
64
65/**
66 * @brief
67 * Resets the FSM's back to first conditions
68 *
69 * @par Description
70 * This function is used to free any dynamic resources allocated for the
71 * given context by CsrWifiFsmInit().
72 * The FSM's reset function is called to cleanup any fsm specific memory
73 * The reset function does NOT need to free the fsm data pointer as
74 * CsrWifiFsmShutdown() will do it.
75 * the FSM's init function is call again to reinitialise the FSM context.
76 * CsrWifiFsmReset() should NEVER be called when CsrWifiFsmExecute() is running.
77 *
78 * @param[in] context : FSM context
79 *
80 * @return
81 * void
82 */
83extern void CsrWifiFsmReset(CsrWifiFsmContext *context);
84
85/**
86 * @brief
87 * Frees resources allocated by CsrWifiFsmInit
88 *
89 * @par Description
90 * This function is used to free any dynamic resources allocated for the
91 * given context by CsrWifiFsmInit(), prior to complete termination of
92 * the program.
93 * The FSM's reset function is called to cleanup any fsm specific memory.
94 * The reset function does NOT need to free the fsm data pointer as
95 * CsrWifiFsmShutdown() will do it.
96 * CsrWifiFsmShutdown() should NEVER be called when CsrWifiFsmExecute() is running.
97 *
98 * @param[in] context : FSM context
99 *
100 * @return
101 * void
102 */
103extern void CsrWifiFsmShutdown(CsrWifiFsmContext *context);
104
105/**
106 * @brief
107 * Executes the fsm context
108 *
109 * @par Description
110 * Executes the FSM context and runs until ALL events in the context are processed.
111 * When no more events are left to process then CsrWifiFsmExecute() returns to a time
112 * specifying when to next call the CsrWifiFsmExecute()
113 * Scheduling, threading, blocking and external event notification are outside
114 * the scope of the FSM and CsrWifiFsmExecute().
115 *
116 * @param[in] context : FSM context
117 *
118 * @return
119 * u32 Time in ms until next timeout or 0xFFFFFFFF for no timer set
120 */
121extern u32 CsrWifiFsmExecute(CsrWifiFsmContext *context);
122
123/**
124 * @brief
125 * Adds an event to the FSM context's external event queue for processing
126 *
127 * @par Description
128 * Adds an event to the contexts external queue
129 * This is thread safe and adds an event to the fsm's external event queue.
130 *
131 * @param[in] context : FSM context
132 * @param[in] event : event to add to the event queue
133 * @param[in] source : source of the event (this can be a synergy task queue or an fsm instance id)
134 * @param[in] destination : destination of the event (This can be a fsm instance id or CSR_WIFI_FSM_ENV)
135 * @param[in] id : event id
136 *
137 * @return
138 * void
139 */
140extern void CsrWifiFsmSendEventExternal(CsrWifiFsmContext *context, CsrWifiFsmEvent *event, u16 source, u16 destination, CsrPrim primtype, u16 id);
141
142/**
143 * @brief
144 * Adds an Alien event to the FSM context's external event queue for processing
145 *
146 * @par Description
147 * Adds an event to the contexts external queue
148 * This is thread safe and adds an event to the fsm's external event queue.
149 *
150 * @param[in] context : FSM context
151 * @param[in] event : event to add to the event queue
152 * @param[in] source : source of the event (this can be a synergy task queue or an fsm instance id)
153 * @param[in] destination : destination of the event (This can be a fsm instance id or CSR_WIFI_FSM_ENV)
154 * @param[in] id : event id
155 */
156#define CsrWifiFsmSendAlienEventExternal(_context, _alienEvent, _source, _destination, _primtype, _id) \
157 { \
158 CsrWifiFsmAlienEvent *_evt = kmalloc(sizeof(CsrWifiFsmAlienEvent), GFP_KERNEL); \
159 _evt->alienEvent = _alienEvent; \
160 CsrWifiFsmSendEventExternal(_context, (CsrWifiFsmEvent *)_evt, _source, _destination, _primtype, _id); \
161 }
162
163
164/**
165 * @brief
166 * Current time of day in ms
167 *
168 * @param[in] context : FSM context
169 *
170 * @return
171 * u32 32 bit ms tick
172 */
173extern u32 CsrWifiFsmGetTimeOfDayMs(CsrWifiFsmContext *context);
174
175/**
176 * @brief
177 * Gets the time until the next FSM timer expiry
178 *
179 * @par Description
180 * Returns the next timeout time or 0 if no timers are set.
181 *
182 * @param[in] context : FSM context
183 *
184 * @return
185 * u32 Time in ms until next timeout or 0xFFFFFFFF for no timer set
186 */
187extern u32 CsrWifiFsmGetNextTimeout(CsrWifiFsmContext *context);
188
189/**
190 * @brief
191 * Fast forwards the fsm timers by ms Milliseconds
192 *
193 * @param[in] context : FSM context
194 * @param[in] ms : Milliseconds to fast forward by
195 *
196 * @return
197 * void
198 */
199extern void CsrWifiFsmFastForward(CsrWifiFsmContext *context, u16 ms);
200
201/**
202 * @brief
203 * shift the current time of day by ms amount
204 *
205 * @par Description
206 * useful to speed up tests where time needs to pass
207 *
208 * @param[in] context : FSM context
209 * @param[in] ms : ms to adjust time by
210 *
211 * @return
212 * void
213 */
214extern void CsrWifiFsmTestAdvanceTime(CsrWifiFsmContext *context, u32 ms);
215
216/**
217 * @brief
218 * Check if the fsm has events to process
219 *
220 * @param[in] context : FSM context
221 *
222 * @return
223 * u8 returns TRUE if there are events for the FSM to process
224 */
225extern u8 CsrWifiFsmHasEvents(CsrWifiFsmContext *context);
226
227/**
228 * @brief
229 * function that installs the contexts wakeup function
230 *
231 * @param[in] context : FSM context
232 * @param[in] callback : Callback function pointer
233 *
234 * @return
235 * void
236 */
237extern void CsrWifiFsmInstallWakeupCallback(CsrWifiFsmContext *context, CsrWifiFsmExternalWakupCallbackPtr callback);
238
239#endif /* CSR_WIFI_FSM_H */
240
diff --git a/drivers/staging/csr/csr_wifi_fsm_event.h b/drivers/staging/csr/csr_wifi_fsm_event.h
deleted file mode 100644
index 0690ca955ef5..000000000000
--- a/drivers/staging/csr/csr_wifi_fsm_event.h
+++ /dev/null
@@ -1,42 +0,0 @@
1/*****************************************************************************
2
3 (c) Cambridge Silicon Radio Limited 2011
4 All rights reserved and confidential information of CSR
5
6 Refer to LICENSE.txt included with this source for details
7 on the license terms.
8
9*****************************************************************************/
10
11#ifndef CSR_WIFI_FSM_EVENT_H
12#define CSR_WIFI_FSM_EVENT_H
13
14#include "csr_prim_defs.h"
15#include "csr_sched.h"
16
17/**
18 * @brief
19 * FSM event header.
20 *
21 * @par Description
22 * All events MUST have this struct as the FIRST member.
23 * The next member is used internally for linked lists
24 */
25typedef struct CsrWifiFsmEvent
26{
27 CsrPrim type;
28 u16 primtype;
29 CsrSchedQid destination;
30 CsrSchedQid source;
31
32 /* Private pointer to allow an optimal Event list */
33 /* NOTE: Ignore this pointer.
34 * Do not waste code initializing OR freeing it.
35 * The pointer is used internally in the CsrWifiFsm code
36 * to avoid a second malloc when queuing events.
37 */
38 struct CsrWifiFsmEvent *next;
39} CsrWifiFsmEvent;
40
41#endif /* CSR_WIFI_FSM_EVENT_H */
42
diff --git a/drivers/staging/csr/csr_wifi_fsm_types.h b/drivers/staging/csr/csr_wifi_fsm_types.h
deleted file mode 100644
index d21c60a81fcf..000000000000
--- a/drivers/staging/csr/csr_wifi_fsm_types.h
+++ /dev/null
@@ -1,430 +0,0 @@
1/*****************************************************************************
2
3 (c) Cambridge Silicon Radio Limited 2011
4 All rights reserved and confidential information of CSR
5
6 Refer to LICENSE.txt included with this source for details
7 on the license terms.
8
9*****************************************************************************/
10
11#ifndef CSR_WIFI_FSM_TYPES_H
12#define CSR_WIFI_FSM_TYPES_H
13
14#include <linux/types.h>
15#include "csr_macro.h"
16#include "csr_sched.h"
17
18#ifdef CSR_WIFI_FSM_MUTEX_ENABLE
19#include "csr_framework_ext.h"
20#endif
21
22#include "csr_wifi_fsm.h"
23
24#define CSR_WIFI_FSM_MAX_TRANSITION_HISTORY 10
25
26/**
27 * @brief
28 * FSM event list header.
29 *
30 * @par Description
31 * Singly linked list of events.
32 */
33typedef struct CsrWifiFsmEventList
34{
35 CsrWifiFsmEvent *first;
36 CsrWifiFsmEvent *last;
37} CsrWifiFsmEventList;
38
39
40/**
41 * @brief
42 * FSM timer id.
43 *
44 * @par Description
45 * Composite Id made up of the type, dest and a unique id so
46 * CsrWifiFsmRemoveTimer knows where to look when removing the timer
47 */
48typedef struct CsrWifiFsmTimerId
49{
50 CsrPrim type;
51 u16 primtype;
52 CsrSchedQid destination;
53 u16 uniqueid;
54} CsrWifiFsmTimerId;
55
56/**
57 * @brief
58 * FSM timer header.
59 *
60 * @par Description
61 * All timer MUST have this struct as the FIRST member.
62 * The first members of the structure MUST remain compatable
63 * with the CsrWifiFsmEvent so that timers are just specialised events
64 */
65typedef struct CsrWifiFsmTimer
66{
67 CsrPrim type;
68 u16 primtype;
69 CsrSchedQid destination;
70 CsrSchedQid source;
71
72 /* Private pointer to allow an optimal Event list */
73 struct CsrWifiFsmTimer *next;
74
75 CsrWifiFsmTimerId timerid;
76 u32 timeoutTimeMs;
77} CsrWifiFsmTimer;
78
79
80/**
81 * @brief
82 * Fsm Alien Event
83 *
84 * @par Description
85 * Allows the wrapping of alien events that do not use CsrWifiFsmEvent
86 * as the first member of the Event struct
87 */
88typedef struct
89{
90 CsrWifiFsmEvent event;
91 void *alienEvent;
92} CsrWifiFsmAlienEvent;
93
94
95/**
96 * @brief
97 * FSM timer list header.
98 *
99 * @par Description
100 * Singly linked list of timers.
101 */
102typedef struct CsrWifiFsmTimerList
103{
104 CsrWifiFsmTimer *first;
105 CsrWifiFsmTimer *last;
106 u16 nexttimerid;
107} CsrWifiFsmTimerList;
108
109/**
110 * @brief
111 * Process Entry Function Pointer
112 *
113 * @par Description
114 * Defines the entry function for a processes.
115 * Called at process initialisation.
116 *
117 * @param[in] context : FSM context
118 *
119 * @return
120 * void
121 */
122typedef void (*CsrWifiFsmProcEntryFnPtr)(CsrWifiFsmContext *context);
123
124/**
125 * @brief
126 * Process Transition Function Pointer
127 *
128 * @par Description
129 * Defines a transition function for a processes.
130 * Called when an event causes a transition on a process
131 *
132 * @param[in] CsrWifiFsmContext* : FSM context
133 * @param[in] void* : FSM data (can be NULL)
134 * @param[in] const CsrWifiFsmEvent* : event to process
135 *
136 * @return
137 * void
138 */
139typedef void (*CsrWifiFsmTransitionFnPtr)(CsrWifiFsmContext *context, void *fsmData, const CsrWifiFsmEvent *event);
140
141/**
142 * @brief
143 * Process reset/shutdown Function Pointer
144 *
145 * @par Description
146 * Defines the reset/shutdown function for a processes.
147 * Called to reset or shutdown an fsm.
148 *
149 * @param[in] context : FSM context
150 *
151 * @return
152 * void
153 */
154typedef void (*CsrWifiFsmProcResetFnPtr)(CsrWifiFsmContext *context);
155
156/**
157 * @brief
158 * FSM Default Destination CallbackFunction Pointer
159 *
160 * @par Description
161 * Defines the default destination function for the FSM
162 * to call when an event does not have a valid destination.
163 * This
164 *
165 * @param[in] context : External context
166 *
167 * @return
168 * u16 a valid destination OR CSR_WIFI_FSM_ENV
169 */
170typedef u16 (*CsrWifiFsmDestLookupCallbackPtr)(void *context, const CsrWifiFsmEvent *event);
171
172
173#ifdef CSR_WIFI_FSM_DUMP_ENABLE
174/**
175 * @brief
176 * Trace Dump Function Pointer
177 *
178 * @par Description
179 * Called when we want to trace the FSM
180 *
181 * @param[in] context : FSM context
182 * @param[in] id : fsm id
183 *
184 * @return
185 * void
186 */
187typedef void (*CsrWifiFsmDumpFnPtr)(CsrWifiFsmContext *context, void *fsmData);
188#endif
189
190/**
191 * @brief
192 * Event ID to transition function entry
193 *
194 * @par Description
195 * Event ID to Transition Entry in a state table.
196 */
197typedef struct
198{
199 u32 eventid;
200 CsrWifiFsmTransitionFnPtr transition;
201#ifdef CSR_LOG_ENABLE
202 const char *transitionName;
203#endif
204} CsrWifiFsmEventEntry;
205
206/**
207 * @brief
208 * Single State's Transition Table
209 *
210 * @par Description
211 * Stores Data for a single State's event to
212 * transition functions mapping
213 */
214typedef struct
215{
216 const u8 numEntries;
217 const u8 saveAll;
218 const CsrWifiFsmEventEntry *eventEntryArray; /* array of transition function pointers for state */
219#ifdef CSR_LOG_ENABLE
220 u16 stateNumber;
221 const char *stateName;
222#endif
223} CsrWifiFsmTableEntry;
224
225/**
226 * @brief
227 * Process State Transtion table
228 *
229 * @par Description
230 * Stores Data for a processes State to transition table
231 */
232typedef struct
233{
234 u16 numStates; /* number of states */
235 const CsrWifiFsmTableEntry *aStateEventMatrix; /* state event matrix */
236} CsrWifiFsmTransitionFunctionTable;
237
238/**
239 * @brief
240 * Const Process definition
241 *
242 * @par Description
243 * Constant process specification.
244 * This is ALL the non dynamic data that defines
245 * a process.
246 */
247typedef struct
248{
249 const char *processName;
250 const u32 processId;
251 const CsrWifiFsmTransitionFunctionTable transitionTable;
252 const CsrWifiFsmTableEntry unhandledTransitions;
253 const CsrWifiFsmTableEntry ignoreFunctions;
254 const CsrWifiFsmProcEntryFnPtr entryFn;
255 const CsrWifiFsmProcResetFnPtr resetFn;
256#ifdef CSR_WIFI_FSM_DUMP_ENABLE
257 const CsrWifiFsmDumpFnPtr dumpFn; /* Called to dump fsm specific trace if not NULL */
258#endif
259} CsrWifiFsmProcessStateMachine;
260
261#ifdef CSR_WIFI_FSM_DUMP_ENABLE
262/**
263 * @brief
264 * Storage for state transition info
265 */
266typedef struct
267{
268 u16 transitionNumber;
269 CsrWifiFsmEvent event;
270 u16 fromState;
271 u16 toState;
272 CsrWifiFsmTransitionFnPtr transitionFn;
273 u16 transitionCount; /* number consecutive of times this transition was seen */
274#ifdef CSR_LOG_ENABLE
275 const char *transitionName;
276#endif
277} CsrWifiFsmTransitionRecord;
278
279/**
280 * @brief
281 * Storage for the last state X transitions
282 */
283typedef struct
284{
285 u16 numTransitions;
286 CsrWifiFsmTransitionRecord records[CSR_WIFI_FSM_MAX_TRANSITION_HISTORY];
287} CsrWifiFsmTransitionRecords;
288#endif
289
290/**
291 * @brief
292 * Dynamic Process data
293 *
294 * @par Description
295 * Dynamic process data that is used to keep track of the
296 * state and data for a process instance
297 */
298typedef struct
299{
300 const CsrWifiFsmProcessStateMachine *fsmInfo; /* state machine info that is constant regardless of context */
301 u16 instanceId; /* Runtime process id */
302 u16 state; /* Current state */
303 void *params; /* Instance user data */
304 CsrWifiFsmEventList savedEventQueue; /* The saved event queue */
305 struct CsrWifiFsmInstanceEntry *subFsm; /* Sub Fsm instance data */
306 struct CsrWifiFsmInstanceEntry *subFsmCaller; /* The Fsm instance that created the SubFsm and should be used for callbacks*/
307#ifdef CSR_WIFI_FSM_DUMP_ENABLE
308 CsrWifiFsmTransitionRecords transitionRecords; /* Last X transitions in the FSM */
309#endif
310} CsrWifiFsmInstanceEntry;
311
312/**
313 * @brief
314 * OnCreate Callback Function Pointer
315 *
316 * @par Description
317 * Called when an fsm is created.
318 *
319 * @param[in] extContext : External context
320 * @param[in] instance : FSM instance
321 *
322 * @return
323 * void
324 */
325typedef void (*CsrWifiFsmOnCreateFnPtr)(void *extContext, const CsrWifiFsmInstanceEntry *instance);
326
327/**
328 * @brief
329 * OnTransition Callback Function Pointer
330 *
331 * @par Description
332 * Called when an event is processed by a fsm
333 *
334 * @param[in] extContext : External context
335 * @param[in] eventEntryArray : Entry data
336 * @param[in] event : Event
337 *
338 * @return
339 * void
340 */
341typedef void (*CsrWifiFsmOnTransitionFnPtr)(void *extContext, const CsrWifiFsmEventEntry *eventEntryArray, const CsrWifiFsmEvent *event);
342
343/**
344 * @brief
345 * OnStateChange Callback Function Pointer
346 *
347 * @par Description
348 * Called when CsrWifiFsmNextState is called
349 *
350 * @param[in] extContext : External context
351 *
352 * @return
353 * void
354 */
355typedef void (*CsrWifiFsmOnStateChangeFnPtr)(void *extContext, u16 nextstate);
356
357/**
358 * @brief
359 * OnIgnore,OnError or OnInvalid Callback Function Pointer
360 *
361 * @par Description
362 * Called when an event is processed by a fsm
363 *
364 * @param[in] extContext : External context
365 * @param[in] event : Event
366 *
367 * @return
368 * void
369 */
370typedef void (*CsrWifiFsmOnEventFnPtr)(void *extContext, const CsrWifiFsmEvent *event);
371
372/**
373 * @brief
374 * Toplevel FSM context data
375 *
376 * @par Description
377 * Holds ALL FSM static and dynamic data for a FSM
378 */
379struct CsrWifiFsmContext
380{
381 CsrWifiFsmEventList eventQueue; /* The internal event queue */
382 CsrWifiFsmEventList externalEventQueue; /* The external event queue */
383#ifdef CSR_WIFI_FSM_MUTEX_ENABLE
384 CsrMutexHandle externalEventQueueLock; /* The external event queue mutex */
385#endif
386 u32 timeOffset; /* Amount to adjust the TimeOfDayMs by */
387 CsrWifiFsmTimerList timerQueue; /* The internal timer queue */
388 u8 useTempSaveList; /* Should the temp save list be used */
389 CsrWifiFsmEventList tempSaveList; /* The temp save event queue */
390 CsrWifiFsmEvent *eventForwardedOrSaved; /* The event that was forwarded or Saved */
391 u16 maxProcesses; /* Size of instanceArray */
392 u16 numProcesses; /* Current number allocated in instanceArray */
393 CsrWifiFsmInstanceEntry *instanceArray; /* Array of processes for this component */
394 CsrWifiFsmInstanceEntry *ownerInstance; /* The Process that owns currentInstance (SubFsm support) */
395 CsrWifiFsmInstanceEntry *currentInstance; /* Current Process that is executing */
396 CsrWifiFsmExternalWakupCallbackPtr externalEventFn; /* External event Callback */
397 CsrWifiFsmOnEventFnPtr appIgnoreCallback; /* Application Ignore event Callback */
398 CsrWifiFsmDestLookupCallbackPtr appEvtDstCallback; /* Application Lookup event Destination Function*/
399
400 void *applicationContext; /* Internal fsm application context */
401 void *externalContext; /* External context (set by the user of the fsm)*/
402 CsrLogTextTaskId loggingTaskId; /* Task Id to use in any logging output */
403
404#ifndef CSR_WIFI_FSM_SCHEDULER_DISABLED
405 CsrSchedTid schedTimerId; /* Scheduler TimerId for use in Scheduler Tasks */
406 u32 schedTimerNexttimeoutMs; /* Next timeout time for the current timer */
407#endif
408
409#ifdef CSR_WIFI_FSM_MUTEX_ENABLE
410#ifdef CSR_WIFI_FSM_TRANSITION_LOCK
411 CsrMutexHandle transitionLock; /* Lock when calling transition functions */
412#endif
413#endif
414
415#ifdef CSR_LOG_ENABLE
416 CsrWifiFsmOnCreateFnPtr onCreate; /* Debug Transition Callback */
417 CsrWifiFsmOnTransitionFnPtr onTransition; /* Debug Transition Callback */
418 CsrWifiFsmOnTransitionFnPtr onUnhandedCallback; /* Unhanded event Callback */
419 CsrWifiFsmOnStateChangeFnPtr onStateChange; /* Debug State Change Callback */
420 CsrWifiFsmOnEventFnPtr onIgnoreCallback; /* Ignore event Callback */
421 CsrWifiFsmOnEventFnPtr onSaveCallback; /* Save event Callback */
422 CsrWifiFsmOnEventFnPtr onErrorCallback; /* Error event Callback */
423 CsrWifiFsmOnEventFnPtr onInvalidCallback; /* Invalid event Callback */
424#endif
425#ifdef CSR_WIFI_FSM_DUMP_ENABLE
426 u16 masterTransitionNumber; /* Increments on every transition */
427#endif
428};
429
430#endif /* CSR_WIFI_FSM_TYPES_H */
diff --git a/drivers/staging/csr/csr_wifi_hip_card.h b/drivers/staging/csr/csr_wifi_hip_card.h
deleted file mode 100644
index bd47f606e0de..000000000000
--- a/drivers/staging/csr/csr_wifi_hip_card.h
+++ /dev/null
@@ -1,114 +0,0 @@
1/*****************************************************************************
2
3 (c) Cambridge Silicon Radio Limited 2012
4 All rights reserved and confidential information of CSR
5
6 Refer to LICENSE.txt included with this source for details
7 on the license terms.
8
9*****************************************************************************/
10
11/*
12 ******************************************************************************
13 * FILE : csr_wifi_hip_card.h
14 *
15 * PURPOSE : Defines abstract interface for hardware specific functions.
16 * Note, this is a different file from one of the same name in the
17 * Windows driver.
18 *
19 *****************************************************************************
20 */
21#ifndef __CARD_H__
22#define __CARD_H__
23
24#include "csr_wifi_hip_card_sdio.h"
25#include "csr_wifi_hip_signals.h"
26#include "csr_wifi_hip_unifi_udi.h"
27
28
29/*****************************************************************************
30 * CardEnableInt -
31 */
32CsrResult CardEnableInt(card_t *card);
33
34/*****************************************************************************
35 * CardGenInt -
36 */
37CsrResult CardGenInt(card_t *card);
38
39/*****************************************************************************
40 * CardPendingInt -
41 */
42CsrResult CardPendingInt(card_t *card, u8 *pintr);
43
44/*****************************************************************************
45 * CardDisableInt -
46 */
47CsrResult CardDisableInt(card_t *card);
48
49/*****************************************************************************
50 * CardClearInt -
51 */
52CsrResult CardClearInt(card_t *card);
53
54/*****************************************************************************
55 * CardDisable -
56 */
57void CardDisable(card_t *card);
58
59/*****************************************************************************
60 * CardIntEnabled -
61 */
62CsrResult CardIntEnabled(card_t *card, u8 *enabled);
63
64/*****************************************************************************
65 * CardGetDataSlotSize
66 */
67u16 CardGetDataSlotSize(card_t *card);
68
69/*****************************************************************************
70 * CardWriteBulkData -
71 */
72CsrResult CardWriteBulkData(card_t *card, card_signal_t *csptr, unifi_TrafficQueue queue);
73
74
75/*****************************************************************************
76 * CardClearFromHostDataSlot -
77 */
78void CardClearFromHostDataSlot(card_t *card, const s16 aSlotNum);
79
80#ifdef CSR_WIFI_REQUEUE_PACKET_TO_HAL
81/*****************************************************************************
82 * CardClearFromHostDataSlotWithoutFreeingBulkData - Clear the data stot
83 * without freeing the bulk data
84 */
85
86void CardClearFromHostDataSlotWithoutFreeingBulkData(card_t *card, const s16 aSlotNum);
87#endif
88
89/*****************************************************************************
90 * CardGetFreeFromHostDataSlots -
91 */
92u16 CardGetFreeFromHostDataSlots(card_t *card);
93
94u16 CardAreAllFromHostDataSlotsEmpty(card_t *card);
95
96CsrResult card_start_processor(card_t *card, enum unifi_dbg_processors_select which);
97
98CsrResult card_wait_for_firmware_to_start(card_t *card, u32 *paddr);
99
100CsrResult unifi_dl_firmware(card_t *card, void *arg);
101CsrResult unifi_dl_patch(card_t *card, void *arg, u32 boot_ctrl);
102CsrResult unifi_do_loader_op(card_t *card, u32 op_addr, u8 opcode);
103void* unifi_dl_fw_read_start(card_t *card, s8 is_fw);
104
105CsrResult unifi_coredump_handle_request(card_t *card);
106
107CsrResult ConvertCsrSdioToCsrHipResult(card_t *card, CsrResult csrResult);
108#ifdef CSR_WIFI_HIP_DEBUG_OFFLINE
109void unifi_debug_log_to_buf(const char *fmt, ...);
110void unifi_debug_string_to_buf(const char *str);
111void unifi_debug_hex_to_buf(const char *buff, u16 length);
112#endif
113
114#endif /* __CARD_H__ */
diff --git a/drivers/staging/csr/csr_wifi_hip_card_sdio.c b/drivers/staging/csr/csr_wifi_hip_card_sdio.c
deleted file mode 100644
index d5425325894c..000000000000
--- a/drivers/staging/csr/csr_wifi_hip_card_sdio.c
+++ /dev/null
@@ -1,4001 +0,0 @@
1/*****************************************************************************
2
3 (c) Cambridge Silicon Radio Limited 2012
4 All rights reserved and confidential information of CSR
5
6 Refer to LICENSE.txt included with this source for details
7 on the license terms.
8
9*****************************************************************************/
10
11/*
12 * ---------------------------------------------------------------------------
13 * FILE: csr_wifi_hip_card_sdio.c
14 *
15 * PURPOSE: Implementation of the Card API for SDIO.
16 *
17 * NOTES:
18 * CardInit() is called from the SDIO probe callback when a card is
19 * inserted. This performs the basic SDIO initialisation, enabling i/o
20 * etc.
21 *
22 * ---------------------------------------------------------------------------
23 */
24#include <linux/slab.h>
25#include "csr_wifi_hip_unifi.h"
26#include "csr_wifi_hip_conversions.h"
27#include "csr_wifi_hip_unifiversion.h"
28#include "csr_wifi_hip_card.h"
29#include "csr_wifi_hip_card_sdio.h"
30#include "csr_wifi_hip_chiphelper.h"
31
32
33/* Time to wait between attempts to read MAILBOX0 */
34#define MAILBOX1_TIMEOUT 10 /* in millisecs */
35#define MAILBOX1_ATTEMPTS 200 /* 2 seconds */
36
37#define MAILBOX2_TIMEOUT 5 /* in millisecs */
38#define MAILBOX2_ATTEMPTS 10 /* 50ms */
39
40#define RESET_SETTLE_DELAY 25 /* in millisecs */
41
42static CsrResult card_init_slots(card_t *card);
43static CsrResult card_hw_init(card_t *card);
44static CsrResult firmware_present_in_flash(card_t *card);
45static void bootstrap_chip_hw(card_t *card);
46static CsrResult unifi_reset_hardware(card_t *card);
47static CsrResult unifi_hip_init(card_t *card);
48static CsrResult card_access_panic(card_t *card);
49static CsrResult unifi_read_chip_version(card_t *card);
50
51/*
52 * ---------------------------------------------------------------------------
53 * unifi_alloc_card
54 *
55 * Allocate and initialise the card context structure.
56 *
57 * Arguments:
58 * sdio Pointer to SDIO context pointer to pass to low
59 * level i/o functions.
60 * ospriv Pointer to O/S private struct to pass when calling
61 * callbacks to the higher level system.
62 *
63 * Returns:
64 * Pointer to card struct, which represents the driver context or
65 * NULL if the allocation failed.
66 * ---------------------------------------------------------------------------
67 */
68card_t* unifi_alloc_card(CsrSdioFunction *sdio, void *ospriv)
69{
70 card_t *card;
71 u32 i;
72
73
74 card = kzalloc(sizeof(card_t), GFP_KERNEL);
75 if (card == NULL)
76 {
77 return NULL;
78 }
79
80 card->sdio_if = sdio;
81 card->ospriv = ospriv;
82
83 card->unifi_interrupt_seq = 1;
84
85 /* Make these invalid. */
86 card->proc_select = (u32)(-1);
87 card->dmem_page = (u32)(-1);
88 card->pmem_page = (u32)(-1);
89
90 card->bh_reason_host = 0;
91 card->bh_reason_unifi = 0;
92
93 for (i = 0; i < sizeof(card->tx_q_paused_flag) / sizeof(card->tx_q_paused_flag[0]); i++)
94 {
95 card->tx_q_paused_flag[i] = 0;
96 }
97 card->memory_resources_allocated = 0;
98
99 card->low_power_mode = UNIFI_LOW_POWER_DISABLED;
100 card->periodic_wake_mode = UNIFI_PERIODIC_WAKE_HOST_DISABLED;
101
102 card->host_state = UNIFI_HOST_STATE_AWAKE;
103 card->intmode = CSR_WIFI_INTMODE_DEFAULT;
104
105 /*
106 * Memory resources for buffers are allocated when the chip is initialised
107 * because we need configuration information from the firmware.
108 */
109
110 /*
111 * Initialise wait queues and lists
112 */
113 card->fh_command_queue.q_body = card->fh_command_q_body;
114 card->fh_command_queue.q_length = UNIFI_SOFT_COMMAND_Q_LENGTH;
115
116 for (i = 0; i < UNIFI_NO_OF_TX_QS; i++)
117 {
118 card->fh_traffic_queue[i].q_body = card->fh_traffic_q_body[i];
119 card->fh_traffic_queue[i].q_length = UNIFI_SOFT_TRAFFIC_Q_LENGTH;
120 }
121
122
123 /* Initialise mini-coredump pointers in case no coredump buffers
124 * are requested by the OS layer.
125 */
126 card->request_coredump_on_reset = 0;
127 card->dump_next_write = NULL;
128 card->dump_cur_read = NULL;
129 card->dump_buf = NULL;
130
131#ifdef UNIFI_DEBUG
132 /* Determine offset of LSB in pointer for later alignment sanity check.
133 * Synergy integer types have specific widths, which cause compiler
134 * warnings when casting pointer types, e.g. on 64-bit systems.
135 */
136 {
137 u32 val = 0x01234567;
138
139 if (*((u8 *)&val) == 0x01)
140 {
141 card->lsb = sizeof(void *) - 1; /* BE */
142 }
143 else
144 {
145 card->lsb = 0; /* LE */
146 }
147 }
148#endif
149 return card;
150} /* unifi_alloc_card() */
151
152
153/*
154 * ---------------------------------------------------------------------------
155 * unifi_init_card
156 *
157 * Reset the hardware and perform HIP initialization
158 *
159 * Arguments:
160 * card Pointer to card struct
161 *
162 * Returns:
163 * CsrResult code
164 * CSR_RESULT_SUCCESS if successful
165 * ---------------------------------------------------------------------------
166 */
167CsrResult unifi_init_card(card_t *card, s32 led_mask)
168{
169 CsrResult r;
170
171
172 if (card == NULL)
173 {
174 return CSR_WIFI_HIP_RESULT_INVALID_VALUE;
175 }
176
177 r = unifi_init(card);
178 if (r != CSR_RESULT_SUCCESS)
179 {
180 return r;
181 }
182
183 r = unifi_hip_init(card);
184 if (r == CSR_WIFI_HIP_RESULT_NO_DEVICE)
185 {
186 return r;
187 }
188 if (r != CSR_RESULT_SUCCESS)
189 {
190 unifi_error(card->ospriv, "Failed to start host protocol.\n");
191 return r;
192 }
193
194 return CSR_RESULT_SUCCESS;
195}
196
197
198/*
199 * ---------------------------------------------------------------------------
200 * unifi_init
201 *
202 * Init the hardware.
203 *
204 * Arguments:
205 * card Pointer to card struct
206 *
207 * Returns:
208 * CsrResult code
209 * CSR_RESULT_SUCCESS if successful
210 * ---------------------------------------------------------------------------
211 */
212CsrResult unifi_init(card_t *card)
213{
214 CsrResult r;
215 CsrResult csrResult;
216
217 if (card == NULL)
218 {
219 return CSR_WIFI_HIP_RESULT_INVALID_VALUE;
220 }
221
222 /*
223 * Disable the SDIO interrupts while initialising UniFi.
224 * Re-enable them when f/w is running.
225 */
226 csrResult = CsrSdioInterruptDisable(card->sdio_if);
227 if (csrResult == CSR_SDIO_RESULT_NO_DEVICE)
228 {
229 return CSR_WIFI_HIP_RESULT_NO_DEVICE;
230 }
231
232 /*
233 * UniFi's PLL may start with a slow clock (~ 1 MHz) so initially
234 * set the SDIO bus clock to a similar value or SDIO accesses may
235 * fail.
236 */
237 csrResult = CsrSdioMaxBusClockFrequencySet(card->sdio_if, UNIFI_SDIO_CLOCK_SAFE_HZ);
238 if (csrResult != CSR_RESULT_SUCCESS)
239 {
240 r = ConvertCsrSdioToCsrHipResult(card, csrResult);
241 return r;
242 }
243 card->sdio_clock_speed = UNIFI_SDIO_CLOCK_SAFE_HZ;
244
245 /*
246 * Reset UniFi. Note, this only resets the WLAN function part of the chip,
247 * the SDIO interface is not reset.
248 */
249 unifi_trace(card->ospriv, UDBG1, "Resetting UniFi\n");
250 r = unifi_reset_hardware(card);
251 if (r == CSR_WIFI_HIP_RESULT_NO_DEVICE)
252 {
253 return r;
254 }
255 if (r != CSR_RESULT_SUCCESS)
256 {
257 unifi_error(card->ospriv, "Failed to reset UniFi\n");
258 return r;
259 }
260
261 /* Reset the power save mode, to be active until the MLME-reset is complete */
262 r = unifi_configure_low_power_mode(card,
263 UNIFI_LOW_POWER_DISABLED, UNIFI_PERIODIC_WAKE_HOST_DISABLED);
264 if (r != CSR_RESULT_SUCCESS)
265 {
266 unifi_error(card->ospriv, "Failed to set power save mode\n");
267 return r;
268 }
269
270 /*
271 * Set initial value of page registers.
272 * The page registers will be maintained by unifi_read...() and
273 * unifi_write...().
274 */
275 card->proc_select = (u32)(-1);
276 card->dmem_page = (u32)(-1);
277 card->pmem_page = (u32)(-1);
278 r = unifi_write_direct16(card, ChipHelper_HOST_WINDOW3_PAGE(card->helper) * 2, 0);
279 if (r == CSR_WIFI_HIP_RESULT_NO_DEVICE)
280 {
281 return r;
282 }
283 if (r != CSR_RESULT_SUCCESS)
284 {
285 unifi_error(card->ospriv, "Failed to write SHARED_DMEM_PAGE\n");
286 return r;
287 }
288 r = unifi_write_direct16(card, ChipHelper_HOST_WINDOW2_PAGE(card->helper) * 2, 0);
289 if (r == CSR_WIFI_HIP_RESULT_NO_DEVICE)
290 {
291 return r;
292 }
293 if (r != CSR_RESULT_SUCCESS)
294 {
295 unifi_error(card->ospriv, "Failed to write PROG_MEM2_PAGE\n");
296 return r;
297 }
298
299 /*
300 * If the driver has reset UniFi due to previous SDIO failure, this may
301 * have been due to a chip watchdog reset. In this case, the driver may
302 * have requested a mini-coredump which needs to be captured now the
303 * SDIO interface is alive.
304 */
305 (void)unifi_coredump_handle_request(card);
306
307 /*
308 * Probe to see if the UniFi has ROM/flash to boot from. CSR6xxx should do.
309 */
310 r = firmware_present_in_flash(card);
311 if (r == CSR_WIFI_HIP_RESULT_NO_DEVICE)
312 {
313 return r;
314 }
315 if (r == CSR_WIFI_HIP_RESULT_NOT_FOUND)
316 {
317 unifi_error(card->ospriv, "No firmware found\n");
318 }
319 else if (r != CSR_RESULT_SUCCESS)
320 {
321 unifi_error(card->ospriv, "Probe for Flash failed\n");
322 }
323
324 return r;
325} /* unifi_init() */
326
327
328/*
329 * ---------------------------------------------------------------------------
330 * unifi_download
331 *
332 * Load the firmware.
333 *
334 * Arguments:
335 * card Pointer to card struct
336 * led_mask Loader LED mask
337 *
338 * Returns:
339 * CSR_RESULT_SUCCESS on success
340 * CsrResult error code on failure.
341 * ---------------------------------------------------------------------------
342 */
343CsrResult unifi_download(card_t *card, s32 led_mask)
344{
345 CsrResult r;
346 void *dlpriv;
347
348 if (card == NULL)
349 {
350 return CSR_WIFI_HIP_RESULT_INVALID_VALUE;
351 }
352
353 /* Set the loader led mask */
354 card->loader_led_mask = led_mask;
355
356 /* Get the firmware file information */
357 unifi_trace(card->ospriv, UDBG1, "downloading firmware...\n");
358
359 dlpriv = unifi_dl_fw_read_start(card, UNIFI_FW_STA);
360 if (dlpriv == NULL)
361 {
362 return CSR_WIFI_HIP_RESULT_NOT_FOUND;
363 }
364
365 /* Download the firmware. */
366 r = unifi_dl_firmware(card, dlpriv);
367 if (r != CSR_RESULT_SUCCESS)
368 {
369 unifi_error(card->ospriv, "Failed to download firmware\n");
370 return r;
371 }
372
373 /* Free the firmware file information. */
374 unifi_fw_read_stop(card->ospriv, dlpriv);
375
376 return CSR_RESULT_SUCCESS;
377} /* unifi_download() */
378
379
380/*
381 * ---------------------------------------------------------------------------
382 * unifi_hip_init
383 *
384 * This function performs the f/w initialisation sequence as described
385 * in the Unifi Host Interface Protocol Specification.
386 * It allocates memory for host-side slot data and signal queues.
387 *
388 * Arguments:
389 * card Pointer to card struct
390 *
391 * Returns:
392 * CSR_RESULT_SUCCESS on success or else a CSR error code
393 *
394 * Notes:
395 * The firmware must have been downloaded.
396 * ---------------------------------------------------------------------------
397 */
398static CsrResult unifi_hip_init(card_t *card)
399{
400 CsrResult r;
401 CsrResult csrResult;
402
403 r = card_hw_init(card);
404 if (r == CSR_WIFI_HIP_RESULT_NO_DEVICE)
405 {
406 return r;
407 }
408 if (r != CSR_RESULT_SUCCESS)
409 {
410 unifi_error(card->ospriv, "Failed to establish communication with UniFi\n");
411 return r;
412 }
413#ifdef CSR_PRE_ALLOC_NET_DATA
414 /* if there is any preallocated netdata left from the prev session free it now */
415 prealloc_netdata_free(card);
416#endif
417 /*
418 * Allocate memory for host-side slot data and signal queues.
419 * We need the config info read from the firmware to know how much
420 * memory to allocate.
421 */
422 r = card_init_slots(card);
423 if (r == CSR_WIFI_HIP_RESULT_NO_DEVICE)
424 {
425 return r;
426 }
427 if (r != CSR_RESULT_SUCCESS)
428 {
429 unifi_error(card->ospriv, "Init slots failed: %d\n", r);
430 return r;
431 }
432
433 unifi_trace(card->ospriv, UDBG2, "Sending first UniFi interrupt\n");
434
435 r = unifi_set_host_state(card, UNIFI_HOST_STATE_AWAKE);
436 if (r != CSR_RESULT_SUCCESS)
437 {
438 return r;
439 }
440
441 /* Enable the SDIO interrupts now that the f/w is running. */
442 csrResult = CsrSdioInterruptEnable(card->sdio_if);
443 if (csrResult == CSR_SDIO_RESULT_NO_DEVICE)
444 {
445 return CSR_WIFI_HIP_RESULT_NO_DEVICE;
446 }
447
448 /* Signal the UniFi to start handling messages */
449 r = CardGenInt(card);
450 if (r != CSR_RESULT_SUCCESS)
451 {
452 return r;
453 }
454
455 return CSR_RESULT_SUCCESS;
456} /* unifi_hip_init() */
457
458
459/*
460 * ---------------------------------------------------------------------------
461 * _build_sdio_config_data
462 *
463 * Unpack the SDIO configuration information from a buffer read from
464 * UniFi into a host structure.
465 * The data is byte-swapped for a big-endian host if necessary by the
466 * UNPACK... macros.
467 *
468 * Arguments:
469 * card Pointer to card struct
470 * cfg_data Destination structure to unpack into.
471 * cfg_data_buf Source buffer to read from. This should be the raw
472 * data read from UniFi.
473 *
474 * Returns:
475 * None.
476 * ---------------------------------------------------------------------------
477 */
478static void _build_sdio_config_data(sdio_config_data_t *cfg_data,
479 const u8 *cfg_data_buf)
480{
481 s16 offset = 0;
482
483 cfg_data->version = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(cfg_data_buf + offset);
484 offset += SIZEOF_UINT16;
485
486 cfg_data->sdio_ctrl_offset = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(cfg_data_buf + offset);
487 offset += SIZEOF_UINT16;
488
489 cfg_data->fromhost_sigbuf_handle = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(cfg_data_buf + offset);
490 offset += SIZEOF_UINT16;
491
492 cfg_data->tohost_sigbuf_handle = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(cfg_data_buf + offset);
493 offset += SIZEOF_UINT16;
494
495 cfg_data->num_fromhost_sig_frags = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(cfg_data_buf + offset);
496 offset += SIZEOF_UINT16;
497
498 cfg_data->num_tohost_sig_frags = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(cfg_data_buf + offset);
499 offset += SIZEOF_UINT16;
500
501 cfg_data->num_fromhost_data_slots = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(cfg_data_buf + offset);
502 offset += SIZEOF_UINT16;
503
504 cfg_data->num_tohost_data_slots = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(cfg_data_buf + offset);
505 offset += SIZEOF_UINT16;
506
507 cfg_data->data_slot_size = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(cfg_data_buf + offset);
508 offset += SIZEOF_UINT16;
509
510 cfg_data->initialised = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(cfg_data_buf + offset);
511 offset += SIZEOF_UINT16;
512
513 cfg_data->overlay_size = CSR_GET_UINT32_FROM_LITTLE_ENDIAN(cfg_data_buf + offset);
514 offset += SIZEOF_UINT32;
515
516 cfg_data->data_slot_round = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(cfg_data_buf + offset);
517 offset += SIZEOF_UINT16;
518
519 cfg_data->sig_frag_size = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(cfg_data_buf + offset);
520 offset += SIZEOF_UINT16;
521
522 cfg_data->tohost_signal_padding = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(cfg_data_buf + offset);
523} /* _build_sdio_config_data() */
524
525
526/*
527 * - Function ----------------------------------------------------------------
528 * card_hw_init()
529 *
530 * Perform the initialisation procedure described in the UniFi Host
531 * Interface Protocol document (section 3.3.8) and read the run-time
532 * configuration information from the UniFi. This is stuff like number
533 * of bulk data slots etc.
534 *
535 * The card enumeration and SD initialisation has already been done by
536 * the SDIO library, see card_sdio_init().
537 *
538 * The initialisation is done when firmware is ready, i.e. this may need
539 * to be called after a f/w download operation.
540 *
541 * The initialisation procedure goes like this:
542 * - Wait for UniFi to start-up by polling SHARED_MAILBOX1
543 * - Find the symbol table and look up SLT_SDIO_SLOT_CONFIG
544 * - Read the config structure
545 * - Check the "SDIO initialised" flag, if not zero do a h/w reset and
546 * start again
547 * - Decide the number of bulk data slots to allocate, allocate them and
548 * set "SDIO initialised" flag (and generate an interrupt) to say so.
549 *
550 * Arguments:
551 * card Pointer to card struct
552 *
553 * Returns:
 *      CSR_RESULT_SUCCESS on success,
555 * a CSR error code on failure
556 *
557 * Notes:
558 * All data in the f/w is stored in a little endian format, without any
559 * padding bytes. Every read from this memory has to be transformed in
560 * host (cpu specific) format, before it is stored in driver's parameters
 *      or/and structures. Although unifi_card_read16() and unifi_read32() do perform
562 * the conversion internally, unifi_readn() does not.
563 * ---------------------------------------------------------------------------
564 */
static CsrResult card_hw_init(card_t *card)
{
    u32 slut_address;
    u16 initialised;
    u16 finger_print;
    symbol_t slut;
    sdio_config_data_t *cfg_data;
    u8 cfg_data_buf[SDIO_CONFIG_DATA_SIZE];
    CsrResult r;
    void *dlpriv;
    s16 major, minor;
    /* Non-zero when loading patch f/w invalidated the current SLUT,
     * forcing a re-scan from a freshly reported SLUT address. */
    s16 search_4slut_again;
    CsrResult csrResult;

    /*
     * The device revision from the TPLMID_MANF and TPLMID_CARD fields
     * of the CIS are available as
     * card->sdio_if->pDevice->ManfID
     * card->sdio_if->pDevice->AppID
     */

    /*
     * Run in a loop so we can patch.
     */
    do
    {
        /* Reset these each time around the loop. */
        search_4slut_again = 0;
        cfg_data = NULL;

        r = card_wait_for_firmware_to_start(card, &slut_address);
        if (r == CSR_WIFI_HIP_RESULT_NO_DEVICE)
        {
            return r;
        }
        if (r != CSR_RESULT_SUCCESS)
        {
            unifi_error(card->ospriv, "Firmware hasn't started\n");
            return r;
        }
        unifi_trace(card->ospriv, UDBG4, "SLUT addr 0x%lX\n", slut_address);

        /*
         * Firmware has started, but doesn't know full clock configuration yet
         * as some of the information may be in the MIB. Therefore we set an
         * initial SDIO clock speed, faster than UNIFI_SDIO_CLOCK_SAFE_HZ, for
         * the patch download and subsequent firmware initialisation, and
         * full speed UNIFI_SDIO_CLOCK_MAX_HZ will be set once the f/w tells us
         * that it is ready.
         */
        csrResult = CsrSdioMaxBusClockFrequencySet(card->sdio_if, UNIFI_SDIO_CLOCK_INIT_HZ);
        if (csrResult != CSR_RESULT_SUCCESS)
        {
            r = ConvertCsrSdioToCsrHipResult(card, csrResult);
            return r;
        }
        card->sdio_clock_speed = UNIFI_SDIO_CLOCK_INIT_HZ;

        /*
         * Check the SLUT fingerprint.
         * The slut_address is a generic pointer so we must use unifi_card_read16().
         */
        unifi_trace(card->ospriv, UDBG4, "Looking for SLUT finger print\n");
        finger_print = 0;
        r = unifi_card_read16(card, slut_address, &finger_print);
        if (r == CSR_WIFI_HIP_RESULT_NO_DEVICE)
        {
            return r;
        }
        if (r != CSR_RESULT_SUCCESS)
        {
            unifi_error(card->ospriv, "Failed to read SLUT finger print\n");
            return r;
        }

        if (finger_print != SLUT_FINGERPRINT)
        {
            unifi_error(card->ospriv, "Failed to find Symbol lookup table fingerprint\n");
            return CSR_RESULT_FAILURE;
        }

        /* Symbol table starts immediately after the fingerprint */
        slut_address += 2;

        /* Search the table until either the end marker is found, or the
         * loading of patch firmware invalidates the current table.
         */
        while (!search_4slut_again)
        {
            u16 s;
            u32 l;

            /* Each SLUT entry is a 16-bit id followed by a 32-bit object. */
            r = unifi_card_read16(card, slut_address, &s);
            if (r != CSR_RESULT_SUCCESS)
            {
                return r;
            }
            slut_address += 2;

            if (s == CSR_SLT_END)
            {
                unifi_trace(card->ospriv, UDBG3, " found CSR_SLT_END\n");
                break;
            }

            r = unifi_read32(card, slut_address, &l);
            if (r != CSR_RESULT_SUCCESS)
            {
                return r;
            }
            slut_address += 4;

            slut.id = s;
            slut.obj = l;

            unifi_trace(card->ospriv, UDBG3, " found SLUT id %02d.%08lx\n", slut.id, slut.obj);
            switch (slut.id)
            {
                case CSR_SLT_SDIO_SLOT_CONFIG:
                    cfg_data = &card->config_data;
                    /*
                     * unifi_card_readn reads n bytes from the card, where data is stored
                     * in a little endian format, without any padding bytes. So, we
                     * can not just pass the cfg_data pointer or use the
                     * sizeof(sdio_config_data_t) since the structure in the host can
                     * be big endian formatted or have padding bytes for alignment.
                     * We use a char buffer to read the data from the card.
                     */
                    r = unifi_card_readn(card, slut.obj, cfg_data_buf, SDIO_CONFIG_DATA_SIZE);
                    if (r == CSR_WIFI_HIP_RESULT_NO_DEVICE)
                    {
                        return r;
                    }
                    if (r != CSR_RESULT_SUCCESS)
                    {
                        unifi_error(card->ospriv, "Failed to read config data\n");
                        return r;
                    }
                    /* .. and then we copy the data to the host structure */
                    _build_sdio_config_data(cfg_data, cfg_data_buf);

                    /* Make sure the from host data slots are what we expect
                        we reserve 2 for commands and there should be at least
                        1 left for each access category */
                    if ((cfg_data->num_fromhost_data_slots < UNIFI_RESERVED_COMMAND_SLOTS)
                        || (cfg_data->num_fromhost_data_slots - UNIFI_RESERVED_COMMAND_SLOTS) / UNIFI_NO_OF_TX_QS == 0)
                    {
                        unifi_error(card->ospriv, "From host data slots %d\n", cfg_data->num_fromhost_data_slots);
                        unifi_error(card->ospriv, "need to be (queues * x + 2) (UNIFI_RESERVED_COMMAND_SLOTS for commands)\n");
                        return CSR_RESULT_FAILURE;
                    }

                    /* Configure SDIO to-block-size padding */
                    if (card->sdio_io_block_pad)
                    {
                        /*
                         * Firmware limits the maximum padding size via data_slot_round.
                         * Therefore when padding to whole block sizes, the block size
                         * must be configured correctly by adjusting CSR_WIFI_HIP_SDIO_BLOCK_SIZE.
                         */
                        if (cfg_data->data_slot_round < card->sdio_io_block_size)
                        {
                            unifi_error(card->ospriv,
                                        "Configuration error: Block size of %d exceeds f/w data_slot_round of %d\n",
                                        card->sdio_io_block_size, cfg_data->data_slot_round);
                            return CSR_WIFI_HIP_RESULT_INVALID_VALUE;
                        }

                        /*
                         * To force the To-Host signals to be rounded up to the SDIO block
                         * size, we need to write the To-Host Signal Padding Fragments
                         * field of the SDIO configuration in UniFi.
                         */
                        if ((card->sdio_io_block_size % cfg_data->sig_frag_size) != 0)
                        {
                            unifi_error(card->ospriv, "Configuration error: Can not pad to-host signals.\n");
                            return CSR_WIFI_HIP_RESULT_INVALID_VALUE;
                        }
                        cfg_data->tohost_signal_padding = (u16) (card->sdio_io_block_size / cfg_data->sig_frag_size);
                        unifi_info(card->ospriv, "SDIO block size %d requires %d padding chunks\n",
                                   card->sdio_io_block_size, cfg_data->tohost_signal_padding);
                        r = unifi_card_write16(card, slut.obj + SDIO_TO_HOST_SIG_PADDING_OFFSET, cfg_data->tohost_signal_padding);
                        if (r == CSR_WIFI_HIP_RESULT_NO_DEVICE)
                        {
                            return r;
                        }
                        if (r != CSR_RESULT_SUCCESS)
                        {
                            unifi_error(card->ospriv, "Failed to write To-Host Signal Padding Fragments\n");
                            return r;
                        }
                    }

                    /* Reconstruct the Generic Pointer address of the
                     * SDIO Control Data Struct.
                     */
                    card->sdio_ctrl_addr = cfg_data->sdio_ctrl_offset | (UNIFI_SH_DMEM << 24);
                    card->init_flag_addr = slut.obj + SDIO_INIT_FLAG_OFFSET;
                    break;

                case CSR_SLT_BUILD_ID_NUMBER:
                {
                    u32 n;
                    r = unifi_read32(card, slut.obj, &n);
                    if (r == CSR_WIFI_HIP_RESULT_NO_DEVICE)
                    {
                        return r;
                    }
                    if (r != CSR_RESULT_SUCCESS)
                    {
                        unifi_error(card->ospriv, "Failed to read build id\n");
                        return r;
                    }
                    card->build_id = n;
                }
                break;

                case CSR_SLT_BUILD_ID_STRING:
                    r = unifi_readnz(card, slut.obj, card->build_id_string,
                                     sizeof(card->build_id_string));
                    if (r == CSR_WIFI_HIP_RESULT_NO_DEVICE)
                    {
                        return r;
                    }
                    if (r != CSR_RESULT_SUCCESS)
                    {
                        unifi_error(card->ospriv, "Failed to read build string\n");
                        return r;
                    }
                    break;

                case CSR_SLT_PERSISTENT_STORE_DB:
                    break;

                case CSR_SLT_BOOT_LOADER_CONTROL:

                    /* This command copies most of the station firmware
                     * image from ROM into program RAM. It also clears
                     * out the zerod data and sets up the initialised
                     * data. */
                    r = unifi_do_loader_op(card, slut.obj + 6, UNIFI_BOOT_LOADER_LOAD_STA);
                    if (r != CSR_RESULT_SUCCESS)
                    {
                        unifi_error(card->ospriv, "Failed to write loader load image command\n");
                        return r;
                    }

                    dlpriv = unifi_dl_fw_read_start(card, UNIFI_FW_STA);

                    /* dlpriv might be NULL, we still need to do the do_loader_op step. */
                    if (dlpriv != NULL)
                    {
                        /* Download the firmware. */
                        r = unifi_dl_patch(card, dlpriv, slut.obj);

                        /* Free the firmware file information. */
                        unifi_fw_read_stop(card->ospriv, dlpriv);

                        if (r != CSR_RESULT_SUCCESS)
                        {
                            unifi_error(card->ospriv, "Failed to patch firmware\n");
                            return r;
                        }
                    }

                    /* This command starts the firmware image that we want (the
                     * station by default) with any patches required applied. */
                    r = unifi_do_loader_op(card, slut.obj + 6, UNIFI_BOOT_LOADER_RESTART);
                    if (r != CSR_RESULT_SUCCESS)
                    {
                        unifi_error(card->ospriv, "Failed to write loader restart command\n");
                        return r;
                    }

                    /* The now running patch f/w defines a new SLUT data structure -
                     * the current one is no longer valid. We must drop out of the
                     * processing loop and enumerate the new SLUT (which may appear
                     * at a different offset).
                     */
                    search_4slut_again = 1;
                    break;

                case CSR_SLT_PANIC_DATA_PHY:
                    card->panic_data_phy_addr = slut.obj;
                    break;

                case CSR_SLT_PANIC_DATA_MAC:
                    card->panic_data_mac_addr = slut.obj;
                    break;

                default:
                    /* do nothing */
                    break;
            }
        } /* while */
    } while (search_4slut_again);

    /* Did we find the Config Data ? */
    if (cfg_data == NULL)
    {
        unifi_error(card->ospriv, "Failed to find SDIO_SLOT_CONFIG Symbol\n");
        return CSR_RESULT_FAILURE;
    }

    /*
     * Has this card already been initialised?
     * If so, return an error so we do a h/w reset and start again.
     */
    r = unifi_card_read16(card, card->init_flag_addr, &initialised);
    if (r == CSR_WIFI_HIP_RESULT_NO_DEVICE)
    {
        return r;
    }
    if (r != CSR_RESULT_SUCCESS)
    {
        unifi_error(card->ospriv, "Failed to read init flag at %08lx\n",
                    card->init_flag_addr);
        return r;
    }
    if (initialised != 0)
    {
        return CSR_RESULT_FAILURE;
    }


    /*
     * Now check the UniFi firmware version
     */
    major = (cfg_data->version >> 8) & 0xFF;
    minor = cfg_data->version & 0xFF;
    unifi_info(card->ospriv, "UniFi f/w protocol version %d.%d (driver %d.%d)\n",
               major, minor,
               UNIFI_HIP_MAJOR_VERSION, UNIFI_HIP_MINOR_VERSION);

    unifi_info(card->ospriv, "Firmware build %u: %s\n",
               card->build_id, card->build_id_string);

    /* A major version mismatch is fatal (unless the check is compiled out). */
    if (major != UNIFI_HIP_MAJOR_VERSION)
    {
        unifi_error(card->ospriv, "UniFi f/w protocol major version (%d) is different from driver (v%d.%d)\n",
                    major, UNIFI_HIP_MAJOR_VERSION, UNIFI_HIP_MINOR_VERSION);
#ifndef CSR_WIFI_DISABLE_HIP_VERSION_CHECK
        return CSR_RESULT_FAILURE;
#endif
    }
    if (minor < UNIFI_HIP_MINOR_VERSION)
    {
        unifi_error(card->ospriv, "UniFi f/w protocol version (v%d.%d) is older than minimum required by driver (v%d.%d).\n",
                    major, minor,
                    UNIFI_HIP_MAJOR_VERSION, UNIFI_HIP_MINOR_VERSION);
#ifndef CSR_WIFI_DISABLE_HIP_VERSION_CHECK
        return CSR_RESULT_FAILURE;
#endif
    }

    /* Read panic codes from a previous firmware panic. If the firmware has
     * not panicked since power was applied (e.g. power-off hard reset)
     * the stored panic codes will not be updated.
     */
    unifi_read_panic(card);

    return CSR_RESULT_SUCCESS;
} /* card_hw_init() */
928
929
930/*
931 * ---------------------------------------------------------------------------
932 * card_wait_for_unifi_to_reset
933 *
934 * Waits for a reset to complete by polling the WLAN function enable
935 * bit (which is cleared on reset).
936 *
937 * Arguments:
938 * card Pointer to card struct
939 *
940 * Returns:
941 * CSR_RESULT_SUCCESS on success, CSR error code on failure.
942 * ---------------------------------------------------------------------------
943 */
static CsrResult card_wait_for_unifi_to_reset(card_t *card)
{
    s16 i;
    CsrResult r;
    u8 io_enable;
    CsrResult csrResult;

    r = CSR_RESULT_SUCCESS;
    for (i = 0; i < MAILBOX2_ATTEMPTS; i++)
    {
        unifi_trace(card->ospriv, UDBG1, "waiting for reset to complete, attempt %d\n", i);
        /* Later chips are polled via the CCCR IO Ready register (F0);
         * earlier ones via the IO Enable register. */
        if (card->chip_id > SDIO_CARD_ID_UNIFI_2)
        {
            /* It's quite likely that this read will timeout for the
             * first few tries - especially if we have reset via
             * DBG_RESET.
             */
#if defined (CSR_WIFI_HIP_DEBUG_OFFLINE) && defined (CSR_WIFI_HIP_SDIO_TRACE)
            unifi_debug_log_to_buf("m0@%02X=", SDIO_IO_READY);
#endif
            csrResult = CsrSdioF0Read8(card->sdio_if, SDIO_IO_READY, &io_enable);
#if defined (CSR_WIFI_HIP_DEBUG_OFFLINE) && defined (CSR_WIFI_HIP_SDIO_TRACE)
            if (csrResult != CSR_RESULT_SUCCESS)
            {
                unifi_debug_log_to_buf("error=%X\n", csrResult);
            }
            else
            {
                unifi_debug_log_to_buf("%X\n", io_enable);
            }
#endif
            if (csrResult == CSR_SDIO_RESULT_NO_DEVICE)
            {
                return CSR_WIFI_HIP_RESULT_NO_DEVICE;
            }
            r = CSR_RESULT_SUCCESS;
            if (csrResult != CSR_RESULT_SUCCESS)
            {
                r = ConvertCsrSdioToCsrHipResult(card, csrResult);
            }
        }
        else
        {
            r = sdio_read_f0(card, SDIO_IO_ENABLE, &io_enable);
        }
        if (r == CSR_WIFI_HIP_RESULT_NO_DEVICE)
        {
            return r;
        }
        if (r == CSR_RESULT_SUCCESS)
        {
            u16 mbox2;
            s16 enabled = io_enable & (1 << card->function);

            /* The WLAN function bit is cleared by reset, so "disabled"
             * here means the reset has completed. */
            if (!enabled)
            {
                unifi_trace(card->ospriv, UDBG1,
                            "Reset complete (function %d is disabled) in ~ %u msecs\n",
                            card->function, i * MAILBOX2_TIMEOUT);

                /* Enable WLAN function and verify MAILBOX2 is zero'd */
                csrResult = CsrSdioFunctionEnable(card->sdio_if);
                if (csrResult != CSR_RESULT_SUCCESS)
                {
                    r = ConvertCsrSdioToCsrHipResult(card, csrResult);
                    unifi_error(card->ospriv, "CsrSdioFunctionEnable failed %d\n", r);
                    break;
                }
            }

            r = unifi_read_direct16(card, ChipHelper_SDIO_HIP_HANDSHAKE(card->helper) * 2, &mbox2);
            if (r != CSR_RESULT_SUCCESS)
            {
                unifi_error(card->ospriv, "read HIP_HANDSHAKE failed %d\n", r);
                break;
            }
            if (mbox2 != 0)
            {
                unifi_error(card->ospriv, "MAILBOX2 non-zero after reset (mbox2 = %04x)\n", mbox2);
                r = CSR_RESULT_FAILURE;
            }
            /* Once a successful register read happened, the loop always
             * terminates here, with r reflecting the outcome. */
            break;
        }
        else
        {
            if (card->chip_id > SDIO_CARD_ID_UNIFI_2)
            {
                /* We ignore read failures for the first few reads,
                 * they are probably benign. */
                if (i > MAILBOX2_ATTEMPTS / 4)
                {
                    unifi_trace(card->ospriv, UDBG1, "Failed to read CCCR IO Ready register while polling for reset\n");
                }
            }
            else
            {
                unifi_trace(card->ospriv, UDBG1, "Failed to read CCCR IO Enable register while polling for reset\n");
            }
        }
        CsrThreadSleep(MAILBOX2_TIMEOUT);
    }

    /* Loop ran to exhaustion without a successful poll: report timeout. */
    if (r == CSR_RESULT_SUCCESS && i == MAILBOX2_ATTEMPTS)
    {
        unifi_trace(card->ospriv, UDBG1, "Timeout waiting for UniFi to complete reset\n");
        r = CSR_RESULT_FAILURE;
    }

    return r;
} /* card_wait_for_unifi_to_reset() */
1054
1055
1056/*
1057 * ---------------------------------------------------------------------------
1058 * card_wait_for_unifi_to_disable
1059 *
1060 * Waits for the function to become disabled by polling the
1061 * IO_READY bit.
1062 *
1063 * Arguments:
1064 * card Pointer to card struct
1065 *
1066 * Returns:
1067 * CSR_RESULT_SUCCESS on success, CSR error code on failure.
1068 *
1069 * Notes: This function can only be used with
1070 * card->chip_id > SDIO_CARD_ID_UNIFI_2
1071 * ---------------------------------------------------------------------------
1072 */
static CsrResult card_wait_for_unifi_to_disable(card_t *card)
{
    s16 i;
    CsrResult r;
    u8 io_enable;
    CsrResult csrResult;

    /* This polling method relies on the F0 IO_READY register, which is
     * only usable on chips newer than UNIFI_2 (see function header). */
    if (card->chip_id <= SDIO_CARD_ID_UNIFI_2)
    {
        unifi_error(card->ospriv,
                    "Function reset method not supported for chip_id=%d\n",
                    card->chip_id);
        return CSR_RESULT_FAILURE;
    }

    r = CSR_RESULT_SUCCESS;
    for (i = 0; i < MAILBOX2_ATTEMPTS; i++)
    {
        unifi_trace(card->ospriv, UDBG1, "waiting for disable to complete, attempt %d\n", i);

        /*
         * It's quite likely that this read will timeout for the
         * first few tries - especially if we have reset via
         * DBG_RESET.
         */
#if defined (CSR_WIFI_HIP_DEBUG_OFFLINE) && defined (CSR_WIFI_HIP_SDIO_TRACE)
        unifi_debug_log_to_buf("r0@%02X=", SDIO_IO_READY);
#endif
        csrResult = CsrSdioF0Read8(card->sdio_if, SDIO_IO_READY, &io_enable);
#if defined (CSR_WIFI_HIP_DEBUG_OFFLINE) && defined (CSR_WIFI_HIP_SDIO_TRACE)
        if (csrResult != CSR_RESULT_SUCCESS)
        {
            unifi_debug_log_to_buf("error=%X\n", csrResult);
        }
        else
        {
            unifi_debug_log_to_buf("%X\n", io_enable);
        }
#endif
        if (csrResult == CSR_SDIO_RESULT_NO_DEVICE)
        {
            return CSR_WIFI_HIP_RESULT_NO_DEVICE;
        }
        if (csrResult == CSR_RESULT_SUCCESS)
        {
            s16 enabled = io_enable & (1 << card->function);
            r = CSR_RESULT_SUCCESS;
            /* Function bit cleared => the disable has taken effect. */
            if (!enabled)
            {
                unifi_trace(card->ospriv, UDBG1,
                            "Disable complete (function %d is disabled) in ~ %u msecs\n",
                            card->function, i * MAILBOX2_TIMEOUT);

                break;
            }
        }
        else
        {
            /*
             * We ignore read failures for the first few reads,
             * they are probably benign.
             */
            r = ConvertCsrSdioToCsrHipResult(card, csrResult);
            if (i > (MAILBOX2_ATTEMPTS / 4))
            {
                unifi_trace(card->ospriv, UDBG1,
                            "Failed to read CCCR IO Ready register while polling for disable\n");
            }
        }
        CsrThreadSleep(MAILBOX2_TIMEOUT);
    }

    /* All attempts exhausted without seeing the function disabled. */
    if ((r == CSR_RESULT_SUCCESS) && (i == MAILBOX2_ATTEMPTS))
    {
        unifi_trace(card->ospriv, UDBG1, "Timeout waiting for UniFi to complete disable\n");
        r = CSR_RESULT_FAILURE;
    }

    return r;
} /* card_wait_for_unifi_to_disable() */
1153
1154
1155/*
1156 * ---------------------------------------------------------------------------
1157 * card_wait_for_firmware_to_start
1158 *
1159 * Polls the MAILBOX1 register for a non-zero value.
1160 * Then reads MAILBOX0 and forms the two values into a 32-bit address
1161 * which is returned to the caller.
1162 *
1163 * Arguments:
1164 * card Pointer to card struct
1165 * paddr Pointer to receive the UniFi address formed
1166 * by concatenating MAILBOX1 and MAILBOX0.
1167 *
1168 * Returns:
1169 * CSR_RESULT_SUCCESS on success, CSR error code on failure.
1170 * ---------------------------------------------------------------------------
1171 */
CsrResult card_wait_for_firmware_to_start(card_t *card, u32 *paddr)
{
    s32 i;
    u16 mbox0, mbox1;
    CsrResult r;

    /*
     * Wait for UniFi to initialise its data structures by polling
     * the SHARED_MAILBOX1 register.
     * Experience shows this is typically 120ms.
     */
    CsrThreadSleep(MAILBOX1_TIMEOUT);

    mbox1 = 0;
    unifi_trace(card->ospriv, UDBG1, "waiting for MAILBOX1 to be non-zero...\n");
    for (i = 0; i < MAILBOX1_ATTEMPTS; i++)
    {
        r = unifi_read_direct16(card, ChipHelper_MAILBOX1(card->helper) * 2, &mbox1);
        if (r == CSR_WIFI_HIP_RESULT_NO_DEVICE)
        {
            return r;
        }
        if (r != CSR_RESULT_SUCCESS)
        {
            /* These reads can fail if UniFi isn't up yet, so try again */
            unifi_warning(card->ospriv, "Failed to read UniFi Mailbox1 register\n");
        }

        if ((r == CSR_RESULT_SUCCESS) && (mbox1 != 0))
        {
            unifi_trace(card->ospriv, UDBG1, "MAILBOX1 ready (0x%04X) in %u millisecs\n",
                        mbox1, i * MAILBOX1_TIMEOUT);

            /* Read the MAILBOX1 again in case we caught the value as it
             * changed. */
            r = unifi_read_direct16(card, ChipHelper_MAILBOX1(card->helper) * 2, &mbox1);
            if (r == CSR_WIFI_HIP_RESULT_NO_DEVICE)
            {
                return r;
            }
            if (r != CSR_RESULT_SUCCESS)
            {
                unifi_error(card->ospriv, "Failed to read UniFi Mailbox1 register for second time\n");
                return r;
            }
            unifi_trace(card->ospriv, UDBG1, "MAILBOX1 value=0x%04X\n", mbox1);

            break;
        }

        CsrThreadSleep(MAILBOX1_TIMEOUT);
        /* Emit a progress trace every 100 attempts. */
        if ((i % 100) == 99)
        {
            unifi_trace(card->ospriv, UDBG2, "MAILBOX1 not ready (0x%X), still trying...\n", mbox1);
        }
    }

    /* MAILBOX1 never went non-zero: the firmware did not start. */
    if ((r == CSR_RESULT_SUCCESS) && (mbox1 == 0))
    {
        unifi_trace(card->ospriv, UDBG1, "Timeout waiting for firmware to start, Mailbox1 still 0 after %d ms\n",
                    MAILBOX1_ATTEMPTS * MAILBOX1_TIMEOUT);
        return CSR_RESULT_FAILURE;
    }


    /*
     * Complete the reset handshake by setting MAILBOX2 to 0xFFFF
     */
    r = unifi_write_direct16(card, ChipHelper_SDIO_HIP_HANDSHAKE(card->helper) * 2, 0xFFFF);
    if (r == CSR_WIFI_HIP_RESULT_NO_DEVICE)
    {
        return r;
    }
    if (r != CSR_RESULT_SUCCESS)
    {
        unifi_error(card->ospriv, "Failed to write f/w startup handshake to MAILBOX2\n");
        return r;
    }


    /*
     * Read the Symbol Look Up Table (SLUT) offset.
     * Top 16 bits are in mbox1, read the lower 16 bits from mbox0.
     */
    mbox0 = 0;
    r = unifi_read_direct16(card, ChipHelper_MAILBOX0(card->helper) * 2, &mbox0);
    if (r == CSR_WIFI_HIP_RESULT_NO_DEVICE)
    {
        return r;
    }
    if (r != CSR_RESULT_SUCCESS)
    {
        unifi_error(card->ospriv, "Failed to read UniFi Mailbox0 register\n");
        return r;
    }

    /* Combine the mailboxes into the 32-bit SLUT address for the caller. */
    *paddr = (((u32)mbox1 << 16) | mbox0);

    return CSR_RESULT_SUCCESS;
} /* card_wait_for_firmware_to_start() */
1272
1273
1274/*
1275 * ---------------------------------------------------------------------------
1276 * unifi_capture_panic
1277 *
1278 * Attempt to capture panic codes from the firmware. This may involve
1279 * warm reset of the chip to regain access following a watchdog reset.
1280 *
1281 * Arguments:
1282 * card Pointer to card struct
1283 *
1284 * Returns:
1285 * CSR_RESULT_SUCCESS if panic codes were captured, or none available
1286 * CSR_RESULT_FAILURE if the driver could not access function 1
1287 * ---------------------------------------------------------------------------
1288 */
1289CsrResult unifi_capture_panic(card_t *card)
1290{
1291
1292 /* The firmware must have previously initialised to read the panic addresses
1293 * from the SLUT
1294 */
1295 if (!card->panic_data_phy_addr || !card->panic_data_mac_addr)
1296 {
1297 return CSR_RESULT_SUCCESS;
1298 }
1299
1300 /* Ensure we can access function 1 following a panic/watchdog reset */
1301 if (card_access_panic(card) == CSR_RESULT_SUCCESS)
1302 {
1303 /* Read the panic codes */
1304 unifi_read_panic(card);
1305 }
1306 else
1307 {
1308 unifi_info(card->ospriv, "Unable to read panic codes");
1309 }
1310
1311 return CSR_RESULT_SUCCESS;
1312}
1313
1314
1315/*
1316 * ---------------------------------------------------------------------------
1317 * card_access_panic
1318 * Attempt to read the WLAN SDIO function in order to read panic codes
1319 * and perform various reset steps to regain access if the read fails.
1320 *
1321 * Arguments:
1322 * card Pointer to card struct
1323 *
1324 * Returns:
1325 * CSR_RESULT_SUCCESS if panic codes can be read
1326 * CSR error code if panic codes can not be read
1327 * ---------------------------------------------------------------------------
1328 */
static CsrResult card_access_panic(card_t *card)
{
    u16 data_u16 = 0;
    s32 i;
    CsrResult r, sr;

    /* A chip version of zero means that the version never got successfully read
     * during reset. In this case give up because it will not be possible to
     * verify the chip version.
     */
    if (!card->chip_version)
    {
        unifi_info(card->ospriv, "Unknown chip version\n");
        return CSR_RESULT_FAILURE;
    }

    /* Ensure chip is awake or access to function 1 will fail */
    r = unifi_set_host_state(card, UNIFI_HOST_STATE_AWAKE);
    if (r == CSR_WIFI_HIP_RESULT_NO_DEVICE)
    {
        return r;
    }
    if (r != CSR_RESULT_SUCCESS)
    {
        unifi_error(card->ospriv, "unifi_set_host_state() failed %d\n", r);
        return CSR_RESULT_FAILURE; /* Card is probably unpowered */
    }
    /* Allow the chip time to wake before probing it over SDIO */
    CsrThreadSleep(20);

    /* Up to three attempts to read back a chip version matching the one
     * captured at reset, with progressively stronger recovery between
     * attempts: (i==0) re-enable the SDIO function, (i==1) re-assert the
     * awake host state, (i==2) s/w reset the whole card.
     */
    for (i = 0; i < 3; i++)
    {
        sr = CsrSdioRead16(card->sdio_if, CHIP_HELPER_UNIFI_GBL_CHIP_VERSION * 2, &data_u16);
        if (sr != CSR_RESULT_SUCCESS || data_u16 != card->chip_version)
        {
            unifi_info(card->ospriv, "Failed to read valid chip version sr=%d (0x%04x want 0x%04x) try %d\n",
                       sr, data_u16, card->chip_version, i);

            /* Set clock speed low */
            sr = CsrSdioMaxBusClockFrequencySet(card->sdio_if, UNIFI_SDIO_CLOCK_SAFE_HZ);
            if (sr != CSR_RESULT_SUCCESS)
            {
                unifi_error(card->ospriv, "CsrSdioMaxBusClockFrequencySet() failed1 %d\n", sr);
                r = ConvertCsrSdioToCsrHipResult(card, sr);
            }
            /* Record the lowered speed so later HIP code knows the bus state */
            card->sdio_clock_speed = UNIFI_SDIO_CLOCK_SAFE_HZ;

            /* First try re-enabling function in case a f/w watchdog reset disabled it */
            if (i == 0)
            {
                unifi_info(card->ospriv, "Try function enable\n");
                sr = CsrSdioFunctionEnable(card->sdio_if);
                if (sr != CSR_RESULT_SUCCESS)
                {
                    r = ConvertCsrSdioToCsrHipResult(card, sr);
                    unifi_error(card->ospriv, "CsrSdioFunctionEnable failed %d (HIP %d)\n", sr, r);
                }
                continue;
            }

            /* Second try, set awake */
            unifi_info(card->ospriv, "Try set awake\n");

            /* Ensure chip is awake */
            r = unifi_set_host_state(card, UNIFI_HOST_STATE_AWAKE);
            if (r == CSR_WIFI_HIP_RESULT_NO_DEVICE)
            {
                return r;
            }
            if (r != CSR_RESULT_SUCCESS)
            {
                unifi_error(card->ospriv, "unifi_set_host_state() failed2 %d\n", r);
            }

            /* Set clock speed low in case setting the host state raised it, which
             * would only happen if host state was previously TORPID
             */
            sr = CsrSdioMaxBusClockFrequencySet(card->sdio_if, UNIFI_SDIO_CLOCK_SAFE_HZ);
            if (sr != CSR_RESULT_SUCCESS)
            {
                unifi_error(card->ospriv, "CsrSdioMaxBusClockFrequencySet() failed2 %d\n", sr);
            }
            card->sdio_clock_speed = UNIFI_SDIO_CLOCK_SAFE_HZ;

            if (i == 1)
            {
                continue;
            }

            /* Perform a s/w reset to preserve as much as the card state as possible,
             * (mainly the preserve RAM). The context will be lost for coredump - but as we
             * were unable to access the WLAN function for panic, the coredump would have
             * also failed without a reset.
             */
            unifi_info(card->ospriv, "Try s/w reset\n");

            r = unifi_card_hard_reset(card);
            if (r != CSR_RESULT_SUCCESS)
            {
                unifi_error(card->ospriv, "unifi_card_hard_reset() failed %d\n", r);
            }
        }
        else
        {
            if (i > 0)
            {
                unifi_info(card->ospriv, "Read chip version 0x%x after %d retries\n", data_u16, i);
            }
            break;
        }
    }

    /* Report the status of the last SDIO operation performed; on the
     * success path this is the chip-version read that matched.
     */
    r = ConvertCsrSdioToCsrHipResult(card, sr);
    return r;
}
1443
1444
1445/*
1446 * ---------------------------------------------------------------------------
1447 * unifi_read_panic
1448 * Reads, saves and prints panic codes stored by the firmware in UniFi's
1449 * preserve RAM by the last panic that occurred since chip was powered.
1450 * Nothing is saved if the panic codes are read as zero.
1451 *
1452 * Arguments:
1453 * card Pointer to card struct
1454 *
1455 * Returns:
1456 * ---------------------------------------------------------------------------
1457 */
1458void unifi_read_panic(card_t *card)
1459{
1460 CsrResult r;
1461 u16 p_code, p_arg;
1462
1463 /* The firmware must have previously initialised to read the panic addresses
1464 * from the SLUT
1465 */
1466 if (!card->panic_data_phy_addr || !card->panic_data_mac_addr)
1467 {
1468 return;
1469 }
1470
1471 /* Get the panic data from PHY */
1472 r = unifi_card_read16(card, card->panic_data_phy_addr, &p_code);
1473 if (r != CSR_RESULT_SUCCESS)
1474 {
1475 unifi_error(card->ospriv, "capture_panic: unifi_read16 %08x failed %d\n", card->panic_data_phy_addr, r);
1476 p_code = 0;
1477 }
1478 if (p_code)
1479 {
1480 r = unifi_card_read16(card, card->panic_data_phy_addr + 2, &p_arg);
1481 if (r != CSR_RESULT_SUCCESS)
1482 {
1483 unifi_error(card->ospriv, "capture_panic: unifi_read16 %08x failed %d\n", card->panic_data_phy_addr + 2, r);
1484 }
1485 unifi_error(card->ospriv, "Last UniFi PHY PANIC %04x arg %04x\n", p_code, p_arg);
1486 card->last_phy_panic_code = p_code;
1487 card->last_phy_panic_arg = p_arg;
1488 }
1489
1490 /* Get the panic data from MAC */
1491 r = unifi_card_read16(card, card->panic_data_mac_addr, &p_code);
1492 if (r != CSR_RESULT_SUCCESS)
1493 {
1494 unifi_error(card->ospriv, "capture_panic: unifi_read16 %08x failed %d\n", card->panic_data_mac_addr, r);
1495 p_code = 0;
1496 }
1497 if (p_code)
1498 {
1499 r = unifi_card_read16(card, card->panic_data_mac_addr + 2, &p_arg);
1500 if (r != CSR_RESULT_SUCCESS)
1501 {
1502 unifi_error(card->ospriv, "capture_panic: unifi_read16 %08x failed %d\n", card->panic_data_mac_addr + 2, r);
1503 }
1504 unifi_error(card->ospriv, "Last UniFi MAC PANIC %04x arg %04x\n", p_code, p_arg);
1505 card->last_mac_panic_code = p_code;
1506 card->last_mac_panic_arg = p_arg;
1507 }
1508
1509}
1510
1511
1512/*
1513 * ---------------------------------------------------------------------------
1514 * card_allocate_memory_resources
1515 *
1516 * Allocates memory for the from-host, to-host bulk data slots,
1517 * soft queue buffers and bulk data buffers.
1518 *
1519 * Arguments:
1520 * card Pointer to card struct
1521 *
1522 * Returns:
1523 * CSR_RESULT_SUCCESS on success, CSR error code on failure.
1524 * ---------------------------------------------------------------------------
1525 */
static CsrResult card_allocate_memory_resources(card_t *card)
{
    /* Note: r is reused as a soft-queue loop index below, not a result code */
    s16 n, i, k, r;
    sdio_config_data_t *cfg_data;

    /* Reset any state carried forward from a previous life */
    card->fh_command_queue.q_rd_ptr = 0;
    card->fh_command_queue.q_wr_ptr = 0;
    (void)scnprintf(card->fh_command_queue.name, UNIFI_QUEUE_NAME_MAX_LENGTH,
                    "fh_cmd_q");
    for (i = 0; i < UNIFI_NO_OF_TX_QS; i++)
    {
        card->fh_traffic_queue[i].q_rd_ptr = 0;
        card->fh_traffic_queue[i].q_wr_ptr = 0;
        (void)scnprintf(card->fh_traffic_queue[i].name,
                        UNIFI_QUEUE_NAME_MAX_LENGTH, "fh_data_q%d", i);
    }
#ifndef CSR_WIFI_HIP_TA_DISABLE
    unifi_ta_sampling_init(card);
#endif
    /* Convenience short-cut */
    cfg_data = &card->config_data;

    /*
     * Allocate memory for the from-host and to-host signal buffers.
     */
    /* NOTE(review): on any allocation failure below this function returns
     * without freeing earlier allocations; the caller (card_init_slots)
     * calls card_free_memory_resources() on failure, which releases them.
     */
    card->fh_buffer.buf = kmalloc(UNIFI_FH_BUF_SIZE, GFP_KERNEL);
    if (card->fh_buffer.buf == NULL)
    {
        unifi_error(card->ospriv, "Failed to allocate memory for F-H signals\n");
        return CSR_WIFI_HIP_RESULT_NO_MEMORY;
    }
    card->fh_buffer.bufsize = UNIFI_FH_BUF_SIZE;
    card->fh_buffer.ptr = card->fh_buffer.buf;
    card->fh_buffer.count = 0;

    card->th_buffer.buf = kmalloc(UNIFI_FH_BUF_SIZE, GFP_KERNEL);
    if (card->th_buffer.buf == NULL)
    {
        unifi_error(card->ospriv, "Failed to allocate memory for T-H signals\n");
        return CSR_WIFI_HIP_RESULT_NO_MEMORY;
    }
    card->th_buffer.bufsize = UNIFI_FH_BUF_SIZE;
    card->th_buffer.ptr = card->th_buffer.buf;
    card->th_buffer.count = 0;


    /*
     * Allocate memory for the from-host and to-host bulk data slots.
     * This is done as separate kmallocs because lots of smaller
     * allocations are more likely to succeed than one huge one.
     */

    /* Allocate memory for the array of pointers */
    /* Slot counts come from the firmware's SDIO config data */
    n = cfg_data->num_fromhost_data_slots;

    unifi_trace(card->ospriv, UDBG3, "Alloc from-host resources, %d slots.\n", n);
    card->from_host_data = kmalloc(n * sizeof(slot_desc_t), GFP_KERNEL);
    if (card->from_host_data == NULL)
    {
        unifi_error(card->ospriv, "Failed to allocate memory for F-H bulk data array\n");
        return CSR_WIFI_HIP_RESULT_NO_MEMORY;
    }

    /* Initialise from-host bulk data slots */
    for (i = 0; i < n; i++)
    {
        UNIFI_INIT_BULK_DATA(&card->from_host_data[i].bd);
    }

    /* Allocate memory for the array used for slot host tag mapping */
    card->fh_slot_host_tag_record = kmalloc(n * sizeof(u32), GFP_KERNEL);

    if (card->fh_slot_host_tag_record == NULL)
    {
        unifi_error(card->ospriv, "Failed to allocate memory for F-H slot host tag mapping array\n");
        return CSR_WIFI_HIP_RESULT_NO_MEMORY;
    }

    /* Initialise host tag entries for from-host bulk data slots */
    for (i = 0; i < n; i++)
    {
        card->fh_slot_host_tag_record[i] = CSR_WIFI_HIP_RESERVED_HOST_TAG;
    }


    /* Allocate memory for the array of pointers */
    n = cfg_data->num_tohost_data_slots;

    unifi_trace(card->ospriv, UDBG3, "Alloc to-host resources, %d slots.\n", n);
    card->to_host_data = kmalloc(n * sizeof(bulk_data_desc_t), GFP_KERNEL);
    if (card->to_host_data == NULL)
    {
        unifi_error(card->ospriv, "Failed to allocate memory for T-H bulk data array\n");
        return CSR_WIFI_HIP_RESULT_NO_MEMORY;
    }

    /* Initialise to-host bulk data slots */
    for (i = 0; i < n; i++)
    {
        UNIFI_INIT_BULK_DATA(&card->to_host_data[i]);
    }

    /*
     * Initialise buffers for soft Q
     */
    for (i = 0; i < UNIFI_SOFT_COMMAND_Q_LENGTH; i++)
    {
        for (r = 0; r < UNIFI_MAX_DATA_REFERENCES; r++)
        {
            UNIFI_INIT_BULK_DATA(&card->fh_command_q_body[i].bulkdata[r]);
        }
    }

    for (k = 0; k < UNIFI_NO_OF_TX_QS; k++)
    {
        for (i = 0; i < UNIFI_SOFT_TRAFFIC_Q_LENGTH; i++)
        {
            for (r = 0; r < UNIFI_MAX_DATA_REFERENCES; r++)
            {
                UNIFI_INIT_BULK_DATA(&card->fh_traffic_q_body[k][i].bulkdata[r]);
            }
        }
    }

    /* Flag checked by card_init_slots()/card_free_memory_resources() */
    card->memory_resources_allocated = 1;

    return CSR_RESULT_SUCCESS;
} /* card_allocate_memory_resources() */
1655
1656
1657/*
1658 * ---------------------------------------------------------------------------
1659 * unifi_free_bulk_data
1660 *
1661 * Free the data associated to a bulk data structure.
1662 *
1663 * Arguments:
1664 * card Pointer to card struct
1665 * bulk_data_slot Pointer to bulk data structure
1666 *
1667 * Returns:
1668 * None.
1669 *
1670 * ---------------------------------------------------------------------------
1671 */
1672static void unifi_free_bulk_data(card_t *card, bulk_data_desc_t *bulk_data_slot)
1673{
1674 if (bulk_data_slot->data_length != 0)
1675 {
1676 unifi_net_data_free(card->ospriv, bulk_data_slot);
1677 }
1678} /* unifi_free_bulk_data() */
1679
1680
1681/*
1682 * ---------------------------------------------------------------------------
1683 * card_free_memory_resources
1684 *
1685 * Frees memory allocated for the from-host, to-host bulk data slots,
1686 * soft queue buffers and bulk data buffers.
1687 *
1688 * Arguments:
1689 * card Pointer to card struct
1690 *
1691 * Returns:
1692 * None.
1693 * ---------------------------------------------------------------------------
1694 */
1695static void card_free_memory_resources(card_t *card)
1696{
1697
1698 unifi_trace(card->ospriv, UDBG1, "Freeing card memory resources.\n");
1699
1700 /* Clear our internal queues */
1701 unifi_cancel_pending_signals(card);
1702
1703
1704 kfree(card->to_host_data);
1705 card->to_host_data = NULL;
1706
1707 kfree(card->from_host_data);
1708 card->from_host_data = NULL;
1709
1710 /* free the memory for slot host tag mapping array */
1711 kfree(card->fh_slot_host_tag_record);
1712 card->fh_slot_host_tag_record = NULL;
1713
1714 kfree(card->fh_buffer.buf);
1715 card->fh_buffer.ptr = card->fh_buffer.buf = NULL;
1716 card->fh_buffer.bufsize = 0;
1717 card->fh_buffer.count = 0;
1718
1719 kfree(card->th_buffer.buf);
1720 card->th_buffer.ptr = card->th_buffer.buf = NULL;
1721 card->th_buffer.bufsize = 0;
1722 card->th_buffer.count = 0;
1723
1724
1725 card->memory_resources_allocated = 0;
1726
1727} /* card_free_memory_resources() */
1728
1729
1730static void card_init_soft_queues(card_t *card)
1731{
1732 s16 i;
1733
1734 unifi_trace(card->ospriv, UDBG1, "Initialising internal signal queues.\n");
1735 /* Reset any state carried forward from a previous life */
1736 card->fh_command_queue.q_rd_ptr = 0;
1737 card->fh_command_queue.q_wr_ptr = 0;
1738 (void)scnprintf(card->fh_command_queue.name, UNIFI_QUEUE_NAME_MAX_LENGTH,
1739 "fh_cmd_q");
1740 for (i = 0; i < UNIFI_NO_OF_TX_QS; i++)
1741 {
1742 card->fh_traffic_queue[i].q_rd_ptr = 0;
1743 card->fh_traffic_queue[i].q_wr_ptr = 0;
1744 (void)scnprintf(card->fh_traffic_queue[i].name,
1745 UNIFI_QUEUE_NAME_MAX_LENGTH, "fh_data_q%d", i);
1746 }
1747#ifndef CSR_WIFI_HIP_TA_DISABLE
1748 unifi_ta_sampling_init(card);
1749#endif
1750}
1751
1752
1753/*
1754 * ---------------------------------------------------------------------------
1755 * unifi_cancel_pending_signals
1756 *
1757 * Free the signals and associated bulk data, pending in the core.
1758 *
1759 * Arguments:
1760 * card Pointer to card struct
1761 *
1762 * Returns:
1763 * None.
1764 * ---------------------------------------------------------------------------
1765 */
void unifi_cancel_pending_signals(card_t *card)
{
    s16 i, n, r;

    unifi_trace(card->ospriv, UDBG1, "Canceling pending signals.\n");

    /* Guard: the slot arrays may already have been freed (or never
     * allocated); only walk them if they exist.
     */
    if (card->to_host_data)
    {
        /*
         * Free any bulk data buffers allocated for the t-h slots
         * This will clear all buffers that did not make it to
         * unifi_receive_event() before cancel was request.
         */
        n = card->config_data.num_tohost_data_slots;
        unifi_trace(card->ospriv, UDBG3, "Freeing to-host resources, %d slots.\n", n);
        for (i = 0; i < n; i++)
        {
            unifi_free_bulk_data(card, &card->to_host_data[i]);
        }
    }

    /*
     * If any of the from-host bulk data has reached the card->from_host_data
     * but not UniFi, we need to free the buffers here.
     */
    if (card->from_host_data)
    {
        /* Free any bulk data buffers allocated for the f-h slots */
        n = card->config_data.num_fromhost_data_slots;
        unifi_trace(card->ospriv, UDBG3, "Freeing from-host resources, %d slots.\n", n);
        for (i = 0; i < n; i++)
        {
            unifi_free_bulk_data(card, &card->from_host_data[i].bd);
        }

        /* Reset the dynamic slot accounting - no slots are in use now */
        for (i = 0; i < UNIFI_NO_OF_TX_QS; i++)
        {
            card->dynamic_slot_data.from_host_used_slots[i] = 0;
            card->dynamic_slot_data.from_host_max_slots[i] = 0;
            card->dynamic_slot_data.from_host_reserved_slots[i] = 0;
        }
    }

    /*
     * Free any bulk data buffers allocated in the soft queues.
     * This covers the case where a bulk data pointer has reached the soft queue
     * but not the card->from_host_data.
     */
    unifi_trace(card->ospriv, UDBG3, "Freeing cmd q resources.\n");
    for (i = 0; i < UNIFI_SOFT_COMMAND_Q_LENGTH; i++)
    {
        for (r = 0; r < UNIFI_MAX_DATA_REFERENCES; r++)
        {
            unifi_free_bulk_data(card, &card->fh_command_q_body[i].bulkdata[r]);
        }
    }

    unifi_trace(card->ospriv, UDBG3, "Freeing traffic q resources.\n");
    for (n = 0; n < UNIFI_NO_OF_TX_QS; n++)
    {
        for (i = 0; i < UNIFI_SOFT_TRAFFIC_Q_LENGTH; i++)
        {
            for (r = 0; r < UNIFI_MAX_DATA_REFERENCES; r++)
            {
                unifi_free_bulk_data(card, &card->fh_traffic_q_body[n][i].bulkdata[r]);
            }
        }
    }

    /* Leave the soft queues empty and re-initialised, ready for re-use */
    card_init_soft_queues(card);

} /* unifi_cancel_pending_signals() */
1838
1839
1840/*
1841 * ---------------------------------------------------------------------------
1842 * unifi_free_card
1843 *
1844 * Free the memory allocated for the card structure and buffers.
1845 *
1846 * Notes:
1847 * The porting layer is responsible for freeing any mini-coredump buffers
1848 * allocated when it called unifi_coredump_init(), by calling
1849 * unifi_coredump_free() before calling this function.
1850 *
1851 * Arguments:
1852 * card Pointer to card struct
1853 *
1854 * Returns:
1855 * None.
1856 * ---------------------------------------------------------------------------
1857 */
1858void unifi_free_card(card_t *card)
1859{
1860#ifdef CSR_PRE_ALLOC_NET_DATA
1861 prealloc_netdata_free(card);
1862#endif
1863 /* Free any memory allocated. */
1864 card_free_memory_resources(card);
1865
1866 /* Warn if caller didn't free coredump buffers */
1867 if (card->dump_buf)
1868 {
1869 unifi_error(card->ospriv, "Caller should call unifi_coredump_free()\n");
1870 unifi_coredump_free(card); /* free anyway to prevent memory leak */
1871 }
1872
1873 kfree(card);
1874
1875} /* unifi_free_card() */
1876
1877
1878/*
1879 * ---------------------------------------------------------------------------
1880 * card_init_slots
1881 *
1882 * Allocate memory for host-side slot data and signal queues.
1883 *
1884 * Arguments:
1885 * card Pointer to card object
1886 *
1887 * Returns:
1888 * CSR error code.
1889 * ---------------------------------------------------------------------------
1890 */
static CsrResult card_init_slots(card_t *card)
{
    CsrResult r;
    u8 i;

    /* Allocate the buffers we need, only once. */
    if (card->memory_resources_allocated == 1)
    {
        /* Re-initialising: drop the previous allocations first */
        card_free_memory_resources(card);
    }
    else
    {
        /* Initialise our internal command and traffic queues */
        card_init_soft_queues(card);
    }

    r = card_allocate_memory_resources(card);
    if (r != CSR_RESULT_SUCCESS)
    {
        unifi_error(card->ospriv, "Failed to allocate card memory resources.\n");
        /* Partial allocations are cleaned up here, not in the allocator */
        card_free_memory_resources(card);
        return r;
    }

    if (card->sdio_ctrl_addr == 0)
    {
        unifi_error(card->ospriv, "Failed to find config struct!\n");
        return CSR_WIFI_HIP_RESULT_INVALID_VALUE;
    }

    /*
     * Set initial counts.
     */

    card->from_host_data_head = 0;

    /* Get initial signal counts from UniFi, in case it has not been reset. */
    {
        u16 s;

        /* Get the from-host-signals-written count */
        /* Offset 0 into the SDIO control block holds this counter */
        r = unifi_card_read16(card, card->sdio_ctrl_addr + 0, &s);
        if (r == CSR_WIFI_HIP_RESULT_NO_DEVICE)
        {
            return r;
        }
        if (r != CSR_RESULT_SUCCESS)
        {
            unifi_error(card->ospriv, "Failed to read from-host sig written count\n");
            return r;
        }
        card->from_host_signals_w = (s16)s;

        /* Get the to-host-signals-written count */
        /* Offset 6 into the SDIO control block holds this counter */
        r = unifi_card_read16(card, card->sdio_ctrl_addr + 6, &s);
        if (r == CSR_WIFI_HIP_RESULT_NO_DEVICE)
        {
            return r;
        }
        if (r != CSR_RESULT_SUCCESS)
        {
            unifi_error(card->ospriv, "Failed to read to-host sig read count\n");
            return r;
        }
        card->to_host_signals_r = (s16)s;
    }

    /* Set Initialised flag. */
    /* Tells the firmware the host side of the interface is ready */
    r = unifi_card_write16(card, card->init_flag_addr, 0x0001);
    if (r == CSR_WIFI_HIP_RESULT_NO_DEVICE)
    {
        return r;
    }
    if (r != CSR_RESULT_SUCCESS)
    {
        unifi_error(card->ospriv, "Failed to write initialised flag\n");
        return r;
    }

    /* Dynamic queue reservation */
    memset(&card->dynamic_slot_data, 0, sizeof(card_dynamic_slot_t));

    /* Initially every TX queue may use the whole data-slot pool minus the
     * slots kept back for commands; reservations are refined at runtime
     * by CardCheckDynamicReservation().
     */
    for (i = 0; i < UNIFI_NO_OF_TX_QS; i++)
    {
        card->dynamic_slot_data.from_host_max_slots[i] = card->config_data.num_fromhost_data_slots -
                                                         UNIFI_RESERVED_COMMAND_SLOTS;
        card->dynamic_slot_data.queue_stable[i] = FALSE;
    }

    card->dynamic_slot_data.packets_interval = UNIFI_PACKETS_INTERVAL;

    return CSR_RESULT_SUCCESS;
} /* card_init_slots() */
1984
1985
1986/*
1987 * ---------------------------------------------------------------------------
1988 * unifi_set_udi_hook
1989 *
1990 * Registers the udi hook that reports the sent signals to the core.
1991 *
1992 * Arguments:
1993 * card Pointer to the card context struct
1994 * udi_fn Pointer to the callback function.
1995 *
1996 * Returns:
1997 * CSR_WIFI_HIP_RESULT_INVALID_VALUE if the card pointer is invalid,
1998 * CSR_RESULT_SUCCESS on success.
1999 * ---------------------------------------------------------------------------
2000 */
2001CsrResult unifi_set_udi_hook(card_t *card, udi_func_t udi_fn)
2002{
2003 if (card == NULL)
2004 {
2005 return CSR_WIFI_HIP_RESULT_INVALID_VALUE;
2006 }
2007
2008 if (card->udi_hook == NULL)
2009 {
2010 card->udi_hook = udi_fn;
2011 }
2012
2013 return CSR_RESULT_SUCCESS;
2014} /* unifi_set_udi_hook() */
2015
2016
2017/*
2018 * ---------------------------------------------------------------------------
2019 * unifi_remove_udi_hook
2020 *
2021 * Removes the udi hook that reports the sent signals from the core.
2022 *
2023 * Arguments:
2024 * card Pointer to the card context struct
2025 * udi_fn Pointer to the callback function.
2026 *
2027 * Returns:
2028 * CSR_WIFI_HIP_RESULT_INVALID_VALUE if the card pointer is invalid,
2029 * CSR_RESULT_SUCCESS on success.
2030 * ---------------------------------------------------------------------------
2031 */
2032CsrResult unifi_remove_udi_hook(card_t *card, udi_func_t udi_fn)
2033{
2034 if (card == NULL)
2035 {
2036 return CSR_WIFI_HIP_RESULT_INVALID_VALUE;
2037 }
2038
2039 if (card->udi_hook == udi_fn)
2040 {
2041 card->udi_hook = NULL;
2042 }
2043
2044 return CSR_RESULT_SUCCESS;
2045} /* unifi_remove_udi_hook() */
2046
2047
2048static void CardReassignDynamicReservation(card_t *card)
2049{
2050 u8 i;
2051
2052 unifi_trace(card->ospriv, UDBG5, "Packets Txed %d %d %d %d\n",
2053 card->dynamic_slot_data.packets_txed[0],
2054 card->dynamic_slot_data.packets_txed[1],
2055 card->dynamic_slot_data.packets_txed[2],
2056 card->dynamic_slot_data.packets_txed[3]);
2057
2058 /* Clear reservation and recalculate max slots */
2059 for (i = 0; i < UNIFI_NO_OF_TX_QS; i++)
2060 {
2061 card->dynamic_slot_data.queue_stable[i] = FALSE;
2062 card->dynamic_slot_data.from_host_reserved_slots[i] = 0;
2063 card->dynamic_slot_data.from_host_max_slots[i] = card->config_data.num_fromhost_data_slots -
2064 UNIFI_RESERVED_COMMAND_SLOTS;
2065 card->dynamic_slot_data.packets_txed[i] = 0;
2066
2067 unifi_trace(card->ospriv, UDBG5, "CardReassignDynamicReservation: queue %d reserved %d Max %d\n", i,
2068 card->dynamic_slot_data.from_host_reserved_slots[i],
2069 card->dynamic_slot_data.from_host_max_slots[i]);
2070 }
2071
2072 card->dynamic_slot_data.total_packets_txed = 0;
2073}
2074
2075
2076/* Algorithm to dynamically reserve slots. The logic is based mainly on the outstanding queue
2077 * length. Slots are reserved for particular queues during an interval and cleared after the interval.
2078 * Each queue has three associated variables.. a) used slots - the number of slots currently occupied
2079 * by the queue b) reserved slots - number of slots reserved specifically for the queue c) max slots - total
2080 * slots that this queue can actually use (may be higher than reserved slots and is dependent on reserved slots
2081 * for other queues).
2082 * This function is called when there are no slots available for a queue. It checks to see if there are enough
2083 * unreserved slots sufficient for this request. If available these slots are reserved for the queue.
2084 * If there are not enough unreserved slots, a fair share for each queue is calculated based on the total slots
2085 * and the number of active queues (any queue with existing reservation is considered active). Queues needing
2086 * less than their fair share are allowed to have the previously reserved slots. The remaining slots are
2087 * distributed evenly among queues that need more than the fair share
2088 *
2089 * A better scheme would take current bandwidth per AC into consideration when reserving slots. An
2090 * implementation scheme could consider the relative time/service period for slots in an AC. If the firmware
2091 * services other ACs faster than a particular AC (packets wait in the slots longer) then it is fair to reserve
2092 * less slots for the AC
2093 */
static void CardCheckDynamicReservation(card_t *card, unifi_TrafficQueue queue)
{
    u16 q_len, active_queues = 0, excess_queue_slots, div_extra_slots,
        queue_fair_share, reserved_slots = 0, q, excess_need_queues = 0, unmovable_slots = 0;
    s32 i;
    q_t *sigq;
    /* Data slots available for traffic = total minus those kept for commands */
    u16 num_data_slots = card->config_data.num_fromhost_data_slots - UNIFI_RESERVED_COMMAND_SLOTS;

    /* Calculate the pending queue length */
    sigq = &card->fh_traffic_queue[queue];
    q_len = CSR_WIFI_HIP_Q_SLOTS_USED(sigq);

    if (q_len <= card->dynamic_slot_data.from_host_reserved_slots[queue])
    {
        unifi_trace(card->ospriv, UDBG5, "queue %d q_len %d already has that many reserved slots, exiting\n", queue, q_len);
        return;
    }

    /* Upper limit */
    /* A queue can never need more slots than exist in the pool */
    if (q_len > num_data_slots)
    {
        q_len = num_data_slots;
    }

    /* Count the slots reserved by the OTHER queues, and how many queues
     * are active (the requesting queue plus any queue with a reservation).
     */
    for (i = 0; i < UNIFI_NO_OF_TX_QS; i++)
    {
        if (i != (s32)queue)
        {
            reserved_slots += card->dynamic_slot_data.from_host_reserved_slots[i];
        }
        if ((i == (s32)queue) || (card->dynamic_slot_data.from_host_reserved_slots[i] > 0))
        {
            active_queues++;
        }
    }

    unifi_trace(card->ospriv, UDBG5, "CardCheckDynamicReservation: queue %d q_len %d\n", queue, q_len);
    unifi_trace(card->ospriv, UDBG5, "Active queues %d reserved slots on other queues %d\n",
                active_queues, reserved_slots);

    if (reserved_slots + q_len <= num_data_slots)
    {
        /* Enough unreserved slots exist - grant the full request */
        card->dynamic_slot_data.from_host_reserved_slots[queue] = q_len;
        if (q_len == num_data_slots)
        {
            /* This is the common case when just 1 stream is going */
            card->dynamic_slot_data.queue_stable[queue] = TRUE;
        }
    }
    else
    {
        /* Demand exceeds supply: share the pool fairly between queues */
        queue_fair_share = num_data_slots / active_queues;
        unifi_trace(card->ospriv, UDBG5, "queue fair share %d\n", queue_fair_share);

        /* Evenly distribute slots among active queues */
        /* Find out the queues that need excess of fair share. Also find slots allocated
         * to queues less than their fair share, these slots cannot be reallocated (unmovable slots) */

        card->dynamic_slot_data.from_host_reserved_slots[queue] = q_len;

        for (i = 0; i < UNIFI_NO_OF_TX_QS; i++)
        {
            if (card->dynamic_slot_data.from_host_reserved_slots[i] > queue_fair_share)
            {
                excess_need_queues++;
            }
            else
            {
                unmovable_slots += card->dynamic_slot_data.from_host_reserved_slots[i];
            }
        }

        unifi_trace(card->ospriv, UDBG5, "Excess need queues %d\n", excess_need_queues);

        /* Now find the slots per excess demand queue */
        /* NOTE(review): excess_need_queues appears to be >= 1 here because
         * this branch implies at least one queue exceeds the fair share
         * (otherwise the sum could not exceed num_data_slots) - so the
         * division below should be safe; worth confirming against
         * CSR_WIFI_HIP_Q_SLOTS_USED semantics.
         */
        excess_queue_slots = (num_data_slots - unmovable_slots) / excess_need_queues;
        div_extra_slots = (num_data_slots - unmovable_slots) - excess_queue_slots * excess_need_queues;
        /* Walk highest-priority-last so leftover slots go to low indices last */
        for (i = UNIFI_NO_OF_TX_QS - 1; i >= 0; i--)
        {
            if (card->dynamic_slot_data.from_host_reserved_slots[i] > excess_queue_slots)
            {
                card->dynamic_slot_data.from_host_reserved_slots[i] = excess_queue_slots;
                if (div_extra_slots > 0)
                {
                    card->dynamic_slot_data.from_host_reserved_slots[i]++;
                    div_extra_slots--;
                }
                /* No more slots will be allocated to this queue during the current interval */
                card->dynamic_slot_data.queue_stable[i] = TRUE;
                unifi_trace(card->ospriv, UDBG5, "queue stable %d\n", i);
            }
        }
    }

    /* Redistribute max slots */
    /* Each queue's ceiling = pool size minus everything reserved by others */
    for (i = 0; i < UNIFI_NO_OF_TX_QS; i++)
    {
        reserved_slots = 0;
        for (q = 0; q < UNIFI_NO_OF_TX_QS; q++)
        {
            if (i != q)
            {
                reserved_slots += card->dynamic_slot_data.from_host_reserved_slots[q];
            }
        }

        card->dynamic_slot_data.from_host_max_slots[i] = num_data_slots - reserved_slots;
        unifi_trace(card->ospriv, UDBG5, "queue %d reserved %d Max %d\n", i,
                    card->dynamic_slot_data.from_host_reserved_slots[i],
                    card->dynamic_slot_data.from_host_max_slots[i]);
    }
}
2207
2208
2209/*
2210 * ---------------------------------------------------------------------------
2211 * CardClearFromHostDataSlot
2212 *
2213 * Clear a the given data slot, making it available again.
2214 *
2215 * Arguments:
2216 * card Pointer to Card object
2217 * slot Index of the signal slot to clear.
2218 *
2219 * Returns:
2220 * None.
2221 * ---------------------------------------------------------------------------
2222 */
2223void CardClearFromHostDataSlot(card_t *card, const s16 slot)
2224{
2225 u8 queue = card->from_host_data[slot].queue;
2226 const void *os_data_ptr = card->from_host_data[slot].bd.os_data_ptr;
2227
2228 if (card->from_host_data[slot].bd.data_length == 0)
2229 {
2230 unifi_warning(card->ospriv,
2231 "Surprise: request to clear an already free FH data slot: %d\n",
2232 slot);
2233 return;
2234 }
2235
2236 if (os_data_ptr == NULL)
2237 {
2238 unifi_warning(card->ospriv,
2239 "Clearing FH data slot %d: has null payload, len=%d\n",
2240 slot, card->from_host_data[slot].bd.data_length);
2241 }
2242
2243 /* Free card->from_host_data[slot].bd.os_net_ptr here. */
2244 /* Mark slot as free by setting length to 0. */
2245 unifi_free_bulk_data(card, &card->from_host_data[slot].bd);
2246 if (queue < UNIFI_NO_OF_TX_QS)
2247 {
2248 if (card->dynamic_slot_data.from_host_used_slots[queue] == 0)
2249 {
2250 unifi_error(card->ospriv, "Goofed up used slots q = %d used slots = %d\n",
2251 queue,
2252 card->dynamic_slot_data.from_host_used_slots[queue]);
2253 }
2254 else
2255 {
2256 card->dynamic_slot_data.from_host_used_slots[queue]--;
2257 }
2258 card->dynamic_slot_data.packets_txed[queue]++;
2259 card->dynamic_slot_data.total_packets_txed++;
2260 if (card->dynamic_slot_data.total_packets_txed >= card->dynamic_slot_data.packets_interval)
2261 {
2262 CardReassignDynamicReservation(card);
2263 }
2264 }
2265
2266 unifi_trace(card->ospriv, UDBG4, "CardClearFromHostDataSlot: slot %d recycled %p\n", slot, os_data_ptr);
2267
2268} /* CardClearFromHostDataSlot() */
2269
2270
2271#ifdef CSR_WIFI_REQUEUE_PACKET_TO_HAL
2272/*
2273 * ---------------------------------------------------------------------------
2274 * CardClearFromHostDataSlotWithoutFreeingBulkData
2275 *
2276 * Clear the given data slot without freeing the bulk data.
2277 *
2278 * Arguments:
2279 * card Pointer to Card object
2280 * slot Index of the signal slot to clear.
2281 *
2282 * Returns:
2283 * None.
2284 * ---------------------------------------------------------------------------
2285 */
2286void CardClearFromHostDataSlotWithoutFreeingBulkData(card_t *card, const s16 slot)
2287{
2288 u8 queue = card->from_host_data[slot].queue;
2289
2290 /* Initialise the from_host data slot so it can be re-used,
2291 * Set length field in from_host_data array to 0.
2292 */
2293 UNIFI_INIT_BULK_DATA(&card->from_host_data[slot].bd);
2294
2295 queue = card->from_host_data[slot].queue;
2296
2297 if (queue < UNIFI_NO_OF_TX_QS)
2298 {
2299 if (card->dynamic_slot_data.from_host_used_slots[queue] == 0)
2300 {
2301 unifi_error(card->ospriv, "Goofed up used slots q = %d used slots = %d\n",
2302 queue,
2303 card->dynamic_slot_data.from_host_used_slots[queue]);
2304 }
2305 else
2306 {
2307 card->dynamic_slot_data.from_host_used_slots[queue]--;
2308 }
2309 card->dynamic_slot_data.packets_txed[queue]++;
2310 card->dynamic_slot_data.total_packets_txed++;
2311 if (card->dynamic_slot_data.total_packets_txed >=
2312 card->dynamic_slot_data.packets_interval)
2313 {
2314 CardReassignDynamicReservation(card);
2315 }
2316 }
2317} /* CardClearFromHostDataSlotWithoutFreeingBulkData() */
2318
2319
2320#endif
2321
2322u16 CardGetDataSlotSize(card_t *card)
2323{
2324 return card->config_data.data_slot_size;
2325} /* CardGetDataSlotSize() */
2326
2327
2328/*
2329 * ---------------------------------------------------------------------------
2330 * CardGetFreeFromHostDataSlots
2331 *
2332 * Retrieve the number of from-host bulk data slots available.
2333 *
2334 * Arguments:
2335 * card Pointer to the card context struct
2336 *
2337 * Returns:
2338 * Number of free from-host bulk data slots.
2339 * ---------------------------------------------------------------------------
2340 */
2341u16 CardGetFreeFromHostDataSlots(card_t *card)
2342{
2343 u16 i, n = 0;
2344
2345 /* First two slots reserved for MLME */
2346 for (i = 0; i < card->config_data.num_fromhost_data_slots; i++)
2347 {
2348 if (card->from_host_data[i].bd.data_length == 0)
2349 {
2350 /* Free slot */
2351 n++;
2352 }
2353 }
2354
2355 return n;
2356} /* CardGetFreeFromHostDataSlots() */
2357
2358
2359/*
2360 * ---------------------------------------------------------------------------
2361 * CardAreAllFromHostDataSlotsEmpty
2362 *
2363 * Returns the state of from-host bulk data slots.
2364 *
2365 * Arguments:
2366 * card Pointer to the card context struct
2367 *
2368 * Returns:
2369 * 1 The from-host bulk data slots are all empty (available).
2370 * 0 Some or all the from-host bulk data slots are in use.
2371 * ---------------------------------------------------------------------------
2372 */
u16 CardAreAllFromHostDataSlotsEmpty(card_t *card)
{
    u16 i;

    /* Any slot with a non-zero bulk-data length is still in use. */
    for (i = 0; i < card->config_data.num_fromhost_data_slots; i++)
    {
        if (card->from_host_data[i].bd.data_length != 0)
        {
            return 0;
        }
    }

    /* Every slot scanned; none in use. */
    return 1;
} /* CardAreAllFromHostDataSlotsEmpty() */
2387
2388
/*
 * Cache the device's SDIO identity (chip id, function number, block size)
 * from the SDIO layer and look up the matching chip helper.
 * Always returns CSR_RESULT_SUCCESS.
 */
static CsrResult unifi_identify_hw(card_t *card)
{

    card->chip_id = card->sdio_if->sdioId.cardId;
    card->function = card->sdio_if->sdioId.sdioFunction;
    card->sdio_io_block_size = card->sdio_if->blockSize;

    /* If SDIO controller doesn't support byte mode CMD53, pad transfers to block sizes */
    card->sdio_io_block_pad = (card->sdio_if->features & CSR_SDIO_FEATURE_BYTE_MODE)?FALSE : TRUE;

    /*
     * Setup the chip helper so that we can access the registers (and
     * also tell what sub-type of HIP we should use).
     */
    card->helper = ChipHelper_GetVersionSdio((u8)card->chip_id);
    if (!card->helper)
    {
        /* NOTE(review): execution continues with a null helper and the
         * ChipHelper_* name lookups below are still made - presumably
         * they tolerate NULL; confirm before relying on this path.
         */
        unifi_error(card->ospriv, "Null ChipHelper\n");
    }

    unifi_info(card->ospriv, "Chip ID 0x%02X Function %u Block Size %u Name %s(%s)\n",
               card->chip_id, card->function, card->sdio_io_block_size,
               ChipHelper_MarketingName(card->helper),
               ChipHelper_FriendlyName(card->helper));

    return CSR_RESULT_SUCCESS;
} /* unifi_identify_hw() */
2416
2417
/*
 * Bring the chip to a usable state: identify it over SDIO, wake it,
 * restore the initial SDIO clock if it was TORPID, enable the WLAN
 * function, bootstrap the PLL and verify the chip version is readable.
 * The ordering of these steps is significant.
 */
static CsrResult unifi_prepare_hw(card_t *card)
{
    CsrResult r;
    CsrResult csrResult;
    enum unifi_host_state old_state = card->host_state;

    r = unifi_identify_hw(card);
    if (r == CSR_WIFI_HIP_RESULT_NO_DEVICE)
    {
        return r;
    }
    if (r != CSR_RESULT_SUCCESS)
    {
        unifi_error(card->ospriv, "Failed to identify hw\n");
        return r;
    }

    unifi_trace(card->ospriv, UDBG1,
                "%s mode SDIO\n", card->sdio_io_block_pad?"Block" : "Byte");
    /*
     * Chip must be awake or blocks that are asleep may not get
     * reset. We can only do this after we have read the chip_id.
     */
    r = unifi_set_host_state(card, UNIFI_HOST_STATE_AWAKE);
    if (r == CSR_WIFI_HIP_RESULT_NO_DEVICE)
    {
        return r;
    }

    if (old_state == UNIFI_HOST_STATE_TORPID)
    {
        /* Ensure the initial clock rate is set; if a reset occurred when the chip was
         * TORPID, unifi_set_host_state() may have raised it to MAX.
         */
        csrResult = CsrSdioMaxBusClockFrequencySet(card->sdio_if, UNIFI_SDIO_CLOCK_INIT_HZ);
        if (csrResult != CSR_RESULT_SUCCESS)
        {
            r = ConvertCsrSdioToCsrHipResult(card, csrResult);
            return r;
        }
        /* Track the clock rate we asked for. */
        card->sdio_clock_speed = UNIFI_SDIO_CLOCK_INIT_HZ;
    }

    /*
     * The WLAN function must be enabled to access MAILBOX2 and DEBUG_RST
     * registers.
     */
    csrResult = CsrSdioFunctionEnable(card->sdio_if);
    if (csrResult == CSR_SDIO_RESULT_NO_DEVICE)
    {
        return CSR_WIFI_HIP_RESULT_NO_DEVICE;
    }
    if (csrResult != CSR_RESULT_SUCCESS)
    {
        r = ConvertCsrSdioToCsrHipResult(card, csrResult);
        /* Can't enable WLAN function. Try resetting the SDIO block. */
        unifi_error(card->ospriv, "Failed to re-enable function %d.\n", card->function);
        return r;
    }

    /*
     * Poke some registers to make sure the PLL has started,
     * otherwise memory accesses are likely to fail.
     */
    bootstrap_chip_hw(card);

    /* Try to read the chip version from register. */
    r = unifi_read_chip_version(card);
    if (r != CSR_RESULT_SUCCESS)
    {
        return r;
    }

    return CSR_RESULT_SUCCESS;
} /* unifi_prepare_hw() */
2493
2494
2495static CsrResult unifi_read_chip_version(card_t *card)
2496{
2497 u32 gbl_chip_version;
2498 CsrResult r;
2499 u16 ver;
2500
2501 gbl_chip_version = ChipHelper_GBL_CHIP_VERSION(card->helper);
2502
2503 /* Try to read the chip version from register. */
2504 if (gbl_chip_version != 0)
2505 {
2506 r = unifi_read_direct16(card, gbl_chip_version * 2, &ver);
2507 if (r == CSR_WIFI_HIP_RESULT_NO_DEVICE)
2508 {
2509 return r;
2510 }
2511 if (r != CSR_RESULT_SUCCESS)
2512 {
2513 unifi_error(card->ospriv, "Failed to read GBL_CHIP_VERSION\n");
2514 return r;
2515 }
2516 card->chip_version = ver;
2517 }
2518 else
2519 {
2520 unifi_info(card->ospriv, "Unknown Chip ID, cannot locate GBL_CHIP_VERSION\n");
2521 r = CSR_RESULT_FAILURE;
2522 }
2523
2524 unifi_info(card->ospriv, "Chip Version 0x%04X\n", card->chip_version);
2525
2526 return r;
2527} /* unifi_read_chip_version() */
2528
2529
2530/*
2531 * ---------------------------------------------------------------------------
2532 * unifi_reset_hardware
2533 *
2534 * Execute the UniFi reset sequence.
2535 *
2536 * Note: This may fail if the chip is going TORPID so retry at
2537 * least once.
2538 *
2539 * Arguments:
2540 * card - pointer to card context structure
2541 *
2542 * Returns:
2543 * CSR_RESULT_SUCCESS on success, CSR error otherwise.
2544 *
2545 * Notes:
2546 * Some platforms (e.g. Windows Vista) do not allow access to registers
2547 * that are necessary for a software soft reset.
2548 * ---------------------------------------------------------------------------
2549 */
static CsrResult unifi_reset_hardware(card_t *card)
{
    CsrResult r;
    u16 new_block_size = UNIFI_IO_BLOCK_SIZE;
    CsrResult csrResult;

    /* Errors returned by unifi_prepare_hw() are not critical at this point */
    r = unifi_prepare_hw(card);
    if (r == CSR_WIFI_HIP_RESULT_NO_DEVICE)
    {
        return r;
    }

    /* First try SDIO controller reset, which may power cycle the UniFi, assert
     * its reset line, or not be implemented depending on the platform.
     */
    unifi_info(card->ospriv, "Calling CsrSdioHardReset\n");
    csrResult = CsrSdioHardReset(card->sdio_if);
    if (csrResult == CSR_RESULT_SUCCESS)
    {
        unifi_info(card->ospriv, "CsrSdioHardReset succeeded on resetting UniFi\n");
        /* The controller reset wiped the chip state; bring it back up. */
        r = unifi_prepare_hw(card);
        if (r == CSR_WIFI_HIP_RESULT_NO_DEVICE)
        {
            return r;
        }
        if (r != CSR_RESULT_SUCCESS)
        {
            unifi_error(card->ospriv, "unifi_prepare_hw failed after hard reset\n");
            return r;
        }
    }
    else if (csrResult == CSR_SDIO_RESULT_NO_DEVICE)
    {
        return CSR_WIFI_HIP_RESULT_NO_DEVICE;
    }
    else
    {
        /* Falling back to software hard reset methods */
        unifi_info(card->ospriv, "Falling back to software hard reset\n");
        r = unifi_card_hard_reset(card);
        if (r == CSR_WIFI_HIP_RESULT_NO_DEVICE)
        {
            return r;
        }
        if (r != CSR_RESULT_SUCCESS)
        {
            unifi_error(card->ospriv, "software hard reset failed\n");
            return r;
        }

        /* If we fell back to unifi_card_hard_reset() methods, chip version may
         * not have been read. (Note in the unlikely event that it is zero,
         * it will be harmlessly read again)
         */
        if (card->chip_version == 0)
        {
            r = unifi_read_chip_version(card);
            if (r != CSR_RESULT_SUCCESS)
            {
                return r;
            }
        }
    }

#ifdef CSR_WIFI_HIP_SDIO_BLOCK_SIZE
    new_block_size = CSR_WIFI_HIP_SDIO_BLOCK_SIZE;
#endif

    /* After hard reset, we need to restore the SDIO block size */
    csrResult = CsrSdioBlockSizeSet(card->sdio_if, new_block_size);
    r = ConvertCsrSdioToCsrHipResult(card, csrResult);

    /* Warn if a different block size was achieved by the transport */
    if (card->sdio_if->blockSize != new_block_size)
    {
        unifi_info(card->ospriv,
                   "Actually got block size %d\n", card->sdio_if->blockSize);
    }

    /* sdio_io_block_size always needs be updated from the achieved block size,
     * as it is used by the OS layer to allocate memory in unifi_net_malloc().
     * Controllers which don't support block mode (e.g. CSPI) will report a
     * block size of zero.
     */
    if (card->sdio_if->blockSize == 0)
    {
        unifi_info(card->ospriv, "Block size 0, block mode not available\n");

        /* Set sdio_io_block_size to 1 so that unifi_net_data_malloc() has a
         * sensible rounding value. Elsewhere padding will already be
         * disabled because the controller supports byte mode.
         */
        card->sdio_io_block_size = 1;

        /* Controller features must declare support for byte mode */
        if (!(card->sdio_if->features & CSR_SDIO_FEATURE_BYTE_MODE))
        {
            unifi_error(card->ospriv, "Requires byte mode\n");
            r = CSR_WIFI_HIP_RESULT_INVALID_VALUE;
        }
    }
    else
    {
        /* Padding will be enabled if CSR_SDIO_FEATURE_BYTE_MODE isn't set */
        card->sdio_io_block_size = card->sdio_if->blockSize;
    }


    return r;
} /* unifi_reset_hardware() */
2661
2662
2663/*
2664 * ---------------------------------------------------------------------------
2665 * card_reset_method_io_enable
2666 *
2667 * Issue a hard reset to the hw writing the IO_ENABLE.
2668 *
2669 * Arguments:
2670 * card Pointer to Card object
2671 *
2672 * Returns:
2673 * 0 on success,
2674 * CSR_WIFI_HIP_RESULT_NO_DEVICE if the card was ejected
2675 * CSR_RESULT_FAILURE if an SDIO error occurred or if a response
2676 * was not seen in the expected time
2677 * ---------------------------------------------------------------------------
2678 */
static CsrResult card_reset_method_io_enable(card_t *card)
{
    CsrResult r;
    CsrResult csrResult;

    /*
     * This resets only function 1, so should be used in
     * preference to the method below (CSR_FUNC_EN)
     */
    unifi_trace(card->ospriv, UDBG1, "Hard reset (IO_ENABLE)\n");

    /* Disabling the function clears its IO_ENABLE bit, triggering the reset. */
    csrResult = CsrSdioFunctionDisable(card->sdio_if);
    if (csrResult == CSR_SDIO_RESULT_NO_DEVICE)
    {
        return CSR_WIFI_HIP_RESULT_NO_DEVICE;
    }
    if (csrResult != CSR_RESULT_SUCCESS)
    {
        r = ConvertCsrSdioToCsrHipResult(card, csrResult);
        unifi_warning(card->ospriv, "SDIO error writing IO_ENABLE: %d\n", r);
    }
    else
    {
        /* Delay here to let the reset take effect. */
        CsrThreadSleep(RESET_SETTLE_DELAY);

        r = card_wait_for_unifi_to_disable(card);
        if (r == CSR_WIFI_HIP_RESULT_NO_DEVICE)
        {
            return r;
        }

        if (r == CSR_RESULT_SUCCESS)
        {
            r = card_wait_for_unifi_to_reset(card);
            if (r == CSR_WIFI_HIP_RESULT_NO_DEVICE)
            {
                return r;
            }
        }
    }

    /* IO_ENABLE method failed; fall back to the CSR_FUNC_EN f0 register. */
    if (r != CSR_RESULT_SUCCESS)
    {
        unifi_trace(card->ospriv, UDBG1, "Hard reset (CSR_FUNC_EN)\n");

        r = sdio_write_f0(card, SDIO_CSR_FUNC_EN, 0);
        if (r == CSR_WIFI_HIP_RESULT_NO_DEVICE)
        {
            return r;
        }
        if (r != CSR_RESULT_SUCCESS)
        {
            unifi_warning(card->ospriv, "SDIO error writing SDIO_CSR_FUNC_EN: %d\n", r);
            return r;
        }
        else
        {
            /* Delay here to let the reset take effect. */
            CsrThreadSleep(RESET_SETTLE_DELAY);

            r = card_wait_for_unifi_to_reset(card);
            if (r == CSR_WIFI_HIP_RESULT_NO_DEVICE)
            {
                return r;
            }
        }
    }

    if (r != CSR_RESULT_SUCCESS)
    {
        unifi_warning(card->ospriv, "card_reset_method_io_enable failed to reset UniFi\n");
    }

    return r;
} /* card_reset_method_io_enable() */
2755
2756
2757/*
2758 * ---------------------------------------------------------------------------
2759 * card_reset_method_dbg_reset
2760 *
2761 * Issue a hard reset to the hw writing the DBG_RESET.
2762 *
2763 * Arguments:
2764 * card Pointer to Card object
2765 *
2766 * Returns:
2767 * CSR_RESULT_SUCCESS on success,
2768 * CSR_WIFI_HIP_RESULT_NO_DEVICE if the card was ejected
2769 * CSR_RESULT_FAILURE if an SDIO error occurred or if a response
2770 * was not seen in the expected time
2771 * ---------------------------------------------------------------------------
2772 */
static CsrResult card_reset_method_dbg_reset(card_t *card)
{
    CsrResult r;

    /*
     * Prepare UniFi for h/w reset
     */
    /* A TORPID chip must be brought to DROWSY first so it can accept writes. */
    if (card->host_state == UNIFI_HOST_STATE_TORPID)
    {
        r = unifi_set_host_state(card, UNIFI_HOST_STATE_DROWSY);
        if (r == CSR_WIFI_HIP_RESULT_NO_DEVICE)
        {
            return r;
        }
        if (r != CSR_RESULT_SUCCESS)
        {
            unifi_error(card->ospriv, "Failed to set UNIFI_HOST_STATE_DROWSY\n");
            return r;
        }
        CsrThreadSleep(5);
    }

    /* Both XAP processors must be halted before asserting the debug reset. */
    r = unifi_card_stop_processor(card, UNIFI_PROC_BOTH);
    if (r == CSR_WIFI_HIP_RESULT_NO_DEVICE)
    {
        return r;
    }
    if (r != CSR_RESULT_SUCCESS)
    {
        unifi_error(card->ospriv, "Can't stop processors\n");
        return r;
    }

    unifi_trace(card->ospriv, UDBG1, "Hard reset (DBG_RESET)\n");

    /*
     * This register write may fail. The debug reset resets
     * parts of the Function 0 sections of the chip, and
     * therefore the response cannot be sent back to the host.
     */
    r = unifi_write_direct_8_or_16(card, ChipHelper_DBG_RESET(card->helper) * 2, 1);
    if (r == CSR_WIFI_HIP_RESULT_NO_DEVICE)
    {
        return r;
    }
    if (r != CSR_RESULT_SUCCESS)
    {
        unifi_warning(card->ospriv, "SDIO error writing DBG_RESET: %d\n", r);
        return r;
    }

    /* Delay here to let the reset take effect. */
    CsrThreadSleep(RESET_SETTLE_DELAY);

    r = card_wait_for_unifi_to_reset(card);
    if (r == CSR_WIFI_HIP_RESULT_NO_DEVICE)
    {
        return r;
    }
    if (r != CSR_RESULT_SUCCESS)
    {
        unifi_warning(card->ospriv, "card_reset_method_dbg_reset failed to reset UniFi\n");
    }

    return r;
} /* card_reset_method_dbg_reset() */
2839
2840
2841/*
2842 * ---------------------------------------------------------------------------
2843 * unifi_card_hard_reset
2844 *
2845 * Issue reset to hardware, by writing to registers on the card.
2846 * Power to the card is preserved.
2847 *
2848 * Arguments:
2849 * card Pointer to Card object
2850 *
2851 * Returns:
2852 * CSR_RESULT_SUCCESS on success,
2853 * CSR_WIFI_HIP_RESULT_NO_DEVICE if the card was ejected
2854 * CSR_RESULT_FAILURE if an SDIO error occurred or if a response
2855 * was not seen in the expected time
2856 * ---------------------------------------------------------------------------
2857 */
CsrResult unifi_card_hard_reset(card_t *card)
{
    CsrResult r;
    const struct chip_helper_reset_values *init_data;
    u32 chunks;

    /* Clear cache of page registers */
    /* (-1) is a sentinel meaning "unknown"; forces re-select on next access. */
    card->proc_select = (u32)(-1);
    card->dmem_page = (u32)(-1);
    card->pmem_page = (u32)(-1);

    /*
     * We need to have a valid card->helper before we use software hard reset.
     * If unifi_identify_hw() fails to get the card ID, it probably means
     * that there is no way to talk to the h/w.
     */
    r = unifi_identify_hw(card);
    if (r == CSR_WIFI_HIP_RESULT_NO_DEVICE)
    {
        return r;
    }
    if (r != CSR_RESULT_SUCCESS)
    {
        unifi_error(card->ospriv, "unifi_card_hard_reset failed to identify h/w\n");
        return r;
    }

    /* Search for some reset code. */
    /* A non-zero chunk count would require a code-download reset, which
     * this driver does not implement.
     */
    chunks = ChipHelper_HostResetSequence(card->helper, &init_data);
    if (chunks != 0)
    {
        unifi_error(card->ospriv,
                    "Hard reset (Code download) is unsupported\n");

        return CSR_RESULT_FAILURE;
    }

    if (card->chip_id > SDIO_CARD_ID_UNIFI_2)
    {
        /* The HIP spec considers this a bus-specific reset.
         * This resets only function 1, so should be used in
         * preference to the method below (CSR_FUNC_EN)
         * If this method fails, it means that the f/w is probably
         * not running. In this case, try the DBG_RESET method.
         */
        r = card_reset_method_io_enable(card);
        if (r == CSR_WIFI_HIP_RESULT_NO_DEVICE)
        {
            return r;
        }
        if (r == CSR_RESULT_SUCCESS)
        {
            return r;
        }
    }

    /* Software hard reset */
    r = card_reset_method_dbg_reset(card);

    return r;
} /* unifi_card_hard_reset() */
2919
2920
2921/*
2922 * ---------------------------------------------------------------------------
2923 *
2924 * CardGenInt
2925 *
2926 * Prod the card.
2927 * This function causes an internal interrupt to be raised in the
2928 * UniFi chip. It is used to signal the firmware that some action has
2929 * been completed.
2930 * The UniFi Host Interface asks that the value used increments for
2931 * debugging purposes.
2932 *
2933 * Arguments:
2934 * card Pointer to Card object
2935 *
2936 * Returns:
2937 * CSR_RESULT_SUCCESS on success,
2938 * CSR_WIFI_HIP_RESULT_NO_DEVICE if the card was ejected
2939 * CSR_RESULT_FAILURE if an SDIO error occurred or if a response
2940 * was not seen in the expected time
2941 * ---------------------------------------------------------------------------
2942 */
2943CsrResult CardGenInt(card_t *card)
2944{
2945 CsrResult r;
2946
2947 if (card->chip_id > SDIO_CARD_ID_UNIFI_2)
2948 {
2949 r = sdio_write_f0(card, SDIO_CSR_FROM_HOST_SCRATCH0,
2950 (u8)card->unifi_interrupt_seq);
2951 }
2952 else
2953 {
2954 r = unifi_write_direct_8_or_16(card,
2955 ChipHelper_SHARED_IO_INTERRUPT(card->helper) * 2,
2956 (u8)card->unifi_interrupt_seq);
2957 }
2958 if (r == CSR_WIFI_HIP_RESULT_NO_DEVICE)
2959 {
2960 return r;
2961 }
2962 if (r != CSR_RESULT_SUCCESS)
2963 {
2964 unifi_error(card->ospriv, "SDIO error writing UNIFI_SHARED_IO_INTERRUPT: %d\n", r);
2965 return r;
2966 }
2967
2968 card->unifi_interrupt_seq++;
2969
2970 return CSR_RESULT_SUCCESS;
2971} /* CardGenInt() */
2972
2973
2974/*
2975 * ---------------------------------------------------------------------------
2976 * CardEnableInt
2977 *
2978 * Enable the outgoing SDIO interrupt from UniFi to the host.
2979 *
2980 * Arguments:
2981 * card Pointer to Card object
2982 *
2983 * Returns:
2984 * CSR_RESULT_SUCCESS on success,
2985 * CSR_WIFI_HIP_RESULT_NO_DEVICE if the card was ejected
2986 * CSR_RESULT_FAILURE if an SDIO error occurred,
2987 * ---------------------------------------------------------------------------
2988 */
2989CsrResult CardEnableInt(card_t *card)
2990{
2991 CsrResult r;
2992 u8 int_enable;
2993
2994 r = sdio_read_f0(card, SDIO_INT_ENABLE, &int_enable);
2995 if (r == CSR_WIFI_HIP_RESULT_NO_DEVICE)
2996 {
2997 return r;
2998 }
2999 if (r != CSR_RESULT_SUCCESS)
3000 {
3001 unifi_error(card->ospriv, "SDIO error reading SDIO_INT_ENABLE\n");
3002 return r;
3003 }
3004
3005 int_enable |= (1 << card->function) | UNIFI_SD_INT_ENABLE_IENM;
3006
3007 r = sdio_write_f0(card, SDIO_INT_ENABLE, int_enable);
3008 if (r == CSR_WIFI_HIP_RESULT_NO_DEVICE)
3009 {
3010 return r;
3011 }
3012 if (r != CSR_RESULT_SUCCESS)
3013 {
3014 unifi_error(card->ospriv, "SDIO error writing SDIO_INT_ENABLE\n");
3015 return r;
3016 }
3017
3018 return CSR_RESULT_SUCCESS;
3019} /* CardEnableInt() */
3020
3021
3022/*
3023 * ---------------------------------------------------------------------------
3024 * CardDisableInt
3025 *
3026 * Disable the outgoing SDIO interrupt from UniFi to the host.
3027 *
3028 * Arguments:
3029 * card Pointer to Card object
3030 *
3031 * Returns:
3032 * CSR_RESULT_SUCCESS on success,
3033 * CSR_WIFI_HIP_RESULT_NO_DEVICE if the card was ejected
3034 * CSR_RESULT_FAILURE if an SDIO error occurred,
3035 * ---------------------------------------------------------------------------
3036 */
3037CsrResult CardDisableInt(card_t *card)
3038{
3039 CsrResult r;
3040 u8 int_enable;
3041
3042 r = sdio_read_f0(card, SDIO_INT_ENABLE, &int_enable);
3043 if (r == CSR_WIFI_HIP_RESULT_NO_DEVICE)
3044 {
3045 return r;
3046 }
3047 if (r != CSR_RESULT_SUCCESS)
3048 {
3049 unifi_error(card->ospriv, "SDIO error reading SDIO_INT_ENABLE\n");
3050 return r;
3051 }
3052
3053 int_enable &= ~(1 << card->function);
3054
3055 r = sdio_write_f0(card, SDIO_INT_ENABLE, int_enable);
3056 if (r == CSR_WIFI_HIP_RESULT_NO_DEVICE)
3057 {
3058 return r;
3059 }
3060 if (r != CSR_RESULT_SUCCESS)
3061 {
3062 unifi_error(card->ospriv, "SDIO error writing SDIO_INT_ENABLE\n");
3063 return r;
3064 }
3065
3066 return CSR_RESULT_SUCCESS;
3067} /* CardDisableInt() */
3068
3069
3070/*
3071 * ---------------------------------------------------------------------------
3072 * CardPendingInt
3073 *
3074 * Determine whether UniFi is currently asserting the SDIO interrupt
3075 * request.
3076 *
3077 * Arguments:
3078 * card Pointer to Card object
3079 * pintr Pointer to location to write interrupt status,
3080 * TRUE if interrupt pending,
3081 * FALSE if no interrupt pending.
3082 * Returns:
3083 * CSR_RESULT_SUCCESS interrupt status read successfully
3084 * CSR_WIFI_HIP_RESULT_NO_DEVICE if the card was ejected
3085 * CSR_RESULT_FAILURE if an SDIO error occurred,
3086 * ---------------------------------------------------------------------------
3087 */
3088CsrResult CardPendingInt(card_t *card, u8 *pintr)
3089{
3090 CsrResult r;
3091 u8 pending;
3092
3093 *pintr = FALSE;
3094
3095 r = sdio_read_f0(card, SDIO_INT_PENDING, &pending);
3096 if (r == CSR_WIFI_HIP_RESULT_NO_DEVICE)
3097 {
3098 return r;
3099 }
3100 if (r != CSR_RESULT_SUCCESS)
3101 {
3102 unifi_error(card->ospriv, "SDIO error reading SDIO_INT_PENDING\n");
3103 return r;
3104 }
3105
3106 *pintr = (pending & (1 << card->function))?TRUE : FALSE;
3107
3108 return CSR_RESULT_SUCCESS;
3109} /* CardPendingInt() */
3110
3111
3112/*
3113 * ---------------------------------------------------------------------------
3114 * CardClearInt
3115 *
3116 * Clear the UniFi SDIO interrupt request.
3117 *
3118 * Arguments:
3119 * card Pointer to Card object
3120 *
3121 * Returns:
3122 * CSR_RESULT_SUCCESS if pending interrupt was cleared, or no pending interrupt.
3123 * CSR_WIFI_HIP_RESULT_NO_DEVICE if the card was ejected
3124 * CSR_RESULT_FAILURE if an SDIO error occurred,
3125 * ---------------------------------------------------------------------------
3126 */
3127CsrResult CardClearInt(card_t *card)
3128{
3129 CsrResult r;
3130 u8 intr;
3131
3132 if (card->chip_id > SDIO_CARD_ID_UNIFI_2)
3133 {
3134 /* CardPendingInt() sets intr, if there is a pending interrupt */
3135 r = CardPendingInt(card, &intr);
3136 if (intr == FALSE)
3137 {
3138 return r;
3139 }
3140
3141 r = sdio_write_f0(card, SDIO_CSR_HOST_INT_CLEAR, 1);
3142 if (r == CSR_WIFI_HIP_RESULT_NO_DEVICE)
3143 {
3144 return r;
3145 }
3146 if (r != CSR_RESULT_SUCCESS)
3147 {
3148 unifi_error(card->ospriv, "SDIO error writing SDIO_CSR_HOST_INT_CLEAR\n");
3149 }
3150 }
3151 else
3152 {
3153 r = unifi_write_direct_8_or_16(card,
3154 ChipHelper_SDIO_HOST_INT(card->helper) * 2,
3155 0);
3156 if (r == CSR_WIFI_HIP_RESULT_NO_DEVICE)
3157 {
3158 return r;
3159 }
3160 if (r != CSR_RESULT_SUCCESS)
3161 {
3162 unifi_error(card->ospriv, "SDIO error writing UNIFI_SDIO_HOST_INT\n");
3163 }
3164 }
3165
3166 return r;
3167} /* CardClearInt() */
3168
3169
3170/*
3171 * ---------------------------------------------------------------------------
3172 * CardIntEnabled
3173 *
3174 * Determine whether the SDIO interrupt from UniFi to the host is
3175 * currently enabled.
3176 *
3177 * Arguments:
3178 * card Pointer to Card object
3179 * enabled Pointer to location to write interrupt enable status,
3180 * TRUE if interrupts enabled,
3181 * FALSE if interrupts disabled.
3182 *
3183 * Returns:
3184 * CSR_WIFI_HIP_RESULT_NO_DEVICE if the card was ejected
3185 * CSR_RESULT_FAILURE if an SDIO error occurred,
3186 * ---------------------------------------------------------------------------
3187 */
3188CsrResult CardIntEnabled(card_t *card, u8 *enabled)
3189{
3190 CsrResult r;
3191 u8 int_enable;
3192
3193 r = sdio_read_f0(card, SDIO_INT_ENABLE, &int_enable);
3194 if (r == CSR_WIFI_HIP_RESULT_NO_DEVICE)
3195 {
3196 return r;
3197 }
3198 if (r != CSR_RESULT_SUCCESS)
3199 {
3200 unifi_error(card->ospriv, "SDIO error reading SDIO_INT_ENABLE\n");
3201 return r;
3202 }
3203
3204 *enabled = (int_enable & (1 << card->function))?TRUE : FALSE;
3205
3206 return CSR_RESULT_SUCCESS;
3207} /* CardIntEnabled() */
3208
3209
3210/*
3211 * ---------------------------------------------------------------------------
3212 * CardWriteBulkData
3213 * Allocate slot in the pending bulkdata arrays and assign it to a signal's
3214 * bulkdata reference. The slot is then ready for UniFi's bulkdata commands
3215 * to transfer the data to/from the host.
3216 *
3217 * Arguments:
3218 * card Pointer to Card object
3219 * csptr Pending signal pointer, including bulkdata ref
3220 * queue Traffic queue that this signal is using
3221 *
3222 * Returns:
3223 * CSR_RESULT_SUCCESS if a free slot was assigned
3224 * CSR_RESULT_FAILURE if no slot was available
3225 * ---------------------------------------------------------------------------
3226 */
CsrResult CardWriteBulkData(card_t *card, card_signal_t *csptr, unifi_TrafficQueue queue)
{
    u16 i, slots[UNIFI_MAX_DATA_REFERENCES], j = 0;
    u8 *packed_sigptr, num_slots_required = 0;
    bulk_data_desc_t *bulkdata = csptr->bulkdata;
    s16 h, nslots;

    /* Count the number of slots required */
    for (i = 0; i < UNIFI_MAX_DATA_REFERENCES; i++)
    {
        if (bulkdata[i].data_length != 0)
        {
            num_slots_required++;
        }
    }

    /* Get the slot numbers */
    if (num_slots_required != 0)
    {
        /* Last 2 slots for MLME */
        if (queue == UNIFI_TRAFFIC_Q_MLME)
        {
            /* MLME search starts in the reserved command slots at the top of
             * the array, and wraps through the whole array if needed.
             */
            h = card->config_data.num_fromhost_data_slots - UNIFI_RESERVED_COMMAND_SLOTS;
            for (i = 0; i < card->config_data.num_fromhost_data_slots; i++)
            {
                if (card->from_host_data[h].bd.data_length == 0)
                {
                    /* Free data slot, claim it */
                    slots[j++] = h;
                    if (j == num_slots_required)
                    {
                        break;
                    }
                }

                /* Advance with wrap-around over all slots. */
                if (++h >= card->config_data.num_fromhost_data_slots)
                {
                    h = 0;
                }
            }
        }
        else
        {
            /* Data queues may only claim slots while below their dynamic
             * per-queue reservation limit.
             */
            if (card->dynamic_slot_data.from_host_used_slots[queue]
                < card->dynamic_slot_data.from_host_max_slots[queue])
            {
                /* Data commands get a free slot only after a few checks */
                /* Data traffic may not use the reserved command slots. */
                nslots = card->config_data.num_fromhost_data_slots - UNIFI_RESERVED_COMMAND_SLOTS;

                /* Resume scanning from where the last allocation stopped. */
                h = card->from_host_data_head;

                for (i = 0; i < nslots; i++)
                {
                    if (card->from_host_data[h].bd.data_length == 0)
                    {
                        /* Free data slot, claim it */
                        slots[j++] = h;
                        if (j == num_slots_required)
                        {
                            break;
                        }
                    }

                    /* Advance with wrap-around over the data slots only. */
                    if (++h >= nslots)
                    {
                        h = 0;
                    }
                }
                /* Remember the cursor for the next allocation. */
                card->from_host_data_head = h;
            }
        }

        /* Required number of slots are not available, bail out */
        if (j != num_slots_required)
        {
            unifi_trace(card->ospriv, UDBG5, "CardWriteBulkData: didn't find free slot/s\n");

            /* If we haven't already reached the stable state we can ask for reservation */
            if ((queue != UNIFI_TRAFFIC_Q_MLME) && (card->dynamic_slot_data.queue_stable[queue] == FALSE))
            {
                CardCheckDynamicReservation(card, queue);
            }

            for (i = 0; i < card->config_data.num_fromhost_data_slots; i++)
            {
                unifi_trace(card->ospriv, UDBG5, "fh data slot %d: %d\n", i, card->from_host_data[i].bd.data_length);
            }
            return CSR_RESULT_FAILURE;
        }
    }

    packed_sigptr = csptr->sigbuf;

    /* Fill in the slots with data */
    j = 0;
    for (i = 0; i < UNIFI_MAX_DATA_REFERENCES; i++)
    {
        if (bulkdata[i].data_length == 0)
        {
            /* Zero-out the DATAREF in the signal */
            SET_PACKED_DATAREF_SLOT(packed_sigptr, i, 0);
            SET_PACKED_DATAREF_LEN(packed_sigptr, i, 0);
        }
        else
        {
            /*
             * Fill in the slot number in the SIGNAL structure but
             * preserve the offset already in there
             */
            SET_PACKED_DATAREF_SLOT(packed_sigptr, i, slots[j] | (((u16)packed_sigptr[SIZEOF_SIGNAL_HEADER + (i * SIZEOF_DATAREF) + 1]) << 8));
            SET_PACKED_DATAREF_LEN(packed_sigptr, i, bulkdata[i].data_length);

            /* Do not copy the data, just store the information to them */
            card->from_host_data[slots[j]].bd.os_data_ptr = bulkdata[i].os_data_ptr;
            card->from_host_data[slots[j]].bd.os_net_buf_ptr = bulkdata[i].os_net_buf_ptr;
            card->from_host_data[slots[j]].bd.data_length = bulkdata[i].data_length;
            card->from_host_data[slots[j]].bd.net_buf_length = bulkdata[i].net_buf_length;
            card->from_host_data[slots[j]].queue = queue;

            unifi_trace(card->ospriv, UDBG4, "CardWriteBulkData sig=0x%x, fh slot %d = %p\n",
                        GET_SIGNAL_ID(packed_sigptr), i, bulkdata[i].os_data_ptr);

            /* Sanity-check that the bulk data desc being assigned to the slot
             * actually has a payload.
             */
            if (!bulkdata[i].os_data_ptr)
            {
                unifi_error(card->ospriv, "Assign null os_data_ptr (len=%d) fh slot %d, i=%d, q=%d, sig=0x%x",
                            bulkdata[i].data_length, slots[j], i, queue, GET_SIGNAL_ID(packed_sigptr));
            }

            j++;
            /* Account the claimed slot against the queue's dynamic usage. */
            if (queue < UNIFI_NO_OF_TX_QS)
            {
                card->dynamic_slot_data.from_host_used_slots[queue]++;
            }
        }
    }

    return CSR_RESULT_SUCCESS;
} /* CardWriteBulkData() */
3368
3369
3370/*
3371 * ---------------------------------------------------------------------------
3372 * card_find_data_slot
3373 *
3374 * Dereference references to bulk data slots into pointers to real data.
3375 *
3376 * Arguments:
3377 * card Pointer to the card struct.
3378 * slot Slot number from a signal structure
3379 *
3380 * Returns:
3381 * Pointer to entry in bulk_data_slot array.
3382 * ---------------------------------------------------------------------------
3383 */
3384bulk_data_desc_t* card_find_data_slot(card_t *card, s16 slot)
3385{
3386 s16 sn;
3387 bulk_data_desc_t *bd;
3388
3389 sn = slot & 0x7FFF;
3390
3391 /* ?? check sanity of slot number ?? */
3392
3393 if (slot & SLOT_DIR_TO_HOST)
3394 {
3395 bd = &card->to_host_data[sn];
3396 }
3397 else
3398 {
3399 bd = &card->from_host_data[sn].bd;
3400 }
3401
3402 return bd;
3403} /* card_find_data_slot() */
3404
3405
3406/*
3407 * ---------------------------------------------------------------------------
3408 * firmware_present_in_flash
3409 *
3410 * Probe for external Flash that looks like it might contain firmware.
3411 *
3412 * If Flash is not present, reads always return 0x0008.
3413 * If Flash is present, but empty, reads return 0xFFFF.
3414 * Anything else is considered to be firmware.
3415 *
3416 * Arguments:
3417 * card Pointer to card struct
3418 *
3419 * Returns:
3420 * CSR_RESULT_SUCCESS firmware is present in ROM or flash
3421 * CSR_WIFI_HIP_RESULT_NOT_FOUND firmware is not present in ROM or flash
3422 * CSR_WIFI_HIP_RESULT_NO_DEVICE if the card was ejected
3423 * CSR_RESULT_FAILURE if an SDIO error occurred
3424 * ---------------------------------------------------------------------------
3425 */
3426static CsrResult firmware_present_in_flash(card_t *card)
3427{
3428 CsrResult r;
3429 u16 m1, m5;
3430
3431 if (ChipHelper_HasRom(card->helper))
3432 {
3433 return CSR_RESULT_SUCCESS;
3434 }
3435 if (!ChipHelper_HasFlash(card->helper))
3436 {
3437 return CSR_WIFI_HIP_RESULT_NOT_FOUND;
3438 }
3439
3440 /*
3441 * Examine the Flash locations that are the power-on default reset
3442 * vectors of the XAP processors.
3443 * These are words 1 and 5 in Flash.
3444 */
3445 r = unifi_card_read16(card, UNIFI_MAKE_GP(EXT_FLASH, 2), &m1);
3446 if (r != CSR_RESULT_SUCCESS)
3447 {
3448 return r;
3449 }
3450
3451 r = unifi_card_read16(card, UNIFI_MAKE_GP(EXT_FLASH, 10), &m5);
3452 if (r != CSR_RESULT_SUCCESS)
3453 {
3454 return r;
3455 }
3456
3457 /* Check for uninitialised/missing flash */
3458 if ((m1 == 0x0008) || (m1 == 0xFFFF) ||
3459 (m1 == 0x0004) || (m5 == 0x0004) ||
3460 (m5 == 0x0008) || (m5 == 0xFFFF))
3461 {
3462 return CSR_WIFI_HIP_RESULT_NOT_FOUND;
3463 }
3464
3465 return CSR_RESULT_SUCCESS;
3466} /* firmware_present_in_flash() */
3467
3468
3469/*
3470 * ---------------------------------------------------------------------------
3471 * bootstrap_chip_hw
3472 *
3473 * Perform chip specific magic to "Get It Working" TM. This will
3474 * increase speed of PLLs in analogue and maybe enable some
3475 * on-chip regulators.
3476 *
3477 * Arguments:
3478 * card Pointer to card struct
3479 *
3480 * Returns:
3481 * None.
3482 * ---------------------------------------------------------------------------
3483 */
3484static void bootstrap_chip_hw(card_t *card)
3485{
3486 const struct chip_helper_init_values *vals;
3487 u32 i, len;
3488 void *sdio = card->sdio_if;
3489 CsrResult csrResult;
3490
3491 len = ChipHelper_ClockStartupSequence(card->helper, &vals);
3492 if (len != 0)
3493 {
3494 for (i = 0; i < len; i++)
3495 {
3496 csrResult = CsrSdioWrite16(sdio, vals[i].addr * 2, vals[i].value);
3497 if (csrResult != CSR_RESULT_SUCCESS)
3498 {
3499 unifi_warning(card->ospriv, "Failed to write bootstrap value %d\n", i);
3500 /* Might not be fatal */
3501 }
3502
3503 CsrThreadSleep(1);
3504 }
3505 }
3506} /* bootstrap_chip_hw() */
3507
3508
3509/*
3510 * ---------------------------------------------------------------------------
3511 * unifi_card_stop_processor
3512 *
3513 * Stop the UniFi XAP processors.
3514 *
3515 * Arguments:
3516 * card Pointer to card struct
3517 * which One of UNIFI_PROC_MAC, UNIFI_PROC_PHY, UNIFI_PROC_BOTH
3518 *
3519 * Returns:
3520 * CSR_RESULT_SUCCESS if successful, or CSR error code
3521 * ---------------------------------------------------------------------------
3522 */
3523CsrResult unifi_card_stop_processor(card_t *card, enum unifi_dbg_processors_select which)
3524{
3525 CsrResult r = CSR_RESULT_SUCCESS;
3526 u8 status;
3527 s16 retry = 100;
3528
3529 while (retry--)
3530 {
3531 /* Select both XAPs */
3532 r = unifi_set_proc_select(card, which);
3533 if (r != CSR_RESULT_SUCCESS)
3534 {
3535 break;
3536 }
3537
3538 /* Stop processors */
3539 r = unifi_write_direct16(card, ChipHelper_DBG_EMU_CMD(card->helper) * 2, 2);
3540 if (r != CSR_RESULT_SUCCESS)
3541 {
3542 break;
3543 }
3544
3545 /* Read status */
3546 r = unifi_read_direct_8_or_16(card,
3547 ChipHelper_DBG_HOST_STOP_STATUS(card->helper) * 2,
3548 &status);
3549 if (r != CSR_RESULT_SUCCESS)
3550 {
3551 break;
3552 }
3553
3554 if ((status & 1) == 1)
3555 {
3556 /* Success! */
3557 return CSR_RESULT_SUCCESS;
3558 }
3559
3560 /* Processors didn't stop, try again */
3561 }
3562
3563 if (r != CSR_RESULT_SUCCESS)
3564 {
3565 /* An SDIO error occurred */
3566 unifi_error(card->ospriv, "Failed to stop processors: SDIO error\n");
3567 }
3568 else
3569 {
3570 /* If we reach here, we didn't the status in time. */
3571 unifi_error(card->ospriv, "Failed to stop processors: timeout waiting for stopped status\n");
3572 r = CSR_RESULT_FAILURE;
3573 }
3574
3575 return r;
3576} /* unifi_card_stop_processor() */
3577
3578
3579/*
3580 * ---------------------------------------------------------------------------
3581 * card_start_processor
3582 *
3583 * Start the UniFi XAP processors.
3584 *
3585 * Arguments:
3586 * card Pointer to card struct
3587 * which One of UNIFI_PROC_MAC, UNIFI_PROC_PHY, UNIFI_PROC_BOTH
3588 *
3589 * Returns:
3590 * CSR_RESULT_SUCCESS or CSR error code
3591 * ---------------------------------------------------------------------------
3592 */
3593CsrResult card_start_processor(card_t *card, enum unifi_dbg_processors_select which)
3594{
3595 CsrResult r;
3596
3597 /* Select both XAPs */
3598 r = unifi_set_proc_select(card, which);
3599 if (r != CSR_RESULT_SUCCESS)
3600 {
3601 unifi_error(card->ospriv, "unifi_set_proc_select failed: %d.\n", r);
3602 return r;
3603 }
3604
3605
3606 r = unifi_write_direct_8_or_16(card,
3607 ChipHelper_DBG_EMU_CMD(card->helper) * 2, 8);
3608 if (r != CSR_RESULT_SUCCESS)
3609 {
3610 return r;
3611 }
3612
3613 r = unifi_write_direct_8_or_16(card,
3614 ChipHelper_DBG_EMU_CMD(card->helper) * 2, 0);
3615 if (r != CSR_RESULT_SUCCESS)
3616 {
3617 return r;
3618 }
3619
3620 return CSR_RESULT_SUCCESS;
3621} /* card_start_processor() */
3622
3623
3624/*
3625 * ---------------------------------------------------------------------------
3626 * unifi_set_interrupt_mode
3627 *
3628 * Configure the interrupt processing mode used by the HIP
3629 *
3630 * Arguments:
3631 * card Pointer to card struct
3632 * mode Interrupt mode to apply
3633 *
3634 * Returns:
3635 * None
3636 * ---------------------------------------------------------------------------
3637 */
3638void unifi_set_interrupt_mode(card_t *card, u32 mode)
3639{
3640 if (mode == CSR_WIFI_INTMODE_RUN_BH_ONCE)
3641 {
3642 unifi_info(card->ospriv, "Scheduled interrupt mode");
3643 }
3644 card->intmode = mode;
3645} /* unifi_set_interrupt_mode() */
3646
3647
3648/*
3649 * ---------------------------------------------------------------------------
3650 * unifi_start_processors
3651 *
3652 * Start all UniFi XAP processors.
3653 *
3654 * Arguments:
3655 * card Pointer to card struct
3656 *
3657 * Returns:
3658 * CSR_RESULT_SUCCESS on success, CSR error code on error
3659 * ---------------------------------------------------------------------------
3660 */
3661CsrResult unifi_start_processors(card_t *card)
3662{
3663 return card_start_processor(card, UNIFI_PROC_BOTH);
3664} /* unifi_start_processors() */
3665
3666
3667/*
3668 * ---------------------------------------------------------------------------
3669 * unifi_request_max_sdio_clock
3670 *
3671 * Requests that the maximum SDIO clock rate is set at the next suitable
3672 * opportunity (e.g. when the BH next runs, so as not to interfere with
3673 * any current operation).
3674 *
3675 * Arguments:
3676 * card Pointer to card struct
3677 *
3678 * Returns:
3679 * None
3680 * ---------------------------------------------------------------------------
3681 */
3682void unifi_request_max_sdio_clock(card_t *card)
3683{
3684 card->request_max_clock = 1;
3685} /* unifi_request_max_sdio_clock() */
3686
3687
3688/*
3689 * ---------------------------------------------------------------------------
3690 * unifi_set_host_state
3691 *
3692 * Set the host deep-sleep state.
3693 *
3694 * If transitioning to TORPID, the SDIO driver will be notified
3695 * that the SD bus will be unused (idle) and conversely, when
3696 * transitioning from TORPID that the bus will be used (active).
3697 *
3698 * Arguments:
3699 * card Pointer to card struct
3700 * state New deep-sleep state.
3701 *
3702 * Returns:
3703 * CSR_RESULT_SUCCESS on success
3704 * CSR_WIFI_HIP_RESULT_NO_DEVICE if the card was ejected
3705 * CSR_RESULT_FAILURE if an SDIO error occurred
3706 *
3707 * Notes:
3708 * We need to reduce the SDIO clock speed before trying to wake up the
3709 * chip. Actually, in the implementation below we reduce the clock speed
3710 * not just before we try to wake up the chip, but when we put the chip to
3711 * deep sleep. This means that if the f/w wakes up on its' own, we waste
3712 * a reduce/increace cycle. However, trying to eliminate this overhead is
3713 * proved difficult, as the current state machine in the HIP lib does at
3714 * least a CMD52 to disable the interrupts before we configure the host
3715 * state.
3716 * ---------------------------------------------------------------------------
3717 */
3718CsrResult unifi_set_host_state(card_t *card, enum unifi_host_state state)
3719{
3720 CsrResult r = CSR_RESULT_SUCCESS;
3721 CsrResult csrResult;
3722 static const char *const states[] = {
3723 "AWAKE", "DROWSY", "TORPID"
3724 };
3725 static const u8 state_csr_host_wakeup[] = {
3726 1, 3, 0
3727 };
3728 static const u8 state_io_abort[] = {
3729 0, 2, 3
3730 };
3731
3732 unifi_trace(card->ospriv, UDBG4, "State %s to %s\n",
3733 states[card->host_state], states[state]);
3734
3735 if (card->host_state == UNIFI_HOST_STATE_TORPID)
3736 {
3737 CsrSdioFunctionActive(card->sdio_if);
3738 }
3739
3740 /* Write the new state to UniFi. */
3741 if (card->chip_id > SDIO_CARD_ID_UNIFI_2)
3742 {
3743 r = sdio_write_f0(card, SDIO_CSR_HOST_WAKEUP,
3744 (u8)((card->function << 4) | state_csr_host_wakeup[state]));
3745 }
3746 else
3747 {
3748 r = sdio_write_f0(card, SDIO_IO_ABORT, state_io_abort[state]);
3749 }
3750
3751 if (r == CSR_WIFI_HIP_RESULT_NO_DEVICE)
3752 {
3753 return r;
3754 }
3755 if (r != CSR_RESULT_SUCCESS)
3756 {
3757 unifi_error(card->ospriv, "Failed to write UniFi deep sleep state\n");
3758 }
3759 else
3760 {
3761 /*
3762 * If the chip was in state TORPID then we can now increase
3763 * the maximum bus clock speed.
3764 */
3765 if (card->host_state == UNIFI_HOST_STATE_TORPID)
3766 {
3767 csrResult = CsrSdioMaxBusClockFrequencySet(card->sdio_if,
3768 UNIFI_SDIO_CLOCK_MAX_HZ);
3769 r = ConvertCsrSdioToCsrHipResult(card, csrResult);
3770 /* Non-fatal error */
3771 if (r != CSR_RESULT_SUCCESS && r != CSR_WIFI_HIP_RESULT_NO_DEVICE)
3772 {
3773 unifi_warning(card->ospriv,
3774 "Failed to increase the SDIO clock speed\n");
3775 }
3776 else
3777 {
3778 card->sdio_clock_speed = UNIFI_SDIO_CLOCK_MAX_HZ;
3779 }
3780 }
3781
3782 /*
3783 * Cache the current state in the card structure to avoid
3784 * unnecessary SDIO reads.
3785 */
3786 card->host_state = state;
3787
3788 if (state == UNIFI_HOST_STATE_TORPID)
3789 {
3790 /*
3791 * If the chip is now in state TORPID then we must now decrease
3792 * the maximum bus clock speed.
3793 */
3794 csrResult = CsrSdioMaxBusClockFrequencySet(card->sdio_if,
3795 UNIFI_SDIO_CLOCK_SAFE_HZ);
3796 r = ConvertCsrSdioToCsrHipResult(card, csrResult);
3797 if (r != CSR_RESULT_SUCCESS && r != CSR_WIFI_HIP_RESULT_NO_DEVICE)
3798 {
3799 unifi_warning(card->ospriv,
3800 "Failed to decrease the SDIO clock speed\n");
3801 }
3802 else
3803 {
3804 card->sdio_clock_speed = UNIFI_SDIO_CLOCK_SAFE_HZ;
3805 }
3806 CsrSdioFunctionIdle(card->sdio_if);
3807 }
3808 }
3809
3810 return r;
3811} /* unifi_set_host_state() */
3812
3813
3814/*
3815 * ---------------------------------------------------------------------------
3816 * unifi_card_info
3817 *
3818 * Update the card information data structure
3819 *
3820 * Arguments:
3821 * card Pointer to card struct
3822 * card_info Pointer to info structure to update
3823 *
3824 * Returns:
3825 * None
3826 * ---------------------------------------------------------------------------
3827 */
3828void unifi_card_info(card_t *card, card_info_t *card_info)
3829{
3830 card_info->chip_id = card->chip_id;
3831 card_info->chip_version = card->chip_version;
3832 card_info->fw_build = card->build_id;
3833 card_info->fw_hip_version = card->config_data.version;
3834 card_info->sdio_block_size = card->sdio_io_block_size;
3835} /* unifi_card_info() */
3836
3837
3838/*
3839 * ---------------------------------------------------------------------------
3840 * unifi_check_io_status
3841 *
3842 * Check UniFi for spontaneous reset and pending interrupt.
3843 *
3844 * Arguments:
3845 * card Pointer to card struct
3846 * status Pointer to location to write chip status:
3847 * 0 if UniFi is running, and no interrupt pending
3848 * 1 if UniFi has spontaneously reset
3849 * 2 if there is a pending interrupt
3850 * Returns:
3851 * CSR_RESULT_SUCCESS if OK, or CSR error
3852 * ---------------------------------------------------------------------------
3853 */
3854CsrResult unifi_check_io_status(card_t *card, s32 *status)
3855{
3856 u8 io_en;
3857 CsrResult r;
3858 u8 pending;
3859
3860 *status = 0;
3861
3862 r = sdio_read_f0(card, SDIO_IO_ENABLE, &io_en);
3863 if (r == CSR_WIFI_HIP_RESULT_NO_DEVICE)
3864 {
3865 return r;
3866 }
3867 if (r != CSR_RESULT_SUCCESS)
3868 {
3869 unifi_error(card->ospriv, "Failed to read SDIO_IO_ENABLE to check for spontaneous reset\n");
3870 return r;
3871 }
3872
3873 if ((io_en & (1 << card->function)) == 0)
3874 {
3875 s32 fw_count;
3876 *status = 1;
3877 unifi_error(card->ospriv, "UniFi has spontaneously reset.\n");
3878
3879 /*
3880 * These reads are very likely to fail. We want to know if the function is really
3881 * disabled or the SDIO driver just returns rubbish.
3882 */
3883 fw_count = unifi_read_shared_count(card, card->sdio_ctrl_addr + 4);
3884 if (fw_count < 0)
3885 {
3886 unifi_error(card->ospriv, "Failed to read to-host sig written count\n");
3887 }
3888 else
3889 {
3890 unifi_error(card->ospriv, "thsw: %u (driver thinks is %u)\n",
3891 fw_count, card->to_host_signals_w);
3892 }
3893 fw_count = unifi_read_shared_count(card, card->sdio_ctrl_addr + 2);
3894 if (fw_count < 0)
3895 {
3896 unifi_error(card->ospriv, "Failed to read from-host sig read count\n");
3897 }
3898 else
3899 {
3900 unifi_error(card->ospriv, "fhsr: %u (driver thinks is %u)\n",
3901 fw_count, card->from_host_signals_r);
3902 }
3903
3904 return r;
3905 }
3906
3907 unifi_info(card->ospriv, "UniFi function %d is enabled.\n", card->function);
3908
3909 /* See if we missed an SDIO interrupt */
3910 r = CardPendingInt(card, &pending);
3911 if (pending)
3912 {
3913 unifi_error(card->ospriv, "There is an unhandled pending interrupt.\n");
3914 *status = 2;
3915 return r;
3916 }
3917
3918 return r;
3919} /* unifi_check_io_status() */
3920
3921
/*
 * Fill in a unifi_HipQosInfo snapshot for the HIP:
 *  - number of free from-host bulk data slots,
 *  - free entries in each from-host traffic signal queue,
 *  - an estimate of free from-host signal fragments on the firmware side.
 */
void unifi_get_hip_qos_info(card_t *card, unifi_HipQosInfo *hipqosinfo)
{
    s32 count_fhr;
    s16 t;
    u32 occupied_fh;

    q_t *sigq;
    u16 nslots, i;

    memset(hipqosinfo, 0, sizeof(unifi_HipQosInfo));

    nslots = card->config_data.num_fromhost_data_slots;

    /* A slot with zero data_length is unused. */
    for (i = 0; i < nslots; i++)
    {
        if (card->from_host_data[i].bd.data_length == 0)
        {
            hipqosinfo->free_fh_bulkdata_slots++;
        }
    }

    for (i = 0; i < UNIFI_NO_OF_TX_QS; i++)
    {
        sigq = &card->fh_traffic_queue[i];
        /* Used entries in the ring; wrap negative differences. */
        t = sigq->q_wr_ptr - sigq->q_rd_ptr;
        if (t < 0)
        {
            t += sigq->q_length;
        }
        /* Capacity is q_length - 1 (one slot is always kept empty). */
        hipqosinfo->free_fh_sig_queue_slots[i] = (sigq->q_length - t) - 1;
    }

    count_fhr = unifi_read_shared_count(card, card->sdio_ctrl_addr + 2);
    if (count_fhr < 0)
    {
        unifi_error(card->ospriv, "Failed to read from-host sig read count - %d\n", count_fhr);
        /* 0xfa is a sentinel meaning "count unavailable". */
        hipqosinfo->free_fh_fw_slots = 0xfa;
        return;
    }

    /* NOTE(review): the shared signal counts are documented elsewhere in this
     * file as modulo-256, so the "% 128" here looks suspicious -- confirm
     * whether fragment counts wrap at 128 before changing it. */
    occupied_fh = (card->from_host_signals_w - count_fhr) % 128;

    hipqosinfo->free_fh_fw_slots = (u16)(card->config_data.num_fromhost_sig_frags - occupied_fh);
}
3966
3967
3968
3969CsrResult ConvertCsrSdioToCsrHipResult(card_t *card, CsrResult csrResult)
3970{
3971 CsrResult r = CSR_RESULT_FAILURE;
3972
3973 switch (csrResult)
3974 {
3975 case CSR_RESULT_SUCCESS:
3976 r = CSR_RESULT_SUCCESS;
3977 break;
3978 /* Timeout errors */
3979 case CSR_SDIO_RESULT_TIMEOUT:
3980 /* Integrity errors */
3981 case CSR_SDIO_RESULT_CRC_ERROR:
3982 r = CSR_RESULT_FAILURE;
3983 break;
3984 case CSR_SDIO_RESULT_NO_DEVICE:
3985 r = CSR_WIFI_HIP_RESULT_NO_DEVICE;
3986 break;
3987 case CSR_SDIO_RESULT_INVALID_VALUE:
3988 r = CSR_WIFI_HIP_RESULT_INVALID_VALUE;
3989 break;
3990 case CSR_RESULT_FAILURE:
3991 r = CSR_RESULT_FAILURE;
3992 break;
3993 default:
3994 unifi_warning(card->ospriv, "Unrecognised csrResult error code: %d\n", csrResult);
3995 break;
3996 }
3997
3998 return r;
3999} /* ConvertCsrSdioToCsrHipResult() */
4000
4001
diff --git a/drivers/staging/csr/csr_wifi_hip_card_sdio.h b/drivers/staging/csr/csr_wifi_hip_card_sdio.h
deleted file mode 100644
index a9b9ec427320..000000000000
--- a/drivers/staging/csr/csr_wifi_hip_card_sdio.h
+++ /dev/null
@@ -1,694 +0,0 @@
1/*****************************************************************************
2
3 (c) Cambridge Silicon Radio Limited 2011
4 All rights reserved and confidential information of CSR
5
6 Refer to LICENSE.txt included with this source for details
7 on the license terms.
8
9*****************************************************************************/
10
11/*
12 * ---------------------------------------------------------------------------
13 *
14 * FILE: csr_wifi_hip_card_sdio.h
15 *
16 * PURPOSE:
17 * Internal header for Card API for SDIO.
18 * ---------------------------------------------------------------------------
19 */
20#ifndef __CARD_SDIO_H__
21#define __CARD_SDIO_H__
22
23#include "csr_wifi_hip_unifi.h"
24#include "csr_wifi_hip_unifi_udi.h"
25#include "csr_wifi_hip_unifihw.h"
26#include "csr_wifi_hip_unifiversion.h"
27#ifndef CSR_WIFI_HIP_TA_DISABLE
28#include "csr_wifi_hip_ta_sampling.h"
29#endif
30#include "csr_wifi_hip_xbv.h"
31#include "csr_wifi_hip_chiphelper.h"
32
33
34/*
35 *
36 * Configuration items.
37 * Which of these should go in a platform unifi_config.h file?
38 *
39 */
40
41/*
42 * When the traffic queues contain more signals than there is space for on
43 * UniFi, a limiting algorithm comes into play.
44 * If a traffic queue has enough slots free to buffer more traffic from the
45 * network stack, then the following check is applied. The number of free
46 * slots is RESUME_XMIT_THRESHOLD.
47 */
48#define RESUME_XMIT_THRESHOLD 4
49
50
51/*
52 * When reading signals from UniFi, the host processes pending all signals
53 * and then acknowledges them together in a single write to update the
54 * to-host-chunks-read location.
55 * When there is more than one bulk data transfer (e.g. one received data
56 * packet and a request for the payload data of a transmitted packet), the
57 * update can be delayed significantly. This ties up resources on chip.
58 *
59 * To remedy this problem, to-host-chunks-read is updated after processing
60 * a signal if TO_HOST_FLUSH_THRESHOLD bytes of bulk data have been
61 * transferred since the last update.
62 */
63#define TO_HOST_FLUSH_THRESHOLD (500 * 5)
64
65
66/* SDIO Card Common Control Registers */
67#define SDIO_CCCR_SDIO_REVISION (0x00)
68#define SDIO_SD_SPEC_REVISION (0x01)
69#define SDIO_IO_ENABLE (0x02)
70#define SDIO_IO_READY (0x03)
71#define SDIO_INT_ENABLE (0x04)
72#define SDIO_INT_PENDING (0x05)
73#define SDIO_IO_ABORT (0x06)
74#define SDIO_BUS_IFACE_CONTROL (0x07)
75#define SDIO_CARD_CAPABILOTY (0x08)
76#define SDIO_COMMON_CIS_POINTER (0x09)
77#define SDIO_BUS_SUSPEND (0x0C)
78#define SDIO_FUNCTION_SELECT (0x0D)
79#define SDIO_EXEC_FLAGS (0x0E)
80#define SDIO_READY_FLAGS (0x0F)
81#define SDIO_FN0_BLOCK_SIZE (0x10)
82#define SDIO_POWER_CONTROL (0x12)
83#define SDIO_VENDOR_START (0xF0)
84
85#define SDIO_CSR_HOST_WAKEUP (0xf0)
86#define SDIO_CSR_HOST_INT_CLEAR (0xf1)
87#define SDIO_CSR_FROM_HOST_SCRATCH0 (0xf2)
88#define SDIO_CSR_FROM_HOST_SCRATCH1 (0xf3)
89#define SDIO_CSR_TO_HOST_SCRATCH0 (0xf4)
90#define SDIO_CSR_TO_HOST_SCRATCH1 (0xf5)
91#define SDIO_CSR_FUNC_EN (0xf6)
92#define SDIO_CSR_CSPI_MODE (0xf7)
93#define SDIO_CSR_CSPI_STATUS (0xf8)
94#define SDIO_CSR_CSPI_PADDING (0xf9)
95
96
97#define UNIFI_SD_INT_ENABLE_IENM 0x0001 /* Master INT Enable */
98
99#ifdef CSR_PRE_ALLOC_NET_DATA
100#define BULK_DATA_PRE_ALLOC_NUM 16
101#endif
102
103/*
104 * Structure to hold configuration information read from UniFi.
105 */
/*
 * Structure to hold configuration information read from UniFi.
 * Layout mirrors the on-chip config block (see SDIO_CONFIG_DATA_SIZE).
 */
typedef struct
{
    /*
     * The version of the SDIO signal queues and bulk data pools
     * configuration structure. The MSB is the major version number, used to
     * indicate incompatible changes. The LSB gives the minor revision number,
     * used to indicate changes that maintain backwards compatibility.
     */
    u16 version;

    /*
     * offset from the start of the shared data memory to the SD IO
     * control structure.
     */
    u16 sdio_ctrl_offset;

    /* Buffer handle of the from-host signal queue */
    u16 fromhost_sigbuf_handle;

    /* Buffer handle of the to-host signal queue */
    u16 tohost_sigbuf_handle;

    /*
     * Maximum number of signal primitive or bulk data command fragments that may be
     * pending in the to-hw signal queue.
     */
    u16 num_fromhost_sig_frags;

    /*
     * Number of signal primitive or bulk data command fragments that must be pending
     * in the to-host signal queue before the host will generate an interrupt
     * to indicate that it has read a signal. This will usually be the total
     * capacity of the to-host signal buffer less the size of the largest signal
     * primitive divided by the signal primitive fragment size, but may be set
     * to 1 to request interrupts every time that the host read a signal.
     * Note that the hw may place more signals in the to-host signal queue
     * than indicated by this field.
     */
    u16 num_tohost_sig_frags;

    /*
     * Number of to-hw bulk data slots. Slots are numbered from 0 (zero) to
     * one less than the value in this field
     */
    u16 num_fromhost_data_slots;

    /*
     * Number of frm-hw bulk data slots. Slots are numbered from 0 (zero) to
     * one less than the value in this field
     */
    u16 num_tohost_data_slots;

    /*
     * Size of the bulk data slots (2 octets)
     * The size of the bulk data slots in octets. This will usually be
     * the size of the largest MSDU. The value should always be even.
     */
    u16 data_slot_size;

    /*
     * Indicates that the host has finished the initialisation sequence.
     * Initialised to 0x0000 by the firmware, and set to 0x0001 by us.
     */
    u16 initialised;

    /* Added by protocol version 0x0001 */
    u32 overlay_size;

    /* Added by protocol version 0x0300 */
    u16 data_slot_round;
    u16 sig_frag_size;

    /* Added by protocol version 0x0500 */
    u16 tohost_signal_padding;
} sdio_config_data_t;
181
182/*
183 * These values may change with versions of the Host Interface Protocol.
184 */
185/*
186 * Size of config info block pointed to by the CSR_SLT_SDIO_SLOT_CONFIG
187 * entry in the f/w symbol table
188 */
189#define SDIO_CONFIG_DATA_SIZE 30
190
191/* Offset of the INIT flag in the config info block. */
192#define SDIO_INIT_FLAG_OFFSET 0x12
193#define SDIO_TO_HOST_SIG_PADDING_OFFSET 0x1C
194
195
/* Structure for a bulk data transfer command (wire format on the SDIO link) */
typedef struct
{
    u16 cmd_and_len;   /* bits 12-15 cmd (SDIO_CMD_*), bits 0-11 len */
    u16 data_slot;     /* slot number, perhaps OR'd with SLOT_DIR_TO_HOST */
    u16 offset;        /* byte offset within the slot */
    u16 buffer_handle; /* handle of the buffer on the chip side */
} bulk_data_cmd_t;
204
205
206/* Bulk Data signal command values */
207#define SDIO_CMD_SIGNAL 0x00
208#define SDIO_CMD_TO_HOST_TRANSFER 0x01
209#define SDIO_CMD_TO_HOST_TRANSFER_ACK 0x02 /*deprecated*/
210#define SDIO_CMD_FROM_HOST_TRANSFER 0x03
211#define SDIO_CMD_FROM_HOST_TRANSFER_ACK 0x04 /*deprecated*/
212#define SDIO_CMD_CLEAR_SLOT 0x05
213#define SDIO_CMD_OVERLAY_TRANSFER 0x06
214#define SDIO_CMD_OVERLAY_TRANSFER_ACK 0x07 /*deprecated*/
215#define SDIO_CMD_FROM_HOST_AND_CLEAR 0x08
216#define SDIO_CMD_PADDING 0x0f
217
218#define SLOT_DIR_TO_HOST 0x8000
219
220
/* Initialise bulkdata slot: clears all four fields so the slot reads
 * as empty (data_length == 0 marks a free slot).
 * params:
 *  bulk_data_desc_t *bulk_data_slot
 */
#define UNIFI_INIT_BULK_DATA(bulk_data_slot)     \
    {                                            \
        (bulk_data_slot)->os_data_ptr = NULL;    \
        (bulk_data_slot)->data_length = 0;       \
        (bulk_data_slot)->os_net_buf_ptr = NULL; \
        (bulk_data_slot)->net_buf_length = 0;    \
    }
232
/*
 * Structure to contain a SIGNAL datagram.
 * This is used to build signal queues between the main driver and the
 * i/o thread.
 * The fields are:
 *      sigbuf          Contains the HIP signal in wire-format (i.e. packed,
 *                      little-endian)
 *      bulkdata        Contains a copy of any associated bulk data
 *      signal_length   The size of the signal in the sigbuf
 */
typedef struct card_signal
{
    u8 sigbuf[UNIFI_PACKED_SIGBUF_SIZE];

    /* Length of the SIGNAL inside sigbuf */
    u16 signal_length;

    bulk_data_desc_t bulkdata[UNIFI_MAX_DATA_REFERENCES];
} card_signal_t;
252
253
/*
 * Control structure for a generic ring buffer of card_signal_t entries.
 * The queue is empty when q_wr_ptr == q_rd_ptr, so usable capacity is
 * q_length - 1.
 */
#define UNIFI_QUEUE_NAME_MAX_LENGTH 16
typedef struct
{
    card_signal_t *q_body;

    /* Num elements in queue (capacity is one less than this!) */
    u16 q_length;

    u16 q_wr_ptr;
    u16 q_rd_ptr;

    /* Human-readable queue name, for diagnostics */
    char name[UNIFI_QUEUE_NAME_MAX_LENGTH];
} q_t;
270
271
272#define UNIFI_RESERVED_COMMAND_SLOTS 2
273
274/* Considering approx 500 us per packet giving 0.5 secs */
275#define UNIFI_PACKETS_INTERVAL 1000
276
/*
 * Dynamic slot reservation for QoS: per-queue accounting used to share
 * the from-host bulk data slots between the TX traffic queues.
 */
typedef struct
{
    u16 from_host_used_slots[UNIFI_NO_OF_TX_QS];
    u16 from_host_max_slots[UNIFI_NO_OF_TX_QS];
    u16 from_host_reserved_slots[UNIFI_NO_OF_TX_QS];

    /* Parameters to determine if a queue was active.
       If number of packets sent is greater than the threshold
       for the queue, the queue is considered active and no
       re-reservation is done; it is important not to keep this
       value too low */
    /* Packets sent during this interval */
    u16 packets_txed[UNIFI_NO_OF_TX_QS];
    u16 total_packets_txed;

    /* Number of packets to see if slots need to be reassigned */
    u16 packets_interval;

    /* Once a queue reaches a stable state, avoid processing */
    u8 queue_stable[UNIFI_NO_OF_TX_QS];
} card_dynamic_slot_t;
301
302
/* Ring-buffer accessor macros for q_t. These are type-safe and don't
 * write incorrect values to the structure. */

/* Return queue slots used count.
 * (u16 operands promote to int, so the subtraction can go negative
 * and the wrap-around branch is reachable.)
 * params:
 *  const q_t *q
 * returns:
 *  u16
 */
#define CSR_WIFI_HIP_Q_SLOTS_USED(q) \
    (((q)->q_wr_ptr - (q)->q_rd_ptr < 0)? \
     ((q)->q_wr_ptr - (q)->q_rd_ptr + (q)->q_length) : ((q)->q_wr_ptr - (q)->q_rd_ptr))

/* Return queue slots free count (one slot is always kept empty to
 * distinguish full from empty).
 * params:
 *  const q_t *q
 * returns:
 *  u16
 */
#define CSR_WIFI_HIP_Q_SLOTS_FREE(q) \
    ((q)->q_length - CSR_WIFI_HIP_Q_SLOTS_USED((q)) - 1)

/* Return slot signal data pointer
 * params:
 *  const q_t *q
 *  u16 slot
 * returns:
 *  card_signal_t *
 */
#define CSR_WIFI_HIP_Q_SLOT_DATA(q, slot) \
    ((q)->q_body + slot)

/* Return queue next read slot
 * params:
 *  const q_t *q
 * returns:
 *  u16 slot offset
 */
#define CSR_WIFI_HIP_Q_NEXT_R_SLOT(q) \
    ((q)->q_rd_ptr)

/* Return queue next write slot
 * params:
 *  const q_t *q
 * returns:
 *  u16 slot offset
 */
#define CSR_WIFI_HIP_Q_NEXT_W_SLOT(q) \
    ((q)->q_wr_ptr)

/* Return updated queue pointer wrapped around its length
 * params:
 *  const q_t *q
 *  u16 x       amount to add to queue pointer
 * returns:
 *  u16 wrapped queue pointer
 */
#define CSR_WIFI_HIP_Q_WRAP(q, x) \
    ((((x) >= (q)->q_length)?((x) % (q)->q_length) : (x)))

/* Advance queue read pointer
 * params:
 *  const q_t *q
 */
#define CSR_WIFI_HIP_Q_INC_R(q) \
    ((q)->q_rd_ptr = CSR_WIFI_HIP_Q_WRAP((q), (q)->q_rd_ptr + 1))

/* Advance queue write pointer
 * params:
 *  const q_t *q
 */
#define CSR_WIFI_HIP_Q_INC_W(q) \
    ((q)->q_wr_ptr = CSR_WIFI_HIP_Q_WRAP((q), (q)->q_wr_ptr + 1))
376
/* Host deep-sleep states, as written to the chip by unifi_set_host_state(). */
enum unifi_host_state
{
    UNIFI_HOST_STATE_AWAKE   = 0,
    UNIFI_HOST_STATE_DROWSY  = 1,
    UNIFI_HOST_STATE_TORPID  = 2   /* SD bus treated as idle in this state */
};
383
/* Descriptor for one from-host bulk data slot. */
typedef struct
{
    bulk_data_desc_t bd;       /* the bulk data payload reference */
    unifi_TrafficQueue queue;  /* Used for dynamic slot reservation */
} slot_desc_t;
389
390/*
391 * Structure describing a UniFi SDIO card.
392 */
393struct card
394{
395 /*
396 * Back pointer for the higher level OS code. This is passed as
397 * an argument to callbacks (e.g. for received data and indications).
398 */
399 void *ospriv;
400
401 /*
402 * mapping of HIP slot to MA-PACKET.req host tag, the
403 * array is indexed by slot numbers and each index stores
404 * information of the last host tag it was used for
405 */
406 u32 *fh_slot_host_tag_record;
407
408
409 /* Info read from Symbol Table during probe */
410 u32 build_id;
411 char build_id_string[128];
412
413 /* Retrieve from SDIO driver. */
414 u16 chip_id;
415
416 /* Read from GBL_CHIP_VERSION. */
417 u16 chip_version;
418
419 /* From the SDIO driver (probably 1) */
420 u8 function;
421
422 /* This is sused to get the register addresses and things. */
423 ChipDescript *helper;
424
425 /*
426 * Bit mask of PIOs for the loader to waggle during download.
427 * We assume these are connected to LEDs. The main firmware gets
428 * the mask from a MIB entry.
429 */
430 s32 loader_led_mask;
431
432 /*
433 * Support for flow control. When the from-host queue of signals
434 * is full, we ask the host upper layer to stop sending packets. When
435 * the queue drains we tell it that it can send packets again.
436 * We use this flag to remember the current state.
437 */
438#define card_is_tx_q_paused(card, q) (card->tx_q_paused_flag[q])
439#define card_tx_q_unpause(card, q) (card->tx_q_paused_flag[q] = 0)
440#define card_tx_q_pause(card, q) (card->tx_q_paused_flag[q] = 1)
441
442 u16 tx_q_paused_flag[UNIFI_TRAFFIC_Q_MAX + 1 + UNIFI_NO_OF_TX_QS]; /* defensive more than big enough */
443
444 /* UDI callback for logging UniFi interactions */
445 udi_func_t udi_hook;
446
447 u8 bh_reason_host;
448 u8 bh_reason_unifi;
449
450 /* SDIO clock speed request from OS layer */
451 u8 request_max_clock;
452
453 /* Last SDIO clock frequency set */
454 u32 sdio_clock_speed;
455
456 /*
457 * Current host state (copy of value in IOABORT register and
458 * spinlock to protect it.
459 */
460 enum unifi_host_state host_state;
461
462 enum unifi_low_power_mode low_power_mode;
463 enum unifi_periodic_wake_mode periodic_wake_mode;
464
465 /*
466 * Ring buffer of signal structs for a queue of data packets from
467 * the host.
468 * The queue is empty when fh_data_q_num_rd == fh_data_q_num_wr.
469 * To add a packet to the queue, copy it to index given by
470 * (fh_data_q_num_wr%UNIFI_SOFT_Q_LENGTH) and advance fh_data_q_num_wr.
471 * To take a packet from the queue, copy data from index given by
472 * (fh_data_q_num_rd%UNIFI_SOFT_Q_LENGTH) and advance fh_data_q_num_rd.
473 * fh_data_q_num_rd and fh_data_q_num_rd are both modulo 256.
474 */
475 card_signal_t fh_command_q_body[UNIFI_SOFT_COMMAND_Q_LENGTH];
476 q_t fh_command_queue;
477
478 card_signal_t fh_traffic_q_body[UNIFI_NO_OF_TX_QS][UNIFI_SOFT_TRAFFIC_Q_LENGTH];
479 q_t fh_traffic_queue[UNIFI_NO_OF_TX_QS];
480
481 /*
482 * Signal counts from UniFi SDIO Control Data Structure.
483 * These are cached and synchronised with the UniFi before and after
484 * a batch of operations.
485 *
486 * These are the modulo-256 count of signals written to or read from UniFi
487 * The value is incremented for every signal.
488 */
489 s32 from_host_signals_w;
490 s32 from_host_signals_r;
491 s32 to_host_signals_r;
492 s32 to_host_signals_w;
493
494
495 /* Should specify buffer size as a number of signals */
496 /*
497 * Enough for 10 th and 10 fh data slots:
498 * 1 * 10 * 8 = 80
499 * 2 * 10 * 8 = 160
500 */
501#define UNIFI_FH_BUF_SIZE 1024
502 struct sigbuf
503 {
504 u8 *buf; /* buffer area */
505 u8 *ptr; /* current pos */
506 u16 count; /* signal count */
507 u16 bufsize;
508 } fh_buffer;
509 struct sigbuf th_buffer;
510
511
512 /*
513 * Field to use for the incrementing value to write to the UniFi
514 * SHARED_IO_INTERRUPT register.
515 * Flag to say we need to generate an interrupt at end of processing.
516 */
517 u32 unifi_interrupt_seq;
518 u8 generate_interrupt;
519
520
521 /* Pointers to the bulk data slots */
522 slot_desc_t *from_host_data;
523 bulk_data_desc_t *to_host_data;
524
525
526 /*
527 * Index of the next (hopefully) free data slot.
528 * This is an optimisation that starts searching at a more likely point
529 * than the beginning.
530 */
531 s16 from_host_data_head;
532
533 /* Dynamic slot allocation for queues */
534 card_dynamic_slot_t dynamic_slot_data;
535
536 /*
537 * SDIO specific fields
538 */
539
540 /* Interface pointer for the SDIO library */
541 CsrSdioFunction *sdio_if;
542
543 /* Copy of config_data struct from the card */
544 sdio_config_data_t config_data;
545
546 /* SDIO address of the Initialised flag and Control Data struct */
547 u32 init_flag_addr;
548 u32 sdio_ctrl_addr;
549
550 /* The last value written to the Shared Data Memory Page register */
551 u32 proc_select;
552 u32 dmem_page;
553 u32 pmem_page;
554
555 /* SDIO traffic counters limited to 32 bits for Synergy compatibility */
556 u32 sdio_bytes_read;
557 u32 sdio_bytes_written;
558
559 u8 memory_resources_allocated;
560
561 /* UniFi SDIO I/O Block size. */
562 u16 sdio_io_block_size;
563
564 /* Pad transfer sizes to SDIO block boundaries */
565 u8 sdio_io_block_pad;
566
567 /* Read from the XBV */
568 struct FWOV fwov;
569
570#ifndef CSR_WIFI_HIP_TA_DISABLE
571 /* TA sampling */
572 ta_data_t ta_sampling;
573#endif
574
575 /* Auto-coredump */
576 s16 request_coredump_on_reset; /* request coredump on next reset */
577 struct coredump_buf *dump_buf; /* root node */
578 struct coredump_buf *dump_next_write; /* node to fill at next dump */
579 struct coredump_buf *dump_cur_read; /* valid node to read, or NULL */
580
581#ifdef CSR_WIFI_HIP_DATA_PLANE_PROFILE
582 struct cmd_profile
583 {
584 u32 cmd52_count;
585 u32 cmd53_count;
586 u32 tx_count;
587 u32 tx_cfm_count;
588 u32 rx_count;
589 u32 bh_count;
590 u32 process_count;
591 u32 protocol_count;
592
593 u32 cmd52_f0_r_count;
594 u32 cmd52_f0_w_count;
595 u32 cmd52_r8or16_count;
596 u32 cmd52_w8or16_count;
597 u32 cmd52_r16_count;
598 u32 cmd52_w16_count;
599 u32 cmd52_r32_count;
600
601 u32 sdio_cmd_signal;
602 u32 sdio_cmd_clear_slot;
603 u32 sdio_cmd_to_host;
604 u32 sdio_cmd_from_host;
605 u32 sdio_cmd_from_host_and_clear;
606 } hip_prof;
607 struct cmd_profile cmd_prof;
608#endif
609
610 /* Interrupt processing mode flags */
611 u32 intmode;
612
613#ifdef UNIFI_DEBUG
614 u8 lsb;
615#endif
616
617 /* Historic firmware panic codes */
618 u32 panic_data_phy_addr;
619 u32 panic_data_mac_addr;
620 u16 last_phy_panic_code;
621 u16 last_phy_panic_arg;
622 u16 last_mac_panic_code;
623 u16 last_mac_panic_arg;
624#ifdef CSR_PRE_ALLOC_NET_DATA
625 bulk_data_desc_t bulk_data_desc_list[BULK_DATA_PRE_ALLOC_NUM];
626 u16 prealloc_netdata_r;
627 u16 prealloc_netdata_w;
628#endif
629}; /* struct card */
630
631
/*
 * Reset types.
 * NOTE(review): semantics inferred from the names only — COLD presumably
 * implies a full chip restart, WARM a lighter re-initialisation; confirm
 * against the reset implementation before relying on this.
 */
enum unifi_reset_type
{
    UNIFI_COLD_RESET = 1,
    UNIFI_WARM_RESET = 2
};
638
639/*
640 * unifi_set_host_state() implements signalling for waking UniFi from
641 * deep sleep. The host indicates to UniFi that it is in one of three states:
642 * Torpid - host has nothing to send, UniFi can go to sleep.
643 * Drowsy - host has data to send to UniFi. UniFi will respond with an
644 * SDIO interrupt. When hosts responds it moves to Awake.
645 * Awake - host has data to transfer, UniFi must stay awake.
646 * When host has finished, it moves to Torpid.
647 */
648CsrResult unifi_set_host_state(card_t *card, enum unifi_host_state state);
649
650
651CsrResult unifi_set_proc_select(card_t *card, enum unifi_dbg_processors_select select);
652s32 card_read_signal_counts(card_t *card);
653bulk_data_desc_t* card_find_data_slot(card_t *card, s16 slot);
654
655
656CsrResult unifi_read32(card_t *card, u32 unifi_addr, u32 *pdata);
657CsrResult unifi_readnz(card_t *card, u32 unifi_addr,
658 void *pdata, u16 len);
659s32 unifi_read_shared_count(card_t *card, u32 addr);
660
661CsrResult unifi_writen(card_t *card, u32 unifi_addr, void *pdata, u16 len);
662
663CsrResult unifi_bulk_rw(card_t *card, u32 handle,
664 void *pdata, u32 len, s16 direction);
665CsrResult unifi_bulk_rw_noretry(card_t *card, u32 handle,
666 void *pdata, u32 len, s16 direction);
667#define UNIFI_SDIO_READ 0
668#define UNIFI_SDIO_WRITE 1
669
670CsrResult unifi_read_8_or_16(card_t *card, u32 unifi_addr, u8 *pdata);
671CsrResult unifi_write_8_or_16(card_t *card, u32 unifi_addr, u8 data);
672CsrResult unifi_read_direct_8_or_16(card_t *card, u32 addr, u8 *pdata);
673CsrResult unifi_write_direct_8_or_16(card_t *card, u32 addr, u8 data);
674
675CsrResult unifi_read_direct16(card_t *card, u32 addr, u16 *pdata);
676CsrResult unifi_read_direct32(card_t *card, u32 addr, u32 *pdata);
677CsrResult unifi_read_directn(card_t *card, u32 addr, void *pdata, u16 len);
678
679CsrResult unifi_write_direct16(card_t *card, u32 addr, u16 data);
680CsrResult unifi_write_directn(card_t *card, u32 addr, void *pdata, u16 len);
681
682CsrResult sdio_read_f0(card_t *card, u32 addr, u8 *pdata);
683CsrResult sdio_write_f0(card_t *card, u32 addr, u8 data);
684
685void unifi_read_panic(card_t *card);
686#ifdef CSR_PRE_ALLOC_NET_DATA
687void prealloc_netdata_free(card_t *card);
688CsrResult prealloc_netdata_alloc(card_t *card);
689#endif
690/* For diagnostic use */
691void dump(void *mem, u16 len);
692void dump16(void *mem, u16 len);
693
694#endif /* __CARD_SDIO_H__ */
diff --git a/drivers/staging/csr/csr_wifi_hip_card_sdio_intr.c b/drivers/staging/csr/csr_wifi_hip_card_sdio_intr.c
deleted file mode 100644
index cfe186e07071..000000000000
--- a/drivers/staging/csr/csr_wifi_hip_card_sdio_intr.c
+++ /dev/null
@@ -1,2595 +0,0 @@
1/*****************************************************************************
2
3 (c) Cambridge Silicon Radio Limited 2012
4 All rights reserved and confidential information of CSR
5
6 Refer to LICENSE.txt included with this source for details
7 on the license terms.
8
9*****************************************************************************/
10
11/*
12 * ---------------------------------------------------------------------------
13 * FILE: csr_wifi_hip_card_sdio_intr.c
14 *
15 * PURPOSE:
16 * Interrupt processing for the UniFi SDIO driver.
17 *
18 * We may need another signal queue of responses to UniFi to hold
19 * bulk data commands generated by read_to_host_signals().
20 *
21 * ---------------------------------------------------------------------------
22 */
23#undef CSR_WIFI_HIP_NOISY
24
#include <linux/string.h>

#include "csr_wifi_hip_unifi.h"
#include "csr_wifi_hip_conversions.h"
#include "csr_wifi_hip_card.h"
#include "csr_wifi_hip_xbv.h"
29
30
31/*
32 * If the SDIO link is idle for this time (in milliseconds),
33 * signal UniFi to go into Deep Sleep.
34 * Valid return value of unifi_bh().
35 */
36#define UNIFI_DEFAULT_HOST_IDLE_TIMEOUT 5
37/*
38 * If the UniFi has not woken up for this time (in milliseconds),
39 * signal the bottom half to take action.
40 * Valid return value of unifi_bh().
41 */
42#define UNIFI_DEFAULT_WAKE_TIMEOUT 1000
43
44
45static CsrResult process_bh(card_t *card);
46static CsrResult handle_host_protocol(card_t *card, u8 *processed_something);
47
48static CsrResult flush_fh_buffer(card_t *card);
49
50static CsrResult check_fh_sig_slots(card_t *card, u16 needed, s32 *space);
51
52static CsrResult read_to_host_signals(card_t *card, s32 *processed);
53static CsrResult process_to_host_signals(card_t *card, s32 *processed);
54
55static CsrResult process_bulk_data_command(card_t *card,
56 const u8 *cmdptr,
57 s16 cmd, u16 len);
58static CsrResult process_clear_slot_command(card_t *card,
59 const u8 *cmdptr);
60static CsrResult process_fh_cmd_queue(card_t *card, s32 *processed);
61static CsrResult process_fh_traffic_queue(card_t *card, s32 *processed);
62static void restart_packet_flow(card_t *card);
63static CsrResult process_clock_request(card_t *card);
64
65#ifdef CSR_WIFI_HIP_NOISY
66s16 dump_fh_buf = 0;
67#endif /* CSR_WIFI_HIP_NOISY */
68
69#ifdef CSR_WIFI_HIP_DEBUG_OFFLINE
70
71/*
72 * The unifi_debug_output buffer can be used to debug the HIP behaviour offline
73 * i.e. without using the tracing functions that change the timing.
74 *
75 * Call unifi_debug_log_to_buf() with printf arguments to store a string into
76 * unifi_debug_output. When unifi_debug_buf_dump() is called, the contents of the
77 * buffer are dumped with dump_str() which has to be implemented in the
78 * OS layer, during the porting exercise. The offset printed, holds the
79 * offset where the last character is (always a zero).
80 *
81 */
82
83#define UNIFI_DEBUG_GBUFFER_SIZE 8192
84static char unifi_debug_output[UNIFI_DEBUG_GBUFFER_SIZE];
85static char *unifi_dbgbuf_ptr = unifi_debug_output;
86static char *unifi_dbgbuf_start = unifi_debug_output;
87
88static void append_char(char c)
89{
90 /* write char and advance pointer */
91 *unifi_dbgbuf_ptr++ = c;
92 /* wrap pointer at end of buffer */
93 if ((unifi_dbgbuf_ptr - unifi_debug_output) >= UNIFI_DEBUG_GBUFFER_SIZE)
94 {
95 unifi_dbgbuf_ptr = unifi_debug_output;
96 }
97} /* append_char() */
98
99
100void unifi_debug_string_to_buf(const char *str)
101{
102 const char *p = str;
103 while (*p)
104 {
105 append_char(*p);
106 p++;
107 }
108 /* Update start-of-buffer pointer */
109 unifi_dbgbuf_start = unifi_dbgbuf_ptr + 1;
110 if ((unifi_dbgbuf_start - unifi_debug_output) >= UNIFI_DEBUG_GBUFFER_SIZE)
111 {
112 unifi_dbgbuf_start = unifi_debug_output;
113 }
114}
115
116
/*
 * printf-style logging into the offline HIP debug buffer.
 * The formatted output is truncated to DEBUG_BUFFER_SIZE - 1 characters
 * by vsnprintf before being appended.
 * NOTE(review): "s" is static, so concurrent callers would race on it —
 * presumably all callers run from the bh thread; confirm.
 */
void unifi_debug_log_to_buf(const char *fmt, ...)
{
#define DEBUG_BUFFER_SIZE 80
    static char s[DEBUG_BUFFER_SIZE];
    va_list args;

    va_start(args, fmt);
    vsnprintf(s, DEBUG_BUFFER_SIZE, fmt, args);
    va_end(args);

    /* Push the formatted string into the circular debug buffer. */
    unifi_debug_string_to_buf(s);
} /* unifi_debug_log_to_buf() */
129
130
131/* Convert signed 32 bit (or less) integer to string */
132static void CsrUInt16ToHex(u16 number, char *str)
133{
134 u16 index;
135 u16 currentValue;
136
137 for (index = 0; index < 4; index++)
138 {
139 currentValue = (u16) (number & 0x000F);
140 number >>= 4;
141 str[3 - index] = (char) (currentValue > 9 ? currentValue + 55 : currentValue + '0');
142 }
143 str[4] = '\0';
144}
145
146
147/*
148 * ---------------------------------------------------------------------------
149 * unifi_debug_hex_to_buf
150 *
151 * puts the contents of the passed buffer into the debug buffer as a hex string
152 *
153 * Arguments:
154 * buff buffer to print as hex
155 * length number of chars to print
156 *
157 * Returns:
158 * None.
159 *
160 * ---------------------------------------------------------------------------
161 */
162void unifi_debug_hex_to_buf(const char *buff, u16 length)
163{
164 char s[5];
165 u16 i;
166
167 for (i = 0; i < length; i = i + 2)
168 {
169 CsrUInt16ToHex(*((u16 *)(buff + i)), s);
170 unifi_debug_string_to_buf(s);
171 }
172}
173
174
void unifi_debug_buf_dump(void)
{
    /* Offset of the current write cursor within the circular buffer;
     * everything after it (wrapping) is the oldest logged data. */
    s32 offset = unifi_dbgbuf_ptr - unifi_debug_output;

    unifi_error(NULL, "HIP debug buffer offset=%d\n", offset);
    /* Dump oldest data first: cursor..end of buffer, then start..cursor.
     * dump_str() is supplied by the OS layer porting exercise. */
    dump_str(unifi_debug_output + offset, UNIFI_DEBUG_GBUFFER_SIZE - offset);
    dump_str(unifi_debug_output, offset);
} /* unifi_debug_buf_dump() */
183
184
185#endif /* CSR_WIFI_HIP_DEBUG_OFFLINE */
186
187#ifdef CSR_PRE_ALLOC_NET_DATA
188#define NETDATA_PRE_ALLOC_BUF_SIZE 8000
189
190void prealloc_netdata_free(card_t *card)
191{
192 unifi_warning(card->ospriv, "prealloc_netdata_free: IN: w=%d r=%d\n", card->prealloc_netdata_w, card->prealloc_netdata_r);
193
194 while (card->bulk_data_desc_list[card->prealloc_netdata_r].data_length != 0)
195 {
196 unifi_warning(card->ospriv, "prealloc_netdata_free: r=%d\n", card->prealloc_netdata_r);
197
198 unifi_net_data_free(card->ospriv, &card->bulk_data_desc_list[card->prealloc_netdata_r]);
199 card->prealloc_netdata_r++;
200 card->prealloc_netdata_r %= BULK_DATA_PRE_ALLOC_NUM;
201 }
202 card->prealloc_netdata_r = card->prealloc_netdata_w = 0;
203
204 unifi_warning(card->ospriv, "prealloc_netdata_free: OUT: w=%d r=%d\n", card->prealloc_netdata_w, card->prealloc_netdata_r);
205}
206
207
208CsrResult prealloc_netdata_alloc(card_t *card)
209{
210 CsrResult r;
211
212 unifi_trace(card->ospriv, UDBG5, "prealloc_netdata_alloc: IN: w=%d r=%d\n", card->prealloc_netdata_w, card->prealloc_netdata_r);
213
214 while (card->bulk_data_desc_list[card->prealloc_netdata_w].data_length == 0)
215 {
216 r = unifi_net_data_malloc(card->ospriv, &card->bulk_data_desc_list[card->prealloc_netdata_w], NETDATA_PRE_ALLOC_BUF_SIZE);
217 if (r != CSR_RESULT_SUCCESS)
218 {
219 unifi_error(card->ospriv, "prealloc_netdata_alloc: Failed to allocate t-h bulk data\n");
220 return CSR_RESULT_FAILURE;
221 }
222 card->prealloc_netdata_w++;
223 card->prealloc_netdata_w %= BULK_DATA_PRE_ALLOC_NUM;
224 }
225 unifi_trace(card->ospriv, UDBG5, "prealloc_netdata_alloc: OUT: w=%d r=%d\n", card->prealloc_netdata_w, card->prealloc_netdata_r);
226
227 return CSR_RESULT_SUCCESS;
228}
229
230
/*
 * Hand a bulk data buffer of at least "size" bytes to the caller via
 * "bulk_data_slot". Normally this is taken from the pre-allocated ring;
 * oversized requests (or an exhausted ring) fall back to a direct
 * unifi_net_data_malloc(). Returns CSR_RESULT_SUCCESS or
 * CSR_RESULT_FAILURE if the fallback allocation fails.
 */
static CsrResult prealloc_netdata_get(card_t *card, bulk_data_desc_t *bulk_data_slot, u32 size)
{
    CsrResult r;

    unifi_trace(card->ospriv, UDBG5, "prealloc_netdata_get: IN: w=%d r=%d\n", card->prealloc_netdata_w, card->prealloc_netdata_r);

    /* An empty slot at the read index means the pool was drained faster
     * than prealloc_netdata_alloc() refilled it — log it, the fallback
     * path below still satisfies the request. */
    if (card->bulk_data_desc_list[card->prealloc_netdata_r].data_length == 0)
    {
        unifi_error(card->ospriv, "prealloc_netdata_get: data_length = 0\n");
    }

    /* Fall back to a direct allocation when the request exceeds the
     * pre-allocated buffer size or the pool is empty. */
    if ((size > NETDATA_PRE_ALLOC_BUF_SIZE) || (card->bulk_data_desc_list[card->prealloc_netdata_r].data_length == 0))
    {
        unifi_warning(card->ospriv, "prealloc_netdata_get: Calling net_data_malloc\n");

        r = unifi_net_data_malloc(card->ospriv, bulk_data_slot, size);
        if (r != CSR_RESULT_SUCCESS)
        {
            unifi_error(card->ospriv, "prealloc_netdata_get: Failed to allocate t-h bulk data\n");
            return CSR_RESULT_FAILURE;
        }
        return CSR_RESULT_SUCCESS;
    }

    /* Transfer ownership of the pooled buffer to the caller, then clear
     * the slot so prealloc_netdata_alloc() knows to refill it. */
    *bulk_data_slot = card->bulk_data_desc_list[card->prealloc_netdata_r];
    card->bulk_data_desc_list[card->prealloc_netdata_r].os_data_ptr = NULL;
    card->bulk_data_desc_list[card->prealloc_netdata_r].os_net_buf_ptr = NULL;
    card->bulk_data_desc_list[card->prealloc_netdata_r].net_buf_length = 0;
    card->bulk_data_desc_list[card->prealloc_netdata_r].data_length = 0;

    /* Advance the read index around the ring. */
    card->prealloc_netdata_r++;
    card->prealloc_netdata_r %= BULK_DATA_PRE_ALLOC_NUM;

    unifi_trace(card->ospriv, UDBG5, "prealloc_netdata_get: OUT: w=%d r=%d\n", card->prealloc_netdata_w, card->prealloc_netdata_r);

    return CSR_RESULT_SUCCESS;
}
268
269
270#endif
271
272/*
273 * ---------------------------------------------------------------------------
274 * unifi_sdio_interrupt_handler
275 *
276 * This function should be called by the OS-dependent code to handle
277 * an SDIO interrupt from the UniFi.
278 *
279 * Arguments:
280 * card Pointer to card context structure.
281 *
282 * Returns:
283 * None.
284 *
285 * Notes: This function may be called in DRS context. In this case,
286 * tracing with the unifi_trace(), etc, is not allowed.
287 * ---------------------------------------------------------------------------
288 */
void unifi_sdio_interrupt_handler(card_t *card)
{
    /*
     * Set the flag to say reason for waking was SDIO interrupt.
     * Then ask the OS layer to run the unifi_bh to give attention to the UniFi.
     * The flag must be written before scheduling the bh so that unifi_bh()
     * sees the reason when it runs.
     */
    card->bh_reason_unifi = 1;
    /* Return value deliberately ignored: this may run in interrupt/DSR
     * context where nothing useful can be done on failure. */
    (void)unifi_run_bh(card->ospriv);
} /* sdio_interrupt_handler() */
298
299
300/*
301 * ---------------------------------------------------------------------------
302 * unifi_configure_low_power_mode
303 *
304 * This function should be called by the OS-dependent when
305 * the deep sleep signaling needs to be enabled or disabled.
306 *
307 * Arguments:
308 * card Pointer to card context structure.
309 * low_power_mode Disable/Enable the deep sleep signaling
310 * periodic_wake_mode UniFi wakes host periodically.
311 *
312 * Returns:
313 * CSR_RESULT_SUCCESS on success or a CSR error code.
314 * ---------------------------------------------------------------------------
315 */
316CsrResult unifi_configure_low_power_mode(card_t *card,
317 enum unifi_low_power_mode low_power_mode,
318 enum unifi_periodic_wake_mode periodic_wake_mode)
319{
320 card->low_power_mode = low_power_mode;
321 card->periodic_wake_mode = periodic_wake_mode;
322
323 unifi_trace(card->ospriv, UDBG1,
324 "unifi_configure_low_power_mode: new mode = %s, wake_host = %s\n",
325 (low_power_mode == UNIFI_LOW_POWER_DISABLED)?"disabled" : "enabled",
326 (periodic_wake_mode == UNIFI_PERIODIC_WAKE_HOST_DISABLED)?"FALSE" : "TRUE");
327
328 (void)unifi_run_bh(card->ospriv);
329 return CSR_RESULT_SUCCESS;
330} /* unifi_configure_low_power_mode() */
331
332
333/*
334 * ---------------------------------------------------------------------------
335 * unifi_force_low_power_mode
336 *
337 * This function should be called by the OS-dependent when
338 * UniFi needs to be set to the low power mode (e.g. on suspend)
339 *
340 * Arguments:
341 * card Pointer to card context structure.
342 *
343 * Returns:
344 * CSR_RESULT_SUCCESS on success or a CSR error code.
345 * ---------------------------------------------------------------------------
346 */
347CsrResult unifi_force_low_power_mode(card_t *card)
348{
349 if (card->low_power_mode == UNIFI_LOW_POWER_DISABLED)
350 {
351 unifi_error(card->ospriv, "Attempt to set mode to TORPID when lower power mode is disabled\n");
352 return CSR_WIFI_HIP_RESULT_INVALID_VALUE;
353 }
354
355 return unifi_set_host_state(card, UNIFI_HOST_STATE_TORPID);
356} /* unifi_force_low_power_mode() */
357
358
359/*
360 * ---------------------------------------------------------------------------
361 * unifi_bh
362 *
363 * This function should be called by the OS-dependent code when
364 * host and/or UniFi has requested an exchange of messages.
365 *
366 * Arguments:
367 * card Pointer to card context structure.
368 *
369 * Returns:
370 * CSR_RESULT_SUCCESS on success or a CSR error code.
371 * ---------------------------------------------------------------------------
372 */
/*
 * "remaining" is in/out: on entry the milliseconds left of the previous
 * timeout (0 means it expired); on exit the timeout the caller should arm
 * before the next invocation.
 */
CsrResult unifi_bh(card_t *card, u32 *remaining)
{
    CsrResult r;
    CsrResult csrResult;
    u8 pending;
    s32 iostate, j;
    /* Snapshot the mode so a concurrent reconfiguration cannot change it
     * part-way through this run. */
    const enum unifi_low_power_mode low_power_mode = card->low_power_mode;
    u16 data_slots_used = 0;


    /* Process request to raise the maximum SDIO clock */
    r = process_clock_request(card);
    if (r != CSR_RESULT_SUCCESS)
    {
        unifi_error(card->ospriv, "Error setting maximum SDIO clock\n");
        goto exit;
    }

    /*
     * Why was the BH thread woken?
     * If it was an SDIO interrupt, UniFi is awake and we need to process it.
     * If it was a host process queueing data, then we need to awaken UniFi.
     *
     * Priority of flags is top down.
     *
     * ----------------------------------------------------------+
     *  \state|    AWAKE     |     DROWSY     |     TORPID       |
     * flag\  |              |                |                  |
     * ---------+--------------+----------------+----------------|
     *         | do the host  | go to AWAKE and| go to AWAKE and |
     *  unifi  | protocol     | do the host    | do the host     |
     *         |              | protocol       | protocol        |
     * ---------+--------------+----------------+----------------|
     *         | do the host  |                |                 |
     *  host   | protocol     | do nothing     | go to DROWSY    |
     *         |              |                |                 |
     * ---------+--------------+----------------+----------------|
     *         |              |                | should not      |
     * timeout | go to TORPID | error, unifi   | occur           |
     *         |              | didn't wake up | do nothing      |
     * ----------------------------------------------------------+
     *
     * Note that if we end up in the AWAKE state we always do the host protocol.
     */

    /* do { ... } while (0) so "break" can jump straight to the host
     * protocol below while early returns leave the function. */
    do
    {
        /*
         * When the host state is set to DROWSY, then we can not disable the
         * interrupts as UniFi can generate an interrupt even when the INT_ENABLE
         * register has the interrupts disabled. This interrupt will be lost.
         */
        if (card->host_state == UNIFI_HOST_STATE_DROWSY || card->host_state == UNIFI_HOST_STATE_TORPID)
        {
            u8 reason_unifi;

            /*
             * An interrupt may occur while or after we cache the reason.
             * This interrupt will cause the unifi_bh() to be scheduled again.
             * Any interrupt that has happened before the register is read
             * and is considered spurious has to acknowledged.
             */
            reason_unifi = card->bh_reason_unifi;

            /*
             * If an interrupt is received, check if it was a real one,
             * set the host state to AWAKE and run the BH.
             */
            r = CardPendingInt(card, &pending);
            if (r != CSR_RESULT_SUCCESS)
            {
                goto exit;
            }

            if (pending)
            {
                unifi_trace(card->ospriv, UDBG5,
                            "UNIFI_HOST_STATE_%s: Set state to AWAKE.\n",
                            (card->host_state == UNIFI_HOST_STATE_TORPID)?"TORPID" : "DROWSY");

                r = unifi_set_host_state(card, UNIFI_HOST_STATE_AWAKE);
                if (r == CSR_RESULT_SUCCESS)
                {
                    (*remaining) = 0;
                    break;
                }
            }
            else if (reason_unifi)
            {
                /* Spurious interrupt — acknowledge it so it is not lost. */
                CsrSdioInterruptAcknowledge(card->sdio_if);
            }

            /*
             * If the chip is in TORPID, and the host wants to wake it up,
             * set the host state to DROWSY and wait for the wake-up interrupt.
             */
            if ((card->host_state == UNIFI_HOST_STATE_TORPID) && card->bh_reason_host)
            {
                r = unifi_set_host_state(card, UNIFI_HOST_STATE_DROWSY);
                if (r == CSR_RESULT_SUCCESS)
                {
                    /*
                     * set the timeout value to UNIFI_DEFAULT_WAKE_TIMEOUT
                     * to capture a wake error.
                     */
                    card->bh_reason_host = 0;
                    (*remaining) = UNIFI_DEFAULT_WAKE_TIMEOUT;
                    return CSR_RESULT_SUCCESS;
                }

                goto exit;
            }

            /*
             * If the chip is in DROWSY, and the timeout expires,
             * we need to reset the chip. This should never occur.
             * (If it does, check that the calling thread set "remaining"
             * according to the time remaining when unifi_bh() was called).
             */
            if ((card->host_state == UNIFI_HOST_STATE_DROWSY) && ((*remaining) == 0))
            {
                unifi_error(card->ospriv, "UniFi did not wake up on time...\n");

                /*
                 * Check if Function1 has gone away or
                 * if we missed an SDIO interrupt.
                 */
                r = unifi_check_io_status(card, &iostate);
                if (r == CSR_WIFI_HIP_RESULT_NO_DEVICE)
                {
                    goto exit;
                }
                /* Need to reset and reboot */
                return CSR_RESULT_FAILURE;
            }
        }
        else
        {
            /* AWAKE: any pending reason means run the host protocol. */
            if (card->bh_reason_unifi || card->bh_reason_host)
            {
                break;
            }

            /* Idle timer expired while AWAKE: drop to TORPID. */
            if (((*remaining) == 0) && (low_power_mode == UNIFI_LOW_POWER_ENABLED))
            {
                r = unifi_set_host_state(card, UNIFI_HOST_STATE_TORPID);
                if (r == CSR_RESULT_SUCCESS)
                {
                    (*remaining) = 0;
                    return CSR_RESULT_SUCCESS;
                }

                goto exit;
            }
        }

        /* No need to run the host protocol */
        return CSR_RESULT_SUCCESS;
    } while (0);


    /* Disable the SDIO interrupts while doing SDIO ops */
    csrResult = CsrSdioInterruptDisable(card->sdio_if);
    if (csrResult == CSR_SDIO_RESULT_NO_DEVICE)
    {
        r = CSR_WIFI_HIP_RESULT_NO_DEVICE;
        goto exit;
    }
    if (csrResult != CSR_RESULT_SUCCESS)
    {
        r = ConvertCsrSdioToCsrHipResult(card, csrResult);
        unifi_error(card->ospriv, "Failed to disable SDIO interrupts. unifi_bh queues error.\n");
        goto exit;
    }

    /* Now that the interrupts are disabled, ack the interrupt */
    CsrSdioInterruptAcknowledge(card->sdio_if);

    /* Run the HIP */
    r = process_bh(card);
    if (r != CSR_RESULT_SUCCESS)
    {
        goto exit;
    }

    /*
     * If host is now idle, schedule a timer for the delay before we
     * let UniFi go into deep sleep.
     * If the timer goes off, we will move to TORPID state.
     * If UniFi raises an interrupt in the meantime, we will cancel
     * the timer and start a new one when we become idle.
     */
    for (j = 0; j < UNIFI_NO_OF_TX_QS; j++)
    {
        data_slots_used += CSR_WIFI_HIP_Q_SLOTS_USED(&card->fh_traffic_queue[j]);
    }

    if ((low_power_mode == UNIFI_LOW_POWER_ENABLED) && (data_slots_used == 0))
    {
#ifndef CSR_WIFI_HIP_TA_DISABLE
        if (card->ta_sampling.traffic_type != CSR_WIFI_ROUTER_CTRL_TRAFFIC_TYPE_PERIODIC)
        {
#endif
        /* return the UNIFI_DEFAULT_HOST_IDLE_TIMEOUT, so we can go to sleep. */
        unifi_trace(card->ospriv, UDBG5,
                    "Traffic is not periodic, set timer for TORPID.\n");
        (*remaining) = UNIFI_DEFAULT_HOST_IDLE_TIMEOUT;
#ifndef CSR_WIFI_HIP_TA_DISABLE
        }
        else
        {
            unifi_trace(card->ospriv, UDBG5,
                        "Traffic is periodic, set unifi to TORPID immediately.\n");
            if (CardAreAllFromHostDataSlotsEmpty(card) == 1)
            {
                r = unifi_set_host_state(card, UNIFI_HOST_STATE_TORPID);
                if (r != CSR_RESULT_SUCCESS)
                {
                    goto exit;
                }
            }
        }
#endif
    }

    csrResult = CsrSdioInterruptEnable(card->sdio_if);
    if (csrResult == CSR_SDIO_RESULT_NO_DEVICE)
    {
        r = CSR_WIFI_HIP_RESULT_NO_DEVICE;
    }
    if (csrResult != CSR_RESULT_SUCCESS)
    {
        r = ConvertCsrSdioToCsrHipResult(card, csrResult);
        unifi_error(card->ospriv, "Failed to enable SDIO interrupt\n");
    }

exit:

    unifi_trace(card->ospriv, UDBG4, "New state=%d\n", card->host_state);

    if (r != CSR_RESULT_SUCCESS)
    {
#if defined (CSR_WIFI_HIP_DEBUG_OFFLINE) && defined (CSR_WIFI_HIP_SDIO_TRACE)
        unifi_debug_buf_dump();
#endif
        /* If an interrupt has been raised, ack it here */
        if (card->bh_reason_unifi)
        {
            CsrSdioInterruptAcknowledge(card->sdio_if);
        }

        unifi_error(card->ospriv,
                    "unifi_bh: state=%d %c, clock=%dkHz, interrupt=%d host=%d, power_save=%s\n",
                    card->host_state,
                    (card->host_state == UNIFI_HOST_STATE_AWAKE)?'A' : (card->host_state == UNIFI_HOST_STATE_DROWSY)?'D' : 'T',
                    card->sdio_clock_speed / 1000,
                    card->bh_reason_unifi, card->bh_reason_host,
                    (low_power_mode == UNIFI_LOW_POWER_DISABLED)?"disabled" : "enabled");

        /* Try to capture firmware panic codes */
        (void)unifi_capture_panic(card);

        /* Ask for a mini-coredump when the driver has reset UniFi */
        (void)unifi_coredump_request_at_next_reset(card, 1);
    }

    return r;
} /* unifi_bh() */
641
642
643/*
644 * ---------------------------------------------------------------------------
645 * process_clock_request
646 *
647 * Handle request from the OS layer to increase the SDIO clock speed.
648 * The fast clock is limited until the firmware has indicated that it has
649 * completed initialisation to the OS layer.
650 *
651 * Arguments:
652 * card Pointer to card context structure.
653 *
654 * Returns:
655 * CSR_RESULT_SUCCESS on success or CSR error code.
656 * ---------------------------------------------------------------------------
657 */
658static CsrResult process_clock_request(card_t *card)
659{
660 CsrResult r = CSR_RESULT_SUCCESS;
661 CsrResult csrResult;
662
663 if (!card->request_max_clock)
664 {
665 return CSR_RESULT_SUCCESS; /* No pending request */
666 }
667
668 /*
669 * The SDIO clock speed request from the OS layer is only acted upon if
670 * the UniFi is awake. If it was in any other state, the clock speed will
671 * transition through SAFE to MAX while the host wakes it up, and the
672 * final speed reached will be UNIFI_SDIO_CLOCK_MAX_HZ.
673 * This assumes that the SME never requests low power mode while the f/w
674 * initialisation takes place.
675 */
676 if (card->host_state == UNIFI_HOST_STATE_AWAKE)
677 {
678 unifi_trace(card->ospriv, UDBG1, "Set SDIO max clock\n");
679 csrResult = CsrSdioMaxBusClockFrequencySet(card->sdio_if, UNIFI_SDIO_CLOCK_MAX_HZ);
680 if (csrResult != CSR_RESULT_SUCCESS)
681 {
682 r = ConvertCsrSdioToCsrHipResult(card, csrResult);
683 }
684 else
685 {
686 card->sdio_clock_speed = UNIFI_SDIO_CLOCK_MAX_HZ; /* log the new freq */
687 }
688 }
689 else
690 {
691 unifi_trace(card->ospriv, UDBG1, "Will set SDIO max clock after wakeup\n");
692 }
693
694 /* Cancel the request now that it has been acted upon, or is about to be
695 * by the wakeup mechanism
696 */
697 card->request_max_clock = 0;
698
699 return r;
700}
701
702
703/*
704 * ---------------------------------------------------------------------------
705 * process_bh
706 *
707 * Exchange messages with UniFi
708 *
709 * Arguments:
710 * card Pointer to card context structure.
711 *
712 * Returns:
713 * CSR_RESULT_SUCCESS on success or CSR error code.
714 * ---------------------------------------------------------------------------
715 */
716static CsrResult process_bh(card_t *card)
717{
718 CsrResult r;
719 u8 more;
720 more = FALSE;
721
722 /* Process the reasons (interrupt, signals) */
723 do
724 {
725 /*
726 * Run in a while loop, to save clearing the interrupts
727 * every time around the outside loop.
728 */
729 do
730 {
731 /* If configured to run the HIP just once, skip first loop */
732 if (card->intmode & CSR_WIFI_INTMODE_RUN_BH_ONCE)
733 {
734 break;
735 }
736
737 r = handle_host_protocol(card, &more);
738 if (r != CSR_RESULT_SUCCESS)
739 {
740 return r;
741 }
742
743#if defined (CSR_WIFI_HIP_DEBUG_OFFLINE) && defined (CSR_WIFI_HIP_DATA_PLANE_PROFILE)
744 unifi_debug_log_to_buf("c52=%d c53=%d tx=%d txc=%d rx=%d s=%d t=%d fc=%d\n",
745 card->cmd_prof.cmd52_count,
746 card->cmd_prof.cmd53_count,
747 card->cmd_prof.tx_count,
748 card->cmd_prof.tx_cfm_count,
749 card->cmd_prof.rx_count,
750 card->cmd_prof.sdio_cmd_signal,
751 card->cmd_prof.sdio_cmd_to_host,
752 card->cmd_prof.sdio_cmd_from_host_and_clear
753 );
754
755 card->cmd_prof.cmd52_count = card->cmd_prof.cmd53_count = 0;
756 card->cmd_prof.tx_count = card->cmd_prof.tx_cfm_count = card->cmd_prof.rx_count = 0;
757
758 card->cmd_prof.cmd52_f0_r_count = 0;
759 card->cmd_prof.cmd52_f0_w_count = 0;
760 card->cmd_prof.cmd52_r8or16_count = 0;
761 card->cmd_prof.cmd52_w8or16_count = 0;
762 card->cmd_prof.cmd52_r16_count = 0;
763 card->cmd_prof.cmd52_w16_count = 0;
764 card->cmd_prof.cmd52_r32_count = 0;
765
766 card->cmd_prof.sdio_cmd_signal = 0;
767 card->cmd_prof.sdio_cmd_clear_slot = 0;
768 card->cmd_prof.sdio_cmd_to_host = 0;
769 card->cmd_prof.sdio_cmd_from_host = 0;
770 card->cmd_prof.sdio_cmd_from_host_and_clear = 0;
771#endif
772
773
774 } while (more || card->bh_reason_unifi || card->bh_reason_host);
775
776 /* Acknowledge the h/w interrupt */
777 r = CardClearInt(card);
778 if (r != CSR_RESULT_SUCCESS)
779 {
780 unifi_error(card->ospriv, "Failed to acknowledge interrupt.\n");
781 return r;
782 }
783
784 /*
785 * UniFi may have tried to generate an interrupt during the
786 * CardClearInt() was running. So, we need to run the host
787 * protocol again, to check if there are any pending requests.
788 */
789 r = handle_host_protocol(card, &more);
790 if (r != CSR_RESULT_SUCCESS)
791 {
792 return r;
793 }
794
795#if defined (CSR_WIFI_HIP_DEBUG_OFFLINE) && defined (CSR_WIFI_HIP_DATA_PLANE_PROFILE)
796 unifi_debug_log_to_buf("c52=%d c53=%d tx=%d txc=%d rx=%d s=%d t=%d fc=%d\n",
797 card->cmd_prof.cmd52_count,
798 card->cmd_prof.cmd53_count,
799 card->cmd_prof.tx_count,
800 card->cmd_prof.tx_cfm_count,
801 card->cmd_prof.rx_count,
802 card->cmd_prof.sdio_cmd_signal,
803 card->cmd_prof.sdio_cmd_to_host,
804 card->cmd_prof.sdio_cmd_from_host_and_clear
805 );
806
807 card->cmd_prof.cmd52_count = card->cmd_prof.cmd53_count = 0;
808 card->cmd_prof.tx_count = card->cmd_prof.tx_cfm_count = card->cmd_prof.rx_count = 0;
809
810 card->cmd_prof.cmd52_f0_r_count = 0;
811 card->cmd_prof.cmd52_f0_w_count = 0;
812 card->cmd_prof.cmd52_r8or16_count = 0;
813 card->cmd_prof.cmd52_w8or16_count = 0;
814 card->cmd_prof.cmd52_r16_count = 0;
815 card->cmd_prof.cmd52_w16_count = 0;
816 card->cmd_prof.cmd52_r32_count = 0;
817
818 card->cmd_prof.sdio_cmd_signal = 0;
819 card->cmd_prof.sdio_cmd_clear_slot = 0;
820 card->cmd_prof.sdio_cmd_to_host = 0;
821 card->cmd_prof.sdio_cmd_from_host = 0;
822 card->cmd_prof.sdio_cmd_from_host_and_clear = 0;
823#endif
824 /* If configured to run the HIP just once, work is now done */
825 if (card->intmode & CSR_WIFI_INTMODE_RUN_BH_ONCE)
826 {
827 break;
828 }
829
830 } while (more || card->bh_reason_unifi || card->bh_reason_host);
831
832#if defined (CSR_WIFI_HIP_DEBUG_OFFLINE) && defined (CSR_WIFI_HIP_DATA_PLANE_PROFILE)
833 if ((card->intmode & CSR_WIFI_INTMODE_RUN_BH_ONCE) == 0)
834 {
835 unifi_debug_log_to_buf("proc=%d\n",
836 card->cmd_prof.process_count);
837 }
838#endif
839
840 return CSR_RESULT_SUCCESS;
841} /* process_bh() */
842
843
844/*
845 * ---------------------------------------------------------------------------
846 * handle_host_protocol
847 *
848 * This function implements the Host Interface Protocol (HIP) as
849 * described in the Host Interface Protocol Specification.
850 *
851 * Arguments:
852 * card Pointer to card context structure.
853 * processed_something Pointer to location to update processing status:
854 * TRUE when data was transferred
855 * FALSE when no data was transferred (queues empty)
856 *
857 * Returns:
858 * CSR_RESULT_SUCCESS on success or CSR error code.
859 * ---------------------------------------------------------------------------
860 */
861static CsrResult handle_host_protocol(card_t *card, u8 *processed_something)
862{
863 CsrResult r;
864 s32 done;
865
866 *processed_something = FALSE;
867
868#ifdef CSR_WIFI_HIP_NOISY
869 unifi_error(card->ospriv, " ======================== \n");
870#endif /* CSR_WIFI_HIP_NOISY */
871
872#ifdef CSR_WIFI_HIP_DATA_PLANE_PROFILE
873 card->cmd_prof.process_count++;
874#endif
875
876 card->bh_reason_unifi = card->bh_reason_host = 0;
877 card->generate_interrupt = 0;
878
879
880 /*
881 * (Re)fill the T-H signal buffer
882 */
883 r = read_to_host_signals(card, &done);
884 if (r != CSR_RESULT_SUCCESS)
885 {
886 unifi_error(card->ospriv, "Error occurred reading to-host signals\n");
887 return r;
888 }
889 if (done > 0)
890 {
891 *processed_something = TRUE;
892 }
893
894 /*
895 * Process any to-host signals.
896 * Perform any requested CMD53 transfers here, but just queue any
897 * bulk data command responses.
898 */
899 r = process_to_host_signals(card, &done);
900 if (r != CSR_RESULT_SUCCESS)
901 {
902 unifi_error(card->ospriv, "Error occurred processing to-host signals\n");
903 return r;
904 }
905
906 /* Now send any signals in the F-H queues */
907 /* Give precedence to the command queue */
908 r = process_fh_cmd_queue(card, &done);
909 if (r != CSR_RESULT_SUCCESS)
910 {
911 unifi_error(card->ospriv, "Error occurred processing from-host signals\n");
912 return r;
913 }
914 if (done > 0)
915 {
916 *processed_something = TRUE;
917 }
918
919 r = process_fh_traffic_queue(card, &done);
920 if (r != CSR_RESULT_SUCCESS)
921 {
922 unifi_error(card->ospriv, "Error occurred processing from-host data signals\n");
923 return r;
924 }
925 if (done > 0)
926 {
927 *processed_something = TRUE;
928 }
929
930 /* Flush out the batch of signals to the UniFi. */
931 r = flush_fh_buffer(card);
932 if (r != CSR_RESULT_SUCCESS)
933 {
934 unifi_error(card->ospriv, "Failed to copy from-host signals to UniFi\n");
935 return r;
936 }
937
938
939 /*
940 * Send the host interrupt to say the queues have been modified.
941 */
942 if (card->generate_interrupt)
943 {
944 r = CardGenInt(card);
945 if (r != CSR_RESULT_SUCCESS)
946 {
947 unifi_error(card->ospriv, "Failed to notify UniFi that queues have been modified.\n");
948 return r;
949 }
950 }
951
952#ifdef CSR_WIFI_RX_PATH_SPLIT
953#ifdef CSR_WIFI_RX_PATH_SPLIT_DONT_USE_WQ
954 unifi_rx_queue_flush(card->ospriv);
955#endif
956#endif
957
958 /* See if we can re-enable transmission now */
959 restart_packet_flow(card);
960
961#ifdef CSR_PRE_ALLOC_NET_DATA
962 r = prealloc_netdata_alloc(card);
963 if (r != CSR_RESULT_SUCCESS)
964 {
965 unifi_error(card->ospriv, "prealloc_netdata failed\n");
966 return r;
967 }
968#endif
969
970 /*
971 * Don't put the thread sleep if we just interacted with the chip,
972 * there might be more to do if we look again.
973 */
974 return r;
975} /* handle_host_protocol() */
976
977
978/*
979 * Rounds the given signal length in bytes to a whole number
980 * of sig_frag_size.
981 */
982#define GET_CHUNKS_FOR(SIG_FRAG_SIZE, LENGTH) (((LENGTH) + ((SIG_FRAG_SIZE)-1)) / (SIG_FRAG_SIZE))
983
984
985/*
986 * ---------------------------------------------------------------------------
987 * read_to_host_signals
988 *
989 * Read everything pending in the UniFi TH signal buffer.
990 * Only do it if the local buffer is empty.
991 *
992 * Arguments:
993 * card Pointer to card context struct
994 * processed Number of signals read:
995 * 0 if there were no signals pending,
996 * 1 if we read at least one signal
997 * Returns:
998 * CSR error code if an error occurred.
999 * ---------------------------------------------------------------------------
1000 */
1001static CsrResult read_to_host_signals(card_t *card, s32 *processed)
1002{
1003 s32 count_thw, count_thr;
1004 s32 unread_chunks, unread_bytes;
1005 CsrResult r;
1006
1007 *processed = 0;
1008
1009 /* Read any pending signals or bulk data commands */
1010 count_thw = unifi_read_shared_count(card, card->sdio_ctrl_addr + 4);
1011 if (count_thw < 0)
1012 {
1013 unifi_error(card->ospriv, "Failed to read to-host sig written count\n");
1014 return CSR_RESULT_FAILURE;
1015 }
1016 card->to_host_signals_w = count_thw; /* diag */
1017
1018 count_thr = card->to_host_signals_r;
1019
1020 if (count_thw == count_thr)
1021 {
1022 return CSR_RESULT_SUCCESS;
1023 }
1024
1025 unread_chunks =
1026 (((count_thw - count_thr) + 128) % 128) - card->th_buffer.count;
1027
1028 if (unread_chunks == 0)
1029 {
1030 return CSR_RESULT_SUCCESS;
1031 }
1032
1033 unread_bytes = card->config_data.sig_frag_size * unread_chunks;
1034
1035
1036 r = unifi_bulk_rw(card,
1037 card->config_data.tohost_sigbuf_handle,
1038 card->th_buffer.ptr,
1039 unread_bytes,
1040 UNIFI_SDIO_READ);
1041 if (r != CSR_RESULT_SUCCESS)
1042 {
1043 unifi_error(card->ospriv, "Failed to read ToHost signal\n");
1044 return r;
1045 }
1046
1047 card->th_buffer.ptr += unread_bytes;
1048 card->th_buffer.count += (u16)unread_chunks;
1049
1050 *processed = 1;
1051
1052 return CSR_RESULT_SUCCESS;
1053} /* read_to_host_signals() */
1054
1055
1056/*
1057 * ---------------------------------------------------------------------------
1058 * update_to_host_signals_r
1059 *
1060 * Advance the shared-memory count of chunks read from the to-host
1061 * signal buffer.
1062 * Raise a UniFi internal interrupt to tell the firmware that the
1063 * count has changed.
1064 *
1065 * Arguments:
1066 * card Pointer to card context struct
1067 * pending Number of chunks remaining
1068 *
1069 * Returns:
1070 * CSR_RESULT_SUCCESS on success or CSR error code
1071 * ---------------------------------------------------------------------------
1072 */
1073static CsrResult update_to_host_signals_r(card_t *card, s16 pending)
1074{
1075 CsrResult r;
1076
1077 card->to_host_signals_r =
1078 (card->to_host_signals_r + (card->th_buffer.count - pending)) % 128;
1079 card->th_buffer.count = pending;
1080
1081 /* Update the count of signals read */
1082 r = unifi_write_8_or_16(card, card->sdio_ctrl_addr + 6,
1083 (u8)card->to_host_signals_r);
1084 if (r != CSR_RESULT_SUCCESS)
1085 {
1086 unifi_error(card->ospriv, "Failed to update to-host signals read\n");
1087 return r;
1088 }
1089
1090 r = CardGenInt(card);
1091 if (r != CSR_RESULT_SUCCESS)
1092 {
1093 unifi_error(card->ospriv, "Failed to notify UniFi that we processed to-host signals.\n");
1094 return r;
1095 }
1096
1097 card->generate_interrupt = 0;
1098
1099 return CSR_RESULT_SUCCESS;
1100} /* update_to_host_signals_r() */
1101
1102
1103/*
1104 * ---------------------------------------------------------------------------
1105 * read_unpack_cmd
1106 *
1107 * Converts a wire-formatted command to the host bulk_data_cmd_t structure.
1108 *
1109 * Arguments:
1110 * ptr Pointer to the command
1111 * bulk_data_cmd Pointer to the host structure
1112 *
1113 * Returns:
1114 * None.
1115 * ---------------------------------------------------------------------------
1116 */
1117static void read_unpack_cmd(const u8 *ptr, bulk_data_cmd_t *bulk_data_cmd)
1118{
1119 s16 index = 0;
1120 bulk_data_cmd->cmd_and_len = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
1121 index += SIZEOF_UINT16;
1122 bulk_data_cmd->data_slot = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
1123 index += SIZEOF_UINT16;
1124 bulk_data_cmd->offset = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
1125 index += SIZEOF_UINT16;
1126 bulk_data_cmd->buffer_handle = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
1127 index += SIZEOF_UINT16;
1128} /* read_unpack_cmd */
1129
1130
1131/*
1132 * ---------------------------------------------------------------------------
1133 * process_to_host_signals
1134 *
1135 * Read and dispatch signals from the UniFi
1136 *
1137 * Arguments:
1138 * card Pointer to card context struct
1139 * processed Pointer to location to write processing result:
1140 * 0 if there were no signals pending,
1141 * 1 if we read at least one signal
1142 *
1143 * Returns:
1144 * CSR error code if there was an error
1145 *
1146 * Notes:
1147 * Since bulk data transfers can take a long time, if we wait until
1148 * all are done before we acknowledge the signals, the UniFi runs out
1149 * of buffer space. Therefore we keep a count of the bytes transferred
1150 * in bulk data commands, and update the to-host-signals-read count
1151 * if we've done a large transfer.
1152 *
1153 * All data in the f/w is stored in a little endian format, without any
1154 * padding bytes. Every read from the memory has to be transformed in
1155 * host (cpu specific) format, before we can process it. Therefore we
1156 * use read_unpack_cmd() and read_unpack_signal() to convert the raw data
1157 * contained in the card->th_buffer.buf to host structures.
1158 * Important: UDI clients use wire-formatted structures, so we need to
1159 * indicate all data, as we have read it from the device.
1160 * ---------------------------------------------------------------------------
1161 */
1162static CsrResult process_to_host_signals(card_t *card, s32 *processed)
1163{
1164 s16 pending;
1165 s16 remaining;
1166 u8 *bufptr;
1167 bulk_data_param_t data_ptrs;
1168 s16 cmd;
1169 u16 sig_len;
1170 s16 i;
1171 u16 chunks_in_buf;
1172 u16 bytes_transferred = 0;
1173 CsrResult r = CSR_RESULT_SUCCESS;
1174
1175 *processed = 0;
1176
1177 pending = card->th_buffer.count;
1178
1179 /* Are there new to-host signals? */
1180 unifi_trace(card->ospriv, UDBG4, "handling %d to-host chunks\n", pending);
1181
1182 if (!pending)
1183 {
1184 return CSR_RESULT_SUCCESS;
1185 }
1186
1187 /*
1188 * This is a pointer to the raw data we have read from the f/w.
1189 * Can be a signal or a command. Note that we need to convert
1190 * it to a host structure before we process it.
1191 */
1192 bufptr = card->th_buffer.buf;
1193
1194 while (pending > 0)
1195 {
1196 s16 f_flush_count = 0;
1197
1198 /*
1199 * Command and length are common to signal and bulk data msgs.
1200 * If command == 0 (i.e. a signal), len is number of bytes
1201 * *following* the 2-byte header.
1202 */
1203 cmd = bufptr[1] >> 4;
1204 sig_len = bufptr[0] + ((bufptr[1] & 0x0F) << 8);
1205
1206#ifdef CSR_WIFI_HIP_NOISY
1207 unifi_error(card->ospriv, "Received UniFi msg cmd=%d, len=%d\n",
1208 cmd, sig_len);
1209#endif /* CSR_WIFI_HIP_NOISY */
1210
1211 if ((sig_len == 0) &&
1212 ((cmd != SDIO_CMD_CLEAR_SLOT) && (cmd != SDIO_CMD_PADDING)))
1213 {
1214 unifi_error(card->ospriv, "incomplete signal or command: has size zero\n");
1215 return CSR_RESULT_FAILURE;
1216 }
1217 /*
1218 * Make sure the buffer contains a complete message.
1219 * Signals may occupy multiple chunks, bulk-data commands occupy
1220 * one chunk.
1221 */
1222 if (cmd == SDIO_CMD_SIGNAL)
1223 {
1224 chunks_in_buf = GET_CHUNKS_FOR(card->config_data.sig_frag_size, (u16)(sig_len + 2));
1225 }
1226 else
1227 {
1228 chunks_in_buf = 1;
1229 }
1230
1231 if (chunks_in_buf > (u16)pending)
1232 {
1233 unifi_error(card->ospriv, "incomplete signal (0x%x?): need %d chunks, got %d\n",
1234 GET_SIGNAL_ID(bufptr + 2),
1235 chunks_in_buf, pending);
1236 unifi_error(card->ospriv, " thsw=%d, thsr=%d\n",
1237 card->to_host_signals_w,
1238 card->to_host_signals_r);
1239 return CSR_RESULT_FAILURE;
1240 }
1241
1242
1243 switch (cmd)
1244 {
1245 case SDIO_CMD_SIGNAL:
1246 /* This is a signal. Read the rest of it and then handle it. */
1247#if defined (CSR_WIFI_HIP_DEBUG_OFFLINE) && defined (CSR_WIFI_HIP_DATA_PLANE_PROFILE)
1248 card->cmd_prof.sdio_cmd_signal++;
1249#endif
1250
1251 for (i = 0; i < UNIFI_MAX_DATA_REFERENCES; i++)
1252 {
1253 /* Retrieve dataRefs[i].DataLength */
1254 u16 data_len = GET_PACKED_DATAREF_LEN(bufptr + 2, i);
1255
1256 /*
1257 * The bulk data length in the signal can not be greater than
1258 * the maximun length allowed by the SDIO config structure.
1259 */
1260 if (data_len > card->config_data.data_slot_size)
1261 {
1262 unifi_error(card->ospriv,
1263 "Bulk Data length (%d) exceeds Maximum Bulk Data length (%d)\n",
1264 data_len, card->config_data.data_slot_size);
1265 return CSR_RESULT_FAILURE;
1266 }
1267
1268 /*
1269 * Len here might not be the same as the length in the
1270 * bulk data slot. The slot length will always be even,
1271 * but len could be odd.
1272 */
1273 if (data_len != 0)
1274 {
1275 /* Retrieve dataRefs[i].SlotNumber */
1276 s16 slot = GET_PACKED_DATAREF_SLOT(bufptr + 2, i);
1277
1278 if (slot >= card->config_data.num_tohost_data_slots)
1279 {
1280 unifi_error(card->ospriv, "!!!bad slot number in to-host signal: %d, sig 0x%X\n",
1281 slot, cmd);
1282 return CSR_RESULT_FAILURE;
1283 }
1284
1285 data_ptrs.d[i].os_data_ptr = card->to_host_data[slot].os_data_ptr;
1286 data_ptrs.d[i].os_net_buf_ptr = card->to_host_data[slot].os_net_buf_ptr;
1287 data_ptrs.d[i].net_buf_length = card->to_host_data[slot].net_buf_length;
1288 data_ptrs.d[i].data_length = data_len;
1289 }
1290 else
1291 {
1292 UNIFI_INIT_BULK_DATA(&data_ptrs.d[i]);
1293 }
1294 }
1295
1296 /*
1297 * Log the signal to the UDI, before call unifi_receive_event() as
1298 * it can modify the bulk data.
1299 */
1300 if (card->udi_hook)
1301 {
1302 (*card->udi_hook)(card->ospriv, bufptr + 2, sig_len,
1303 &data_ptrs, UDI_LOG_TO_HOST);
1304 }
1305
1306#ifdef CSR_WIFI_HIP_DATA_PLANE_PROFILE
1307 if (GET_SIGNAL_ID(bufptr + 2) == CSR_MA_PACKET_CONFIRM_ID)
1308 {
1309 card->cmd_prof.tx_cfm_count++;
1310 }
1311 else if (GET_SIGNAL_ID(bufptr + 2) == CSR_MA_PACKET_INDICATION_ID)
1312 {
1313 if (data_ptrs.d[0].os_data_ptr)
1314 {
1315 if ((*data_ptrs.d[0].os_data_ptr) & 0x08)
1316 {
1317 card->cmd_prof.rx_count++;
1318 }
1319 }
1320 }
1321#endif
1322 /*
1323 * Check if the signal is MA-PACKET.cfm and if so check the status.
1324 * If the status is failure, search through the slot records to find
1325 * if any slots are occupied for this host tag. This can happen if
1326 * f/w has not downloaded the bulkdata and before that itself it has
1327 * signalled the confirm with failure. If it finds a slot with that
1328 * host tag then, it clears the corresponding slot
1329 */
1330
1331 if (GET_SIGNAL_ID(bufptr + 2) == CSR_MA_PACKET_CONFIRM_ID)
1332 {
1333 /* Get host tag and transmission status */
1334 u32 host_tag = GET_PACKED_MA_PACKET_CONFIRM_HOST_TAG(bufptr + 2);
1335 u16 status = GET_PACKED_MA_PACKET_CONFIRM_TRANSMISSION_STATUS(bufptr + 2);
1336
1337 unifi_trace(card->ospriv, UDBG4, "process_to_host_signals signal ID=%x host Tag=%x status=%x\n",
1338 GET_SIGNAL_ID(bufptr + 2), host_tag, status);
1339
1340 /* If transmission status is failure then search through the slot records
1341 * and if for any slot records the clear slot is not done then do it now
1342 */
1343
1344 if (status && (card->fh_slot_host_tag_record))
1345 {
1346 u16 num_fh_slots = card->config_data.num_fromhost_data_slots;
1347
1348 /* search through the list of slot records and match with host tag
1349 * If a slot is not yet cleared then clear the slot from here
1350 */
1351 for (i = 0; i < num_fh_slots; i++)
1352 {
1353 if (card->fh_slot_host_tag_record[i] == host_tag)
1354 {
1355#ifdef CSR_WIFI_REQUEUE_PACKET_TO_HAL
1356 /* Invoke the HAL module function to requeue it back to HAL Queues */
1357 r = unifi_reque_ma_packet_request(card->ospriv, host_tag, status, &card->from_host_data[i].bd);
1358 card->fh_slot_host_tag_record[i] = CSR_WIFI_HIP_RESERVED_HOST_TAG;
1359 if (CSR_RESULT_SUCCESS != r)
1360 {
1361 unifi_trace(card->ospriv, UDBG5, "process_to_host_signals: Failed to requeue Packet(hTag:%x) back to HAL \n", host_tag);
1362 CardClearFromHostDataSlot(card, i);
1363 }
1364 else
1365 {
1366 CardClearFromHostDataSlotWithoutFreeingBulkData(card, i);
1367 }
1368
1369#else
1370 unifi_trace(card->ospriv, UDBG4, "process_to_host_signals Clear slot=%x host tag=%x\n", i, host_tag);
1371 card->fh_slot_host_tag_record[i] = CSR_WIFI_HIP_RESERVED_HOST_TAG;
1372
1373 /* Set length field in from_host_data array to 0 */
1374 CardClearFromHostDataSlot(card, i);
1375#endif
1376 break;
1377 }
1378 }
1379 }
1380 }
1381
1382 /* Pass event to OS layer */
1383 unifi_receive_event(card->ospriv, bufptr + 2, sig_len, &data_ptrs);
1384
1385 /* Initialise the to_host data, so it can be re-used. */
1386 for (i = 0; i < UNIFI_MAX_DATA_REFERENCES; i++)
1387 {
1388 /* The slot is only valid if the length is non-zero. */
1389 if (GET_PACKED_DATAREF_LEN(bufptr + 2, i) != 0)
1390 {
1391 s16 slot = GET_PACKED_DATAREF_SLOT(bufptr + 2, i);
1392 if (slot < card->config_data.num_tohost_data_slots)
1393 {
1394 UNIFI_INIT_BULK_DATA(&card->to_host_data[slot]);
1395 }
1396 }
1397 }
1398
1399#ifndef CSR_WIFI_DEFER_TH_FLUSH
1400 /*
1401 * If we have previously transferred a lot of data, ack
1402 * the signals read so far, so f/w can reclaim the buffer
1403 * memory sooner.
1404 */
1405 if (bytes_transferred >= TO_HOST_FLUSH_THRESHOLD)
1406 {
1407 f_flush_count = 1;
1408 }
1409#endif
1410 break;
1411
1412
1413 case SDIO_CMD_CLEAR_SLOT:
1414#if defined (CSR_WIFI_HIP_DEBUG_OFFLINE) && defined (CSR_WIFI_HIP_DATA_PLANE_PROFILE)
1415 card->cmd_prof.sdio_cmd_clear_slot++;
1416#endif
1417 /* This is a clear slot command. */
1418 if (sig_len != 0)
1419 {
1420 unifi_error(card->ospriv, "process_to_host_signals: clear slot, bad data len: 0x%X at offset %d\n",
1421 sig_len, bufptr - card->th_buffer.buf);
1422 return CSR_RESULT_FAILURE;
1423 }
1424
1425 r = process_clear_slot_command(card, bufptr);
1426 if (r != CSR_RESULT_SUCCESS)
1427 {
1428 unifi_error(card->ospriv, "Failed to process clear slot\n");
1429 return r;
1430 }
1431 break;
1432
1433 case SDIO_CMD_TO_HOST_TRANSFER:
1434 case SDIO_CMD_FROM_HOST_TRANSFER:
1435 case SDIO_CMD_FROM_HOST_AND_CLEAR:
1436 case SDIO_CMD_OVERLAY_TRANSFER:
1437 /* This is a bulk data command. */
1438 if (sig_len & 1)
1439 {
1440 unifi_error(card->ospriv, "process_to_host_signals: bulk data, bad data len: 0x%X at offset %d\n",
1441 sig_len, bufptr - card->th_buffer.buf);
1442 return CSR_RESULT_FAILURE;
1443 }
1444
1445 r = process_bulk_data_command(card, bufptr, cmd, sig_len);
1446 if (r != CSR_RESULT_SUCCESS)
1447 {
1448 unifi_error(card->ospriv, "Failed to process bulk cmd\n");
1449 return r;
1450 }
1451 /* Count the bytes transferred */
1452 bytes_transferred += sig_len;
1453
1454 if (cmd == SDIO_CMD_FROM_HOST_AND_CLEAR)
1455 {
1456#if defined (CSR_WIFI_HIP_DEBUG_OFFLINE) && defined (CSR_WIFI_HIP_DATA_PLANE_PROFILE)
1457 card->cmd_prof.sdio_cmd_from_host_and_clear++;
1458#endif
1459#ifndef CSR_WIFI_DEFER_TH_FLUSH
1460 f_flush_count = 1;
1461#endif
1462 }
1463#if defined (CSR_WIFI_HIP_DEBUG_OFFLINE) && defined (CSR_WIFI_HIP_DATA_PLANE_PROFILE)
1464 else if (cmd == SDIO_CMD_FROM_HOST_TRANSFER)
1465 {
1466 card->cmd_prof.sdio_cmd_from_host++;
1467 }
1468 else if (cmd == SDIO_CMD_TO_HOST_TRANSFER)
1469 {
1470 card->cmd_prof.sdio_cmd_to_host++;
1471 }
1472#endif
1473 break;
1474
1475 case SDIO_CMD_PADDING:
1476 break;
1477
1478 default:
1479 unifi_error(card->ospriv, "Unrecognised to-host command: %d\n", cmd);
1480 break;
1481 }
1482
1483 bufptr += chunks_in_buf * card->config_data.sig_frag_size;
1484 pending -= chunks_in_buf;
1485
1486 /*
1487 * Write out the host signal count when a significant
1488 * number of bytes of bulk data have been transferred or
1489 * when we have performed a CopyFromHostAndClear.
1490 */
1491 if (f_flush_count)
1492 {
1493 r = update_to_host_signals_r(card, pending);
1494 if (r != CSR_RESULT_SUCCESS)
1495 {
1496 return r;
1497 }
1498 bytes_transferred = 0;
1499 }
1500 }
1501
1502 if (pending)
1503 {
1504 unifi_warning(card->ospriv, "proc_th_sigs: %d unprocessed\n", pending);
1505 }
1506
1507 /* If we processed any signals, write the updated count to UniFi */
1508 if (card->th_buffer.count != pending)
1509 {
1510 r = update_to_host_signals_r(card, pending);
1511 if (r != CSR_RESULT_SUCCESS)
1512 {
1513 return r;
1514 }
1515 }
1516
1517 /*
1518 * Reset the buffer pointer, copying down any un-processed signals.
1519 * This can happen if we enable the optimisation in read_to_host_signals()
1520 * that limits the length to whole blocks.
1521 */
1522 remaining = card->th_buffer.ptr - bufptr;
1523 if (remaining < 0)
1524 {
1525 unifi_error(card->ospriv, "Processing TH signals overran the buffer\n");
1526 return CSR_RESULT_FAILURE;
1527 }
1528 if (remaining > 0)
1529 {
1530 /* Use a safe copy because source and destination may overlap */
1531 u8 *d = card->th_buffer.buf;
1532 u8 *s = bufptr;
1533 s32 n = remaining;
1534 while (n--)
1535 {
1536 *d++ = *s++;
1537 }
1538 }
1539 card->th_buffer.ptr = card->th_buffer.buf + remaining;
1540
1541
1542 /* If we reach here then we processed something */
1543 *processed = 1;
1544 return CSR_RESULT_SUCCESS;
1545} /* process_to_host_signals() */
1546
1547
1548/*
1549 * ---------------------------------------------------------------------------
1550 * process_clear_slot_command
1551 *
1552 * Process a clear slot command fom the UniFi.
1553 *
1554 * Arguments:
1555 * card Pointer to card context struct
1556 * bdcmd Pointer to bulk-data command msg from UniFi
1557 *
1558 * Returns:
1559 * 0 on success, CSR error code on error
1560 * ---------------------------------------------------------------------------
1561 */
1562static CsrResult process_clear_slot_command(card_t *card, const u8 *cmdptr)
1563{
1564 u16 data_slot;
1565 s16 slot;
1566
1567 data_slot = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(cmdptr + SIZEOF_UINT16);
1568
1569 unifi_trace(card->ospriv, UDBG4, "Processing clear slot cmd, slot=0x%X\n",
1570 data_slot);
1571
1572 slot = data_slot & 0x7FFF;
1573
1574#ifdef CSR_WIFI_HIP_NOISY
1575 unifi_error(card->ospriv, "CMD clear data slot 0x%04x\n", data_slot);
1576#endif /* CSR_WIFI_HIP_NOISY */
1577
1578 if (data_slot & SLOT_DIR_TO_HOST)
1579 {
1580 if (slot >= card->config_data.num_tohost_data_slots)
1581 {
1582 unifi_error(card->ospriv,
1583 "Invalid to-host data slot in SDIO_CMD_CLEAR_SLOT: %d\n",
1584 slot);
1585 return CSR_RESULT_FAILURE;
1586 }
1587 /* clear to-host data slot */
1588 unifi_warning(card->ospriv, "Unexpected clear to-host data slot cmd: 0x%04x\n",
1589 data_slot);
1590 }
1591 else
1592 {
1593 if (slot >= card->config_data.num_fromhost_data_slots)
1594 {
1595 unifi_error(card->ospriv,
1596 "Invalid from-host data slot in SDIO_CMD_CLEAR_SLOT: %d\n",
1597 slot);
1598 return CSR_RESULT_FAILURE;
1599 }
1600
1601 /*
1602 * The driver is the owner to clear all slots now
1603 * Ref - comment in process_fh_traffic_queue
1604 * so it will just ignore the clear slot command from firmware
1605 * and return success
1606 */
1607 return CSR_RESULT_SUCCESS;
1608
1609 /* Set length field in from_host_data array to 0 */
1610 /* CardClearFromHostDataSlot(card, slot); */
1611 }
1612
1613 return CSR_RESULT_SUCCESS;
1614} /* process_clear_slot_command() */
1615
1616
1617/*
1618 * ---------------------------------------------------------------------------
1619 * process_bulk_data_command
1620 *
1621 * Process a bulk data request from the UniFi.
1622 *
1623 * Arguments:
1624 * card Pointer to card context struct
1625 * bdcmd Pointer to bulk-data command msg from UniFi
1626 * cmd, len Decoded values of command and length from the msg header
1627 * Cmd will only be one of:
1628 * SDIO_CMD_TO_HOST_TRANSFER
1629 * SDIO_CMD_FROM_HOST_TRANSFER
1630 * SDIO_CMD_FROM_HOST_AND_CLEAR
1631 * SDIO_CMD_OVERLAY_TRANSFER
1632 *
1633 * Returns:
1634 * CSR_RESULT_SUCCESS on success, CSR error code on error
1635 * ---------------------------------------------------------------------------
1636 */
1637static CsrResult process_bulk_data_command(card_t *card, const u8 *cmdptr,
1638 s16 cmd, u16 len)
1639{
1640 bulk_data_desc_t *bdslot;
1641#ifdef CSR_WIFI_ALIGNMENT_WORKAROUND
1642 u8 *host_bulk_data_slot;
1643#endif
1644 bulk_data_cmd_t bdcmd;
1645 s16 offset;
1646 s16 slot;
1647 s16 dir;
1648 CsrResult r;
1649
1650 read_unpack_cmd(cmdptr, &bdcmd);
1651
1652 unifi_trace(card->ospriv, UDBG4, "Processing bulk data cmd %d %s, len=%d, slot=0x%X\n",
1653 cmd, lookup_bulkcmd_name(cmd), len, bdcmd.data_slot);
1654
1655 /*
1656 * Round up the transfer length if required.
1657 * This is useful to force all transfers to be a multiple of the SDIO block
1658 * size, so the SDIO driver won't try to use a byte-mode CMD53. These are
1659 * broken on some hardware platforms.
1660 */
1661 if (card->sdio_io_block_pad)
1662 {
1663 len = (len + card->sdio_io_block_size - 1) & ~(card->sdio_io_block_size - 1);
1664 unifi_trace(card->ospriv, UDBG4, "Rounded bulk data length up to %d\n", len);
1665 }
1666
1667 slot = bdcmd.data_slot & 0x7FFF;
1668
1669 if (cmd == SDIO_CMD_OVERLAY_TRANSFER)
1670 {
1671 return CSR_WIFI_HIP_RESULT_INVALID_VALUE; /* Not used on CSR6xxx */
1672 }
1673 else
1674 {
1675 if (bdcmd.data_slot & SLOT_DIR_TO_HOST)
1676 {
1677 /* Request is for to-host bulk data */
1678
1679 /* Check sanity of slot number */
1680 if (slot >= card->config_data.num_tohost_data_slots)
1681 {
1682 unifi_error(card->ospriv,
1683 "Invalid to-host data slot in SDIO bulk xfr req: %d\n",
1684 slot);
1685 return CSR_RESULT_FAILURE;
1686 }
1687
1688 /* Allocate memory for card->to_host_data[slot] bulk data here. */
1689#ifdef CSR_PRE_ALLOC_NET_DATA
1690 r = prealloc_netdata_get(card, &card->to_host_data[slot], len);
1691#else
1692 r = unifi_net_data_malloc(card->ospriv, &card->to_host_data[slot], len);
1693#endif
1694 if (r != CSR_RESULT_SUCCESS)
1695 {
1696 unifi_error(card->ospriv, "Failed to allocate t-h bulk data\n");
1697 return CSR_RESULT_FAILURE;
1698 }
1699
1700 bdslot = &card->to_host_data[slot];
1701
1702 /* Make sure that the buffer is 4-bytes aligned */
1703 r = unifi_net_dma_align(card->ospriv, bdslot);
1704 if (r != CSR_RESULT_SUCCESS)
1705 {
1706 unifi_error(card->ospriv, "Failed to align t-h bulk data buffer for DMA\n");
1707 return CSR_RESULT_FAILURE;
1708 }
1709 }
1710 else
1711 {
1712 /* Request is for from-host bulk data */
1713
1714 if (slot >= card->config_data.num_fromhost_data_slots)
1715 {
1716 unifi_error(card->ospriv,
1717 "Invalid from-host data slot in SDIO bulk xfr req: %d\n",
1718 slot);
1719 return CSR_RESULT_FAILURE;
1720 }
1721 bdslot = &card->from_host_data[slot].bd;
1722 }
1723 offset = bdcmd.offset;
1724 }
1725 /* Do the transfer */
1726 dir = (cmd == SDIO_CMD_TO_HOST_TRANSFER)?
1727 UNIFI_SDIO_READ : UNIFI_SDIO_WRITE;
1728
1729 unifi_trace(card->ospriv, UDBG4,
1730 "Bulk %c %s len=%d, handle %d - slot=%d %p+(%d)\n",
1731 (dir == UNIFI_SDIO_READ)?'R' : 'W',
1732 lookup_bulkcmd_name(cmd),
1733 len,
1734 bdcmd.buffer_handle,
1735 slot, bdslot->os_data_ptr, offset);
1736#ifdef CSR_WIFI_HIP_NOISY
1737 unifi_error(card->ospriv, "Bulk %s len=%d, handle %d - slot=%d %p+(%d)\n",
1738 lookup_bulkcmd_name(cmd),
1739 len,
1740 bdcmd.buffer_handle,
1741 slot, bdslot->os_data_ptr, offset);
1742#endif /* CSR_WIFI_HIP_NOISY */
1743
1744
1745 if (bdslot->os_data_ptr == NULL)
1746 {
1747 unifi_error(card->ospriv, "Null os_data_ptr - Bulk %s handle %d - slot=%d o=(%d)\n",
1748 lookup_bulkcmd_name(cmd),
1749 bdcmd.buffer_handle,
1750 slot,
1751 offset);
1752 return CSR_WIFI_HIP_RESULT_INVALID_VALUE;
1753 }
1754
1755#ifdef CSR_WIFI_ALIGNMENT_WORKAROUND
1756 /* if os_data_ptr is not 4-byte aligned, then allocate a new buffer and copy data
1757 to new buffer to ensure the address passed to unifi_bulk_rw is 4-byte aligned */
1758
1759 if (len != 0 && (dir == UNIFI_SDIO_WRITE) && (((ptrdiff_t)bdslot->os_data_ptr + offset) & 3))
1760 {
1761 host_bulk_data_slot = kmalloc(len, GFP_KERNEL);
1762
1763 if (!host_bulk_data_slot)
1764 {
1765 unifi_error(card->ospriv, " failed to allocate request_data before unifi_bulk_rw\n");
1766 return -1;
1767 }
1768
1769 memcpy((void *)host_bulk_data_slot,
1770 (void *)(bdslot->os_data_ptr + offset), len);
1771
1772 r = unifi_bulk_rw(card,
1773 bdcmd.buffer_handle,
1774 (void *)host_bulk_data_slot,
1775 len,
1776 dir);
1777 }
1778 else
1779#endif
1780 {
1781 r = unifi_bulk_rw(card,
1782 bdcmd.buffer_handle,
1783 (void *)(bdslot->os_data_ptr + offset),
1784 len,
1785 dir);
1786 }
1787
1788 if (r == CSR_WIFI_HIP_RESULT_NO_DEVICE)
1789 {
1790 return r;
1791 }
1792 if (r != CSR_RESULT_SUCCESS)
1793 {
1794 unifi_error(card->ospriv,
1795 "Failed: %s hlen=%d, slen=%d, handle %d - slot=%d %p+0x%X\n",
1796 lookup_bulkcmd_name(cmd),
1797 len, /* Header length */
1798 bdslot->data_length, /* Length stored in slot */
1799 bdcmd.buffer_handle,
1800 slot, bdslot->os_data_ptr, offset);
1801 return r;
1802 }
1803
1804 bdslot->data_length = len;
1805
1806 if (cmd == SDIO_CMD_FROM_HOST_AND_CLEAR)
1807 {
1808 if (slot >= card->config_data.num_fromhost_data_slots)
1809 {
1810 unifi_error(card->ospriv,
1811 "Invalid from-host data slot in SDIO_CMD_FROM_HOST_AND_CLEAR: %d\n",
1812 slot);
1813 return CSR_RESULT_FAILURE;
1814 }
1815
1816#ifdef CSR_WIFI_ALIGNMENT_WORKAROUND
1817 /* moving this check before we clear host data slot */
1818 if ((len != 0) && (dir == UNIFI_SDIO_WRITE) && (((ptrdiff_t)bdslot->os_data_ptr + offset) & 3))
1819 {
1820 kfree(host_bulk_data_slot);
1821 }
1822#endif
1823
1824 if (card->fh_slot_host_tag_record)
1825 {
1826 unifi_trace(card->ospriv, UDBG5, "CopyFromHostAndClearSlot Reset entry for slot=%d\n", slot);
1827
1828 /* reset the host tag entry for the corresponding slot */
1829 card->fh_slot_host_tag_record[slot] = CSR_WIFI_HIP_RESERVED_HOST_TAG;
1830 }
1831
1832
1833 /* Set length field in from_host_data array to 0 */
1834 CardClearFromHostDataSlot(card, slot);
1835 }
1836
1837 return CSR_RESULT_SUCCESS;
1838} /* process_bulk_data_command() */
1839
1840
1841/*
1842 * ---------------------------------------------------------------------------
1843 * check_fh_sig_slots
1844 *
1845 * Check whether there are <n> free signal slots available on UniFi.
1846 * This takes into account the signals already batched since the
1847 * from_host_signal counts were last read.
1848 * If the from_host_signal counts indicate not enough space, we read
1849 * the latest count from UniFi to see if some more have been freed.
1850 *
 *      Arguments:
 *      card            Pointer to card context struct
 *      needed          Minimum number of free signal fragments required
 *      space_fh        Pointer to location to write the available space
1853 *
1854 * Returns:
1855 * CSR_RESULT_SUCCESS, otherwise CSR error code on error.
1856 * ---------------------------------------------------------------------------
1857 */
static CsrResult check_fh_sig_slots(card_t *card, u16 needed, s32 *space_fh)
{
    u32 count_fhw;              /* cached from-host "written" signal count */
    u32 occupied_fh, slots_fh;
    s32 count_fhr;              /* cached from-host "read" signal count */

    count_fhw = card->from_host_signals_w;
    count_fhr = card->from_host_signals_r;
    slots_fh = card->config_data.num_fromhost_sig_frags;

    /* Only read the space in from-host queue if necessary */
    /* The signal counts are modulo-128 sliding counters (they are updated
     * mod 128 when the fh buffer is flushed), so occupancy is the modulo-128
     * difference of the two counts. */
    occupied_fh = (count_fhw - count_fhr) % 128;

    if (slots_fh < occupied_fh)
    {
        /* Clamp to zero rather than computing a negative space value */
        *space_fh = 0;
    }
    else
    {
        *space_fh = slots_fh - occupied_fh;
    }

    if ((occupied_fh != 0) && (*space_fh < needed))
    {
        /* The cached counts suggest not enough space: re-read the latest
         * read count from the chip in case UniFi has freed some slots
         * since we last looked. */
        count_fhr = unifi_read_shared_count(card, card->sdio_ctrl_addr + 2);
        if (count_fhr < 0)
        {
            unifi_error(card->ospriv, "Failed to read from-host sig read count\n");
            return CSR_RESULT_FAILURE;
        }
        card->from_host_signals_r = count_fhr; /* diag */

        /* Recompute occupancy and available space with the fresh count */
        occupied_fh = (count_fhw - count_fhr) % 128;
        *space_fh = slots_fh - occupied_fh;
    }

    return CSR_RESULT_SUCCESS;
} /* check_fh_sig_slots() */
1896
1897
1898/*
1899* If we are padding the From-Host signals to the SDIO block size,
1900* we need to round up the needed_chunks to the SDIO block size.
1901*/
/*
 * Round _needed_chunks up to a whole number of SDIO blocks when transfers
 * are being padded to the SDIO block size. Wrapped in do { } while (0) so
 * the macro expands safely as a single statement (e.g. in an unbraced
 * if/else body), instead of a bare brace block.
 */
#define ROUND_UP_NEEDED_CHUNKS(_card, _needed_chunks) \
    do \
    { \
        u16 _chunks_per_block; \
        u16 _chunks_in_last_block; \
 \
        if (_card->sdio_io_block_pad) \
        { \
            _chunks_per_block = _card->sdio_io_block_size / _card->config_data.sig_frag_size; \
            _chunks_in_last_block = (_needed_chunks) % _chunks_per_block; \
            if (_chunks_in_last_block != 0) \
            { \
                _needed_chunks = (_needed_chunks) + (_chunks_per_block - _chunks_in_last_block); \
            } \
        } \
    } while (0)
1917
1918
/*
 * Round _space_chunks down to a whole number of SDIO blocks when transfers
 * are being padded to the SDIO block size (a partial block cannot be used).
 * Wrapped in do { } while (0) so the macro expands safely as a single
 * statement (e.g. in an unbraced if/else body).
 */
#define ROUND_UP_SPACE_CHUNKS(_card, _space_chunks) \
    do \
    { \
        u16 _chunks_per_block; \
 \
        if (_card->sdio_io_block_pad) \
        { \
            _chunks_per_block = _card->sdio_io_block_size / _card->config_data.sig_frag_size; \
            _space_chunks = (((_space_chunks) / _chunks_per_block) * _chunks_per_block); \
        } \
    } while (0)
1929
1930
1931
1932
1933
1934/*
1935 * ---------------------------------------------------------------------------
1936 * process_fh_cmd_queue
1937 *
1938 * Take one signal off the from-host queue and copy it to the UniFi.
1939 * Does nothing if the UniFi has no slots free.
1940 *
1941 * Arguments:
1942 * card Pointer to card context struct
1943 * processed Location to write:
1944 * 0 if there is nothing on the queue to process
1945 * 1 if a signal was successfully processed
1946 *
1947 * Returns:
1948 * CSR error code if an error occurred.
1949 *
1950 * Notes:
1951 * The from-host queue contains signal requests from the network driver
1952 * and any UDI clients interspersed. UDI clients' requests have been stored
1953 * in the from-host queue using the wire-format structures, as they arrive.
1954 * All other requests are stored in the from-host queue using the host
1955 * (cpu specific) structures. We use the is_packed member of the card_signal_t
1956 * structure that describes the queue to make the distinction.
1957 * ---------------------------------------------------------------------------
1958 */
static CsrResult process_fh_cmd_queue(card_t *card, s32 *processed)
{
    q_t *sigq = &card->fh_command_queue;

    CsrResult r;
    u16 pending_sigs;           /* signals currently waiting on the queue */
    u16 pending_chunks;         /* chunks needed to carry those signals */
    u16 needed_chunks;          /* pending_chunks + chunks already buffered */
    s32 space_chunks;           /* chunks free on UniFi */
    u16 q_index;

    *processed = 0;

    /* Get the number of pending signals. */
    pending_sigs = CSR_WIFI_HIP_Q_SLOTS_USED(sigq);
    unifi_trace(card->ospriv, UDBG5, "proc_fh: %d pending\n", pending_sigs);
    if (pending_sigs == 0)
    {
        /* Nothing to do */
        return CSR_RESULT_SUCCESS;
    }

    /* Work out how many chunks we have waiting to send */
    for (pending_chunks = 0, q_index = CSR_WIFI_HIP_Q_NEXT_R_SLOT(sigq);
         q_index != CSR_WIFI_HIP_Q_NEXT_W_SLOT(sigq);
         q_index = CSR_WIFI_HIP_Q_WRAP(sigq, q_index + 1))
    {
        card_signal_t *csptr = CSR_WIFI_HIP_Q_SLOT_DATA(sigq, q_index);

        /*
         * Note that GET_CHUNKS_FOR() needs the size of the packed
         * (wire-formatted) structure
         * (+2 for the SDIO protocol header prepended to each signal).
         */
        pending_chunks += GET_CHUNKS_FOR(card->config_data.sig_frag_size, (u16)(csptr->signal_length + 2));
    }

    /*
     * Check whether UniFi has space for all the buffered bulk-data
     * commands and signals as well.
     */
    needed_chunks = pending_chunks + card->fh_buffer.count;

    /* Round up to the block size if necessary */
    ROUND_UP_NEEDED_CHUNKS(card, needed_chunks);

    r = check_fh_sig_slots(card, needed_chunks, &space_chunks);
    if (r != CSR_RESULT_SUCCESS)
    {
        /* Error */
        unifi_error(card->ospriv, "Failed to read fh sig count\n");
        return r;
    }

#ifdef CSR_WIFI_HIP_NOISY
    unifi_error(card->ospriv, "proc_fh: %d chunks free, need %d\n",
                space_chunks, needed_chunks);
#endif /* CSR_WIFI_HIP_NOISY */


    /*
     * Coalesce as many from-host signals as possible
     * into a single block and write using a single CMD53
     */
    if (needed_chunks > (u16)space_chunks)
    {
        /* Round up to the block size if necessary */
        ROUND_UP_SPACE_CHUNKS(card, space_chunks);

        /*
         * If the f/w has less free chunks than those already pending
         * return immediately.
         */
        if ((u16)space_chunks <= card->fh_buffer.count)
        {
            /*
             * No room in UniFi for any signals after the buffered bulk
             * data commands have been sent.
             */
            unifi_error(card->ospriv, "not enough room to send signals, need %d chunks, %d free\n",
                        card->fh_buffer.count, space_chunks);
            card->generate_interrupt = 1;
            return CSR_RESULT_SUCCESS;
        }
        /* Restrict this pass to the chunks UniFi can actually take */
        pending_chunks = (u16)(space_chunks - card->fh_buffer.count);
    }

    /* Copy signals into the fh buffer until the queue drains, the chunk
     * budget is used up, or the buffer / bulk-data slots fill. */
    while (pending_sigs-- && pending_chunks > 0)
    {
        card_signal_t *csptr;
        s16 i;
        u16 sig_chunks, total_length, free_chunks_in_fh_buffer;
        bulk_data_param_t bulkdata;
        u8 *packed_sigptr;
        u16 signal_length = 0;

        /* Retrieve the entry at the head of the queue */
        q_index = CSR_WIFI_HIP_Q_NEXT_R_SLOT(sigq);

        /* Get a pointer to the containing card_signal_t struct */
        csptr = CSR_WIFI_HIP_Q_SLOT_DATA(sigq, q_index);

        /* Get the new length of the packed signal */
        signal_length = csptr->signal_length;

        /* Wire-format signals must be 16-bit aligned and fit the
         * packed signal buffer. */
        if ((signal_length & 1) || (signal_length > UNIFI_PACKED_SIGBUF_SIZE))
        {
            unifi_error(card->ospriv, "process_fh_queue: Bad len: %d\n", signal_length);
            return CSR_RESULT_FAILURE;
        }

        /* Need space for 2-byte SDIO protocol header + signal */
        sig_chunks = GET_CHUNKS_FOR(card->config_data.sig_frag_size, (u16)(signal_length + 2));

        free_chunks_in_fh_buffer = GET_CHUNKS_FOR(card->config_data.sig_frag_size,
                                                  (u16)((card->fh_buffer.buf + UNIFI_FH_BUF_SIZE) - card->fh_buffer.ptr));
        if (free_chunks_in_fh_buffer < sig_chunks)
        {
            /* No more room */
            unifi_notice(card->ospriv, "proc_fh_cmd_q: no room in fh buffer for 0x%.4X, deferring\n",
                         (u16)(GET_SIGNAL_ID(csptr->sigbuf)));
            break;
        }

        packed_sigptr = csptr->sigbuf;

        /* Claim and set up a from-host data slot */
        if (CSR_RESULT_FAILURE == CardWriteBulkData(card, csptr, UNIFI_TRAFFIC_Q_MLME))
        {
            unifi_notice(card->ospriv, "proc_fh_cmd_q: no fh data slots for 0x%.4X, deferring\n",
                         (u16)(GET_SIGNAL_ID(csptr->sigbuf)));
            break;
        }

        /* Snapshot the bulk-data references for the UDI log, then clear
         * them on the queue entry. */
        for (i = 0; i < UNIFI_MAX_DATA_REFERENCES; i++)
        {
            if (csptr->bulkdata[i].data_length == 0)
            {
                UNIFI_INIT_BULK_DATA(&bulkdata.d[i]);
            }
            else
            {
                bulkdata.d[i].os_data_ptr = csptr->bulkdata[i].os_data_ptr;
                bulkdata.d[i].data_length = csptr->bulkdata[i].data_length;
            }

            /* Pass the free responsibility to the lower layer. */
            UNIFI_INIT_BULK_DATA(&csptr->bulkdata[i]);
        }

        unifi_trace(card->ospriv, UDBG2, "Sending signal 0x%.4X\n",
                    GET_SIGNAL_ID(packed_sigptr));
#ifdef CSR_WIFI_HIP_NOISY
        unifi_error(card->ospriv, "Sending signal 0x%.4X\n",
                    GET_SIGNAL_ID(packed_sigptr));
#endif /* CSR_WIFI_HIP_NOISY */


        /* Append packed signal to F-H buffer */
        total_length = sig_chunks * card->config_data.sig_frag_size;

        /* 2-byte SDIO header: low 8 bits of length in byte 0; top 4 bits
         * of length plus the command code in the high nibble of byte 1. */
        card->fh_buffer.ptr[0] = (u8)(signal_length & 0xff);
        card->fh_buffer.ptr[1] =
            (u8)(((signal_length >> 8) & 0xf) | (SDIO_CMD_SIGNAL << 4));

        memcpy(card->fh_buffer.ptr + 2, packed_sigptr, signal_length);
        /* Zero-fill the remainder of the last chunk */
        memset(card->fh_buffer.ptr + 2 + signal_length, 0,
               total_length - (2 + signal_length));

#ifdef CSR_WIFI_HIP_NOISY
        unifi_error(card->ospriv, "proc_fh: fh_buffer %d bytes \n",
                    signal_length + 2);
        dump(card->fh_buffer.ptr, signal_length + 2);
        unifi_trace(card->ospriv, UDBG1, " \n");
#endif /* CSR_WIFI_HIP_NOISY */

        card->fh_buffer.ptr += total_length;
        card->fh_buffer.count += sig_chunks;

#ifdef CSR_WIFI_HIP_NOISY
        unifi_error(card->ospriv, "Added %d to fh buf, len now %d, count %d\n",
                    signal_length,
                    card->fh_buffer.ptr - card->fh_buffer.buf,
                    card->fh_buffer.count);
#endif /* CSR_WIFI_HIP_NOISY */

        (*processed)++;
        pending_chunks -= sig_chunks;

        /* Log the signal to the UDI. */
        /* UDI will get the packed structure */
        /* Can not log the unpacked signal, unless we reconstruct it! */
        if (card->udi_hook)
        {
            (*card->udi_hook)(card->ospriv, packed_sigptr, signal_length,
                              &bulkdata, UDI_LOG_FROM_HOST);
        }

        /* Remove entry from q */
        csptr->signal_length = 0;
        CSR_WIFI_HIP_Q_INC_R(sigq);
    }

    return CSR_RESULT_SUCCESS;
} /* process_fh_cmd_queue() */
2163
2164
2165/*
2166 * ---------------------------------------------------------------------------
2167 * process_fh_traffic_queue
2168 *
2169 * Take signals off the from-host queue and copy them to the UniFi.
2170 * Does nothing if the UniFi has no slots free.
2171 *
2172 * Arguments:
2173 * card Pointer to card context struct
2174 * sigq Pointer to the traffic queue
2175 * processed Pointer to location to write:
2176 * 0 if there is nothing on the queue to process
2177 * 1 if a signal was successfully processed
2178 *
2179 * Returns:
2180 * CSR error code if an error occurred.
2181 *
2182 * Notes:
2183 * The from-host queue contains signal requests from the network driver
2184 * and any UDI clients interspersed.
2185 * ---------------------------------------------------------------------------
2186 */
2187static CsrResult process_fh_traffic_queue(card_t *card, s32 *processed)
2188{
2189 q_t *sigq = card->fh_traffic_queue;
2190
2191 CsrResult r;
2192 s16 n = 0;
2193 s32 q_no;
2194 u16 pending_sigs = 0;
2195 u16 pending_chunks = 0;
2196 u16 needed_chunks;
2197 s32 space_chunks;
2198 u16 q_index;
2199 u32 host_tag = 0;
2200 u16 slot_num = 0;
2201
2202 *processed = 0;
2203
2204 /* calculate how many signals are in queues and how many chunks are needed. */
2205 for (n = UNIFI_NO_OF_TX_QS - 1; n >= 0; n--)
2206 {
2207 /* Get the number of pending signals. */
2208 pending_sigs += CSR_WIFI_HIP_Q_SLOTS_USED(&sigq[n]);
2209 unifi_trace(card->ospriv, UDBG5, "proc_fh%d: %d pending\n", n, pending_sigs);
2210
2211 /* Work out how many chunks we have waiting to send */
2212 for (q_index = CSR_WIFI_HIP_Q_NEXT_R_SLOT(&sigq[n]);
2213 q_index != CSR_WIFI_HIP_Q_NEXT_W_SLOT(&sigq[n]);
2214 q_index = CSR_WIFI_HIP_Q_WRAP(&sigq[n], q_index + 1))
2215 {
2216 card_signal_t *csptr = CSR_WIFI_HIP_Q_SLOT_DATA(&sigq[n], q_index);
2217
2218 /*
2219 * Note that GET_CHUNKS_FOR() needs the size of the packed
2220 * (wire-formatted) structure
2221 */
2222 pending_chunks += GET_CHUNKS_FOR(card->config_data.sig_frag_size, (u16)(csptr->signal_length + 2));
2223 }
2224 }
2225
2226 /* If there are no pending signals, just return */
2227 if (pending_sigs == 0)
2228 {
2229 /* Nothing to do */
2230 return CSR_RESULT_SUCCESS;
2231 }
2232
2233 /*
2234 * Check whether UniFi has space for all the buffered bulk-data
2235 * commands and signals as well.
2236 */
2237 needed_chunks = pending_chunks + card->fh_buffer.count;
2238
2239 /* Round up to the block size if necessary */
2240 ROUND_UP_NEEDED_CHUNKS(card, needed_chunks);
2241
2242 r = check_fh_sig_slots(card, needed_chunks, &space_chunks);
2243 if (r != CSR_RESULT_SUCCESS)
2244 {
2245 /* Error */
2246 unifi_error(card->ospriv, "Failed to read fh sig count\n");
2247 return r;
2248 }
2249
2250#ifdef CSR_WIFI_HIP_NOISY
2251 unifi_error(card->ospriv,
2252 "process_fh_traffic_queue: %d chunks free, need %d\n",
2253 space_chunks, needed_chunks);
2254 read_fhsr(card); /* debugging only */
2255#endif /* CSR_WIFI_HIP_NOISY */
2256
2257 /* Coalesce as many from-host signals as possible
2258 into a single block and write using a single CMD53 */
2259 if (needed_chunks > (u16)space_chunks)
2260 {
2261 /* Round up to the block size if necessary */
2262 ROUND_UP_SPACE_CHUNKS(card, space_chunks);
2263
2264 if ((u16)space_chunks <= card->fh_buffer.count)
2265 {
2266 /*
2267 * No room in UniFi for any signals after the buffered bulk
2268 * data commands have been sent.
2269 */
2270 unifi_error(card->ospriv, "not enough room to send signals, need %d chunks, %d free\n",
2271 card->fh_buffer.count, space_chunks);
2272 card->generate_interrupt = 1;
2273 return 0;
2274 }
2275
2276 pending_chunks = (u16)space_chunks - card->fh_buffer.count;
2277 }
2278
2279 q_no = UNIFI_NO_OF_TX_QS - 1;
2280
2281 /*
2282 * pending_sigs will be exhausted if there are is no restriction to the pending
2283 * signals per queue. pending_chunks may be exhausted if there is a restriction.
2284 * q_no check will be exhausted if there is a restriction and our round-robin
2285 * algorith fails to fill all chunks.
2286 */
2287 do
2288 {
2289 card_signal_t *csptr;
2290 u16 sig_chunks, total_length, free_chunks_in_fh_buffer;
2291 bulk_data_param_t bulkdata;
2292 u8 *packed_sigptr;
2293 u16 signal_length = 0;
2294
2295 /* if this queue is empty go to next one. */
2296 if (CSR_WIFI_HIP_Q_SLOTS_USED(&sigq[q_no]) == 0)
2297 {
2298 q_no--;
2299 continue;
2300 }
2301
2302 /* Retrieve the entry at the head of the queue */
2303 q_index = CSR_WIFI_HIP_Q_NEXT_R_SLOT(&sigq[q_no]);
2304
2305 /* Get a pointer to the containing card_signal_t struct */
2306 csptr = CSR_WIFI_HIP_Q_SLOT_DATA(&sigq[q_no], q_index);
2307
2308 /* Get the new length of the packed signal */
2309 signal_length = csptr->signal_length;
2310
2311 if ((signal_length & 1) || (signal_length > UNIFI_PACKED_SIGBUF_SIZE))
2312 {
2313 unifi_error(card->ospriv, "process_fh_traffic_queue: Bad len: %d\n", signal_length);
2314 return CSR_RESULT_FAILURE;
2315 }
2316
2317 /* Need space for 2-byte SDIO protocol header + signal */
2318 sig_chunks = GET_CHUNKS_FOR(card->config_data.sig_frag_size, (u16)(signal_length + 2));
2319 free_chunks_in_fh_buffer = GET_CHUNKS_FOR(card->config_data.sig_frag_size,
2320 (u16)((card->fh_buffer.buf + UNIFI_FH_BUF_SIZE) - card->fh_buffer.ptr));
2321 if (free_chunks_in_fh_buffer < sig_chunks)
2322 {
2323 /* No more room */
2324 unifi_notice(card->ospriv, "process_fh_traffic_queue: no more chunks.\n");
2325 break;
2326 }
2327
2328 packed_sigptr = csptr->sigbuf;
2329 /* Claim and set up a from-host data slot */
2330 if (CSR_RESULT_FAILURE == CardWriteBulkData(card, csptr, (unifi_TrafficQueue)q_no))
2331 {
2332 q_no--;
2333 continue;
2334 }
2335
2336 /* Sanity check: MA-PACKET.req must have a valid bulk data */
2337 if ((csptr->bulkdata[0].data_length == 0) || (csptr->bulkdata[0].os_data_ptr == NULL))
2338 {
2339 unifi_error(card->ospriv, "MA-PACKET.req with empty bulk data (%d bytes in %p)\n",
2340 csptr->bulkdata[0].data_length, csptr->bulkdata[0].os_data_ptr);
2341 dump(packed_sigptr, signal_length);
2342 return CSR_RESULT_FAILURE;
2343 }
2344
2345 bulkdata.d[0].os_data_ptr = csptr->bulkdata[0].os_data_ptr;
2346 bulkdata.d[0].data_length = csptr->bulkdata[0].data_length;
2347 bulkdata.d[0].os_net_buf_ptr = csptr->bulkdata[0].os_net_buf_ptr;
2348 bulkdata.d[0].net_buf_length = csptr->bulkdata[0].net_buf_length;
2349
2350 /* The driver owns clearing of HIP slots for following scenario
2351 * - driver has requested a MA-PACKET.req signal
2352 * - The f/w after receiving the signal decides it can't send it out due to various reasons
2353 * - So the f/w without downloading the bulk data decides to just send a confirmation with fail
2354 * - and then sends a clear slot signal to HIP
2355 *
2356 * But in some cases the clear slot signal never comes and the slot remains --NOT-- freed for ever
2357 *
2358 * To handle this, HIP will keep the record of host tag for each occupied slot
2359 * and then based on status of that Host tag and slot the driver will decide if the slot is
2360 * cleared by f/w signal or the slot has to be freed by driver
2361 */
2362
2363 if (card->fh_slot_host_tag_record)
2364 {
2365 /* Update the f-h slot record for the corresponding host tag */
2366 host_tag = GET_PACKED_MA_PACKET_REQUEST_HOST_TAG(packed_sigptr);
2367 slot_num = GET_PACKED_DATAREF_SLOT(packed_sigptr, 0) & 0x00FF;
2368
2369 unifi_trace(card->ospriv, UDBG5,
2370 "process_fh_traffic_queue signal ID =%x fh slot=%x Host tag =%x\n",
2371 GET_SIGNAL_ID(packed_sigptr), slot_num, host_tag);
2372 card->fh_slot_host_tag_record[slot_num] = host_tag;
2373 }
2374 UNIFI_INIT_BULK_DATA(&bulkdata.d[1]);
2375 UNIFI_INIT_BULK_DATA(&csptr->bulkdata[0]);
2376 UNIFI_INIT_BULK_DATA(&csptr->bulkdata[1]);
2377
2378#ifdef CSR_WIFI_HIP_DATA_PLANE_PROFILE
2379 if (bulkdata.d[0].os_data_ptr)
2380 {
2381 if ((*bulkdata.d[0].os_data_ptr) & 0x08)
2382 {
2383 card->cmd_prof.tx_count++;
2384 }
2385 }
2386#endif
2387 unifi_trace(card->ospriv, UDBG3, "Sending signal 0x%.4X\n",
2388 GET_SIGNAL_ID(packed_sigptr));
2389#ifdef CSR_WIFI_HIP_NOISY
2390 unifi_error(card->ospriv, "Sending signal 0x%.4X\n",
2391 GET_SIGNAL_ID(packed_sigptr));
2392#endif /* CSR_WIFI_HIP_NOISY */
2393
2394 /* Append packed signal to F-H buffer */
2395 total_length = sig_chunks * card->config_data.sig_frag_size;
2396
2397 card->fh_buffer.ptr[0] = (u8)(signal_length & 0xff);
2398 card->fh_buffer.ptr[1] =
2399 (u8)(((signal_length >> 8) & 0xf) | (SDIO_CMD_SIGNAL << 4));
2400
2401 memcpy(card->fh_buffer.ptr + 2, packed_sigptr, signal_length);
2402 memset(card->fh_buffer.ptr + 2 + signal_length, 0,
2403 total_length - (2 + signal_length));
2404
2405#ifdef CSR_WIFI_HIP_NOISY
2406 unifi_error(card->ospriv, "proc_fh: fh_buffer %d bytes \n",
2407 signal_length + 2);
2408 dump(card->fh_buffer.ptr, signal_length + 2);
2409 unifi_trace(card->ospriv, UDBG1, " \n");
2410#endif /* CSR_WIFI_HIP_NOISY */
2411
2412 card->fh_buffer.ptr += total_length;
2413 card->fh_buffer.count += sig_chunks;
2414
2415#ifdef CSR_WIFI_HIP_NOISY
2416 unifi_error(card->ospriv, "Added %d to fh buf, len now %d, count %d\n",
2417 signal_length,
2418 card->fh_buffer.ptr - card->fh_buffer.buf,
2419 card->fh_buffer.count);
2420#endif /* CSR_WIFI_HIP_NOISY */
2421
2422 (*processed)++;
2423 pending_sigs--;
2424 pending_chunks -= sig_chunks;
2425
2426 /* Log the signal to the UDI. */
2427 /* UDI will get the packed structure */
2428 /* Can not log the unpacked signal, unless we reconstruct it! */
2429 if (card->udi_hook)
2430 {
2431 (*card->udi_hook)(card->ospriv, packed_sigptr, signal_length,
2432 &bulkdata, UDI_LOG_FROM_HOST);
2433 }
2434
2435 /* Remove entry from q */
2436 csptr->signal_length = 0;
2437 /* Note that the traffic queue has only one valid bulk data buffer. */
2438 csptr->bulkdata[0].data_length = 0;
2439
2440 CSR_WIFI_HIP_Q_INC_R(&sigq[q_no]);
2441 } while ((pending_sigs > 0) && (pending_chunks > 0) && (q_no >= 0));
2442
2443 return CSR_RESULT_SUCCESS;
2444} /* process_fh_traffic_queue() */
2445
2446
2447/*
2448 * ---------------------------------------------------------------------------
2449 * flush_fh_buffer
2450 *
2451 * Write out the cache from-hosts signals to the UniFi.
2452 *
2453 * Arguments:
2454 * card Pointer to card context struct
2455 *
2456 * Returns:
2457 * CSR error code if an SDIO error occurred.
2458 * ---------------------------------------------------------------------------
2459 */
static CsrResult flush_fh_buffer(card_t *card)
{
    CsrResult r;
    u16 len;                    /* bytes accumulated in the fh buffer */
    u16 sig_units;              /* size of one signal fragment (chunk) */
    u16 data_round;             /* SDIO block size to pad up to */
    u16 chunks_in_last_block;
    u16 padding_chunks;
    u16 i;

    len = card->fh_buffer.ptr - card->fh_buffer.buf;

#ifdef CSR_WIFI_HIP_NOISY
    unifi_error(card->ospriv, "fh_buffer is at %p, ptr= %p\n",
                card->fh_buffer.buf, card->fh_buffer.ptr);
#endif /* CSR_WIFI_HIP_NOISY */

    if (len == 0)
    {
        /* Nothing buffered, nothing to flush */
        return CSR_RESULT_SUCCESS;
    }

#ifdef CSR_WIFI_HIP_NOISY
    if (dump_fh_buf)
    {
        dump(card->fh_buffer.buf, len);
        dump_fh_buf = 0;
    }
#endif /* CSR_WIFI_HIP_NOISY */

    if (card->sdio_io_block_pad)
    {
        /* Both of these are powers of 2 */
        sig_units = card->config_data.sig_frag_size;
        data_round = card->sdio_io_block_size;

        if (data_round > sig_units)
        {
            /* Number of chunks in the final, partially-filled SDIO block */
            chunks_in_last_block = (len % data_round) / sig_units;

            if (chunks_in_last_block != 0)
            {
                padding_chunks = (data_round / sig_units) - chunks_in_last_block;

                /* Append zeroed padding chunks; each carries the
                 * SDIO_CMD_PADDING command in the high nibble of its
                 * second header byte (byte 0 stays zero from the memset). */
                memset(card->fh_buffer.ptr, 0, padding_chunks * sig_units);
                for (i = 0; i < padding_chunks; i++)
                {
                    card->fh_buffer.ptr[1] = SDIO_CMD_PADDING << 4;
                    card->fh_buffer.ptr += sig_units;
                }

                card->fh_buffer.count += padding_chunks;
                len += padding_chunks * sig_units;
            }
        }
    }

    /* Write the whole coalesced buffer to UniFi in one bulk transfer */
    r = unifi_bulk_rw(card,
                      card->config_data.fromhost_sigbuf_handle,
                      card->fh_buffer.buf,
                      len, UNIFI_SDIO_WRITE);
    if (r == CSR_WIFI_HIP_RESULT_NO_DEVICE)
    {
        return r;
    }
    if (r != CSR_RESULT_SUCCESS)
    {
        unifi_error(card->ospriv, "Failed to write fh signals: %u bytes, error %d\n", len, r);
        return r;
    }

    /* Update from-host-signals-written signal count */
    /* The count is a modulo-128 sliding counter shared with the chip */
    card->from_host_signals_w =
        (card->from_host_signals_w + card->fh_buffer.count) % 128u;
    r = unifi_write_8_or_16(card, card->sdio_ctrl_addr + 0,
                            (u8)card->from_host_signals_w);
    if (r != CSR_RESULT_SUCCESS)
    {
        unifi_error(card->ospriv, "Failed to write fh signal count %u with error %d\n",
                    card->from_host_signals_w, r);
        return r;
    }
    card->generate_interrupt = 1;

    /* Reset the fh buffer pointer */
    card->fh_buffer.ptr = card->fh_buffer.buf;
    card->fh_buffer.count = 0;

#ifdef CSR_WIFI_HIP_NOISY
    unifi_error(card->ospriv, "END flush: fh len %d, count %d\n",
                card->fh_buffer.ptr - card->fh_buffer.buf,
                card->fh_buffer.count);
#endif /* CSR_WIFI_HIP_NOISY */

    return CSR_RESULT_SUCCESS;
} /* flush_fh_buffer() */
2556
2557
2558/*
2559 * ---------------------------------------------------------------------------
2560 * restart_packet_flow
2561 *
2562 * This function is called before the bottom-half thread sleeps.
2563 * It checks whether both data and signal resources are available and
2564 * then calls the OS-layer function to re-enable packet transmission.
2565 *
2566 * Arguments:
2567 * card Pointer to card context struct
2568 *
2569 * Returns:
2570 * None.
2571 * ---------------------------------------------------------------------------
2572 */
2573static void restart_packet_flow(card_t *card)
2574{
2575 u8 q;
2576
2577 /*
2578 * We only look at the fh_traffic_queue, because that is where packets from
2579 * the network stack are placed.
2580 */
2581 for (q = 0; q <= UNIFI_TRAFFIC_Q_VO; q++)
2582 {
2583 if (card_is_tx_q_paused(card, q) &&
2584 CSR_WIFI_HIP_Q_SLOTS_FREE(&card->fh_traffic_queue[q]) >= RESUME_XMIT_THRESHOLD)
2585 {
2586#if defined (CSR_WIFI_HIP_DEBUG_OFFLINE) && defined (CSR_WIFI_HIP_DATA_PLANE_PROFILE)
2587 unifi_debug_log_to_buf("U");
2588#endif
2589 card_tx_q_unpause(card, q);
2590 unifi_restart_xmit(card->ospriv, (unifi_TrafficQueue)q);
2591 }
2592 }
2593} /* restart_packet_flow() */
2594
2595
diff --git a/drivers/staging/csr/csr_wifi_hip_card_sdio_mem.c b/drivers/staging/csr/csr_wifi_hip_card_sdio_mem.c
deleted file mode 100644
index 17867f60df16..000000000000
--- a/drivers/staging/csr/csr_wifi_hip_card_sdio_mem.c
+++ /dev/null
@@ -1,1713 +0,0 @@
1/*****************************************************************************
2
3 (c) Cambridge Silicon Radio Limited 2012
4 All rights reserved and confidential information of CSR
5
6 Refer to LICENSE.txt included with this source for details
7 on the license terms.
8
9*****************************************************************************/
10
11/*
12 * ---------------------------------------------------------------------------
13 * FILE: csr_wifi_hip_card_sdio_mem.c
14 *
15 * PURPOSE: Implementation of the Card API for SDIO.
16 *
17 * ---------------------------------------------------------------------------
18 */
19#include "csr_wifi_hip_unifi.h"
20#include "csr_wifi_hip_card.h"
21
22#define SDIO_RETRIES 3
23#define CSR_WIFI_HIP_SDIO_TRACE_DATA_LENGTH 16
24
25
26#define retryable_sdio_error(_csrResult) (((_csrResult) == CSR_SDIO_RESULT_CRC_ERROR) || ((_csrResult) == CSR_SDIO_RESULT_TIMEOUT))
27
28
29/*
30 * ---------------------------------------------------------------------------
31 * retrying_read8
32 * retrying_write8
33 *
34 * These functions provide the first level of retry for SDIO operations.
35 * If an SDIO command fails for reason of a response timeout or CRC
36 * error, it is retried immediately. If three attempts fail we report a
37 * failure.
38 * If the command failed for any other reason, the failure is reported
39 * immediately.
40 *
41 * Arguments:
42 * card Pointer to card structure.
43 * funcnum The SDIO function to access.
44 * Function 0 is the Card Configuration Register space,
45 * function 1/2 is the UniFi register space.
46 * addr Address to access
47 * pdata Pointer in which to return the value read.
48 * data Value to write.
49 *
50 * Returns:
51 * CSR_RESULT_SUCCESS on success, non-zero error code on error:
52 * CSR_WIFI_HIP_RESULT_NO_DEVICE card was ejected
53 * CSR_RESULT_FAILURE an SDIO error occurred
54 * ---------------------------------------------------------------------------
55 */
56static CsrResult retrying_read8(card_t *card, s16 funcnum, u32 addr, u8 *pdata)
57{
58 CsrSdioFunction *sdio = card->sdio_if;
59 CsrResult r = CSR_RESULT_SUCCESS;
60 s16 retries;
61 CsrResult csrResult = CSR_RESULT_SUCCESS;
62
63 retries = 0;
64 while (retries++ < SDIO_RETRIES)
65 {
66 if (funcnum == 0)
67 {
68#if defined (CSR_WIFI_HIP_DEBUG_OFFLINE) && defined (CSR_WIFI_HIP_SDIO_TRACE)
69 unifi_debug_log_to_buf("r0@%02X", addr);
70#endif
71 csrResult = CsrSdioF0Read8(sdio, addr, pdata);
72 }
73 else
74 {
75#ifdef CSR_WIFI_TRANSPORT_CSPI
76 unifi_error(card->ospriv,
77 "retrying_read_f0_8: F1 8-bit reads are not allowed.\n");
78 return CSR_RESULT_FAILURE;
79#else
80#if defined (CSR_WIFI_HIP_DEBUG_OFFLINE) && defined (CSR_WIFI_HIP_SDIO_TRACE)
81 unifi_debug_log_to_buf("r@%02X", addr);
82#endif
83 csrResult = CsrSdioRead8(sdio, addr, pdata);
84#endif
85 }
86#if defined (CSR_WIFI_HIP_DEBUG_OFFLINE) && defined (CSR_WIFI_HIP_SDIO_TRACE)
87 if (csrResult != CSR_RESULT_SUCCESS)
88 {
89 unifi_debug_log_to_buf("error=%X\n", csrResult);
90 }
91 else
92 {
93 unifi_debug_log_to_buf("=%X\n", *pdata);
94 }
95#endif
96 if (csrResult == CSR_SDIO_RESULT_NO_DEVICE)
97 {
98 return CSR_WIFI_HIP_RESULT_NO_DEVICE;
99 }
100 /*
101 * Try again for retryable (CRC or TIMEOUT) errors,
102 * break on success or fatal error
103 */
104 if (!retryable_sdio_error(csrResult))
105 {
106#ifdef CSR_WIFI_HIP_DATA_PLANE_PROFILE
107 card->cmd_prof.cmd52_count++;
108#endif
109 break;
110 }
111 unifi_trace(card->ospriv, UDBG2, "retryable SDIO error reading F%d 0x%lX\n", funcnum, addr);
112 }
113
114 if ((csrResult == CSR_RESULT_SUCCESS) && (retries > 1))
115 {
116 unifi_warning(card->ospriv, "Read succeeded after %d attempts\n", retries);
117 }
118
119 if (csrResult != CSR_RESULT_SUCCESS)
120 {
121 unifi_error(card->ospriv, "Failed to read from UniFi (addr 0x%lX) after %d tries\n",
122 addr, retries - 1);
123 /* Report any SDIO error as a general i/o error */
124 r = CSR_RESULT_FAILURE;
125 }
126
127 return r;
128} /* retrying_read8() */
129
130
131static CsrResult retrying_write8(card_t *card, s16 funcnum, u32 addr, u8 data)
132{
133 CsrSdioFunction *sdio = card->sdio_if;
134 CsrResult r = CSR_RESULT_SUCCESS;
135 s16 retries;
136 CsrResult csrResult = CSR_RESULT_SUCCESS;
137
138 retries = 0;
139 while (retries++ < SDIO_RETRIES)
140 {
141 if (funcnum == 0)
142 {
143#if defined (CSR_WIFI_HIP_DEBUG_OFFLINE) && defined (CSR_WIFI_HIP_SDIO_TRACE)
144 unifi_debug_log_to_buf("w0@%02X=%X", addr, data);
145#endif
146 csrResult = CsrSdioF0Write8(sdio, addr, data);
147 }
148 else
149 {
150#ifdef CSR_WIFI_TRANSPORT_CSPI
151 unifi_error(card->ospriv,
152 "retrying_write_f0_8: F1 8-bit writes are not allowed.\n");
153 return CSR_RESULT_FAILURE;
154#else
155#if defined (CSR_WIFI_HIP_DEBUG_OFFLINE) && defined (CSR_WIFI_HIP_SDIO_TRACE)
156 unifi_debug_log_to_buf("w@%02X=%X", addr, data);
157#endif
158 csrResult = CsrSdioWrite8(sdio, addr, data);
159#endif
160 }
161#if defined (CSR_WIFI_HIP_DEBUG_OFFLINE) && defined (CSR_WIFI_HIP_SDIO_TRACE)
162 if (csrResult != CSR_RESULT_SUCCESS)
163 {
164 unifi_debug_log_to_buf(",error=%X", csrResult);
165 }
166 unifi_debug_string_to_buf("\n");
167#endif
168 if (csrResult == CSR_SDIO_RESULT_NO_DEVICE)
169 {
170 return CSR_WIFI_HIP_RESULT_NO_DEVICE;
171 }
172 /*
173 * Try again for retryable (CRC or TIMEOUT) errors,
174 * break on success or fatal error
175 */
176 if (!retryable_sdio_error(csrResult))
177 {
178#ifdef CSR_WIFI_HIP_DATA_PLANE_PROFILE
179 card->cmd_prof.cmd52_count++;
180#endif
181 break;
182 }
183 unifi_trace(card->ospriv, UDBG2, "retryable SDIO error writing %02X to F%d 0x%lX\n",
184 data, funcnum, addr);
185 }
186
187 if ((csrResult == CSR_RESULT_SUCCESS) && (retries > 1))
188 {
189 unifi_warning(card->ospriv, "Write succeeded after %d attempts\n", retries);
190 }
191
192 if (csrResult != CSR_RESULT_SUCCESS)
193 {
194 unifi_error(card->ospriv, "Failed to write to UniFi (addr 0x%lX) after %d tries\n",
195 addr, retries - 1);
196 /* Report any SDIO error as a general i/o error */
197 r = CSR_RESULT_FAILURE;
198 }
199
200 return r;
201} /* retrying_write8() */
202
203
/*
 * Read a 16-bit word from the given SDIO function/address, retrying up to
 * SDIO_RETRIES times on retryable (CRC/timeout) errors. Returns
 * CSR_RESULT_SUCCESS, CSR_WIFI_HIP_RESULT_NO_DEVICE if the card was
 * ejected, or CSR_RESULT_FAILURE for any other persistent SDIO error.
 * On success *pdata holds the word read.
 */
static CsrResult retrying_read16(card_t *card, s16 funcnum,
                                 u32 addr, u16 *pdata)
{
    CsrSdioFunction *sdio = card->sdio_if;
    CsrResult r = CSR_RESULT_SUCCESS;
    s16 retries;
    CsrResult csrResult = CSR_RESULT_SUCCESS;

    retries = 0;
    while (retries++ < SDIO_RETRIES)
    {
#if defined (CSR_WIFI_HIP_DEBUG_OFFLINE) && defined (CSR_WIFI_HIP_SDIO_TRACE)
        /* Offline trace: log the read request before issuing it */
        unifi_debug_log_to_buf("r@%02X", addr);
#endif
        csrResult = CsrSdioRead16(sdio, addr, pdata);
#if defined (CSR_WIFI_HIP_DEBUG_OFFLINE) && defined (CSR_WIFI_HIP_SDIO_TRACE)
        /* Offline trace: log either the error code or the value read */
        if (csrResult != CSR_RESULT_SUCCESS)
        {
            unifi_debug_log_to_buf("error=%X\n", csrResult);
        }
        else
        {
            unifi_debug_log_to_buf("=%X\n", *pdata);
        }
#endif
        /* Card removal is fatal and reported distinctly — no retry */
        if (csrResult == CSR_SDIO_RESULT_NO_DEVICE)
        {
            return CSR_WIFI_HIP_RESULT_NO_DEVICE;
        }

        /*
         * Try again for retryable (CRC or TIMEOUT) errors,
         * break on success or fatal error
         */
        if (!retryable_sdio_error(csrResult))
        {
#ifdef CSR_WIFI_HIP_DATA_PLANE_PROFILE
            /* Profiling: count one completed CMD52 transaction */
            card->cmd_prof.cmd52_count++;
#endif
            break;
        }
        unifi_trace(card->ospriv, UDBG2, "retryable SDIO error reading F%d 0x%lX\n", funcnum, addr);
    }

    /* retries > 1 means at least one attempt failed before success */
    if ((csrResult == CSR_RESULT_SUCCESS) && (retries > 1))
    {
        unifi_warning(card->ospriv, "Read succeeded after %d attempts\n", retries);
    }

    if (csrResult != CSR_RESULT_SUCCESS)
    {
        unifi_error(card->ospriv, "Failed to read from UniFi (addr 0x%lX) after %d tries\n",
                    addr, retries - 1);
        /* Report any SDIO error as a general i/o error */
        r = CSR_RESULT_FAILURE;
    }

    return r;
} /* retrying_read16() */
263
264
/*
 * Write a 16-bit word to the given SDIO function/address, retrying up to
 * SDIO_RETRIES times on retryable (CRC/timeout) errors. Returns
 * CSR_RESULT_SUCCESS, CSR_WIFI_HIP_RESULT_NO_DEVICE if the card was
 * ejected, or CSR_RESULT_FAILURE for any other persistent SDIO error.
 */
static CsrResult retrying_write16(card_t *card, s16 funcnum,
                                  u32 addr, u16 data)
{
    CsrSdioFunction *sdio = card->sdio_if;
    CsrResult r = CSR_RESULT_SUCCESS;
    s16 retries;
    CsrResult csrResult = CSR_RESULT_SUCCESS;

    retries = 0;
    while (retries++ < SDIO_RETRIES)
    {
#if defined (CSR_WIFI_HIP_DEBUG_OFFLINE) && defined (CSR_WIFI_HIP_SDIO_TRACE)
        /* Offline trace: log the write request before issuing it */
        unifi_debug_log_to_buf("w@%02X=%X", addr, data);
#endif
        csrResult = CsrSdioWrite16(sdio, addr, data);
#if defined (CSR_WIFI_HIP_DEBUG_OFFLINE) && defined (CSR_WIFI_HIP_SDIO_TRACE)
        /* Offline trace: append the error (if any) and terminate the line */
        if (csrResult != CSR_RESULT_SUCCESS)
        {
            unifi_debug_log_to_buf(",error=%X", csrResult);
        }
        unifi_debug_string_to_buf("\n");
#endif
        /* Card removal is fatal and reported distinctly — no retry */
        if (csrResult == CSR_SDIO_RESULT_NO_DEVICE)
        {
            return CSR_WIFI_HIP_RESULT_NO_DEVICE;
        }

        /*
         * Try again for retryable (CRC or TIMEOUT) errors,
         * break on success or fatal error
         */
        if (!retryable_sdio_error(csrResult))
        {
#ifdef CSR_WIFI_HIP_DATA_PLANE_PROFILE
            /* Profiling: count one completed CMD52 transaction */
            card->cmd_prof.cmd52_count++;
#endif
            break;
        }
        unifi_trace(card->ospriv, UDBG2, "retryable SDIO error writing %02X to F%d 0x%lX\n",
                    data, funcnum, addr);
    }

    /* retries > 1 means at least one attempt failed before success */
    if ((csrResult == CSR_RESULT_SUCCESS) && (retries > 1))
    {
        unifi_warning(card->ospriv, "Write succeeded after %d attempts\n", retries);
    }

    if (csrResult != CSR_RESULT_SUCCESS)
    {
        unifi_error(card->ospriv, "Failed to write to UniFi (addr 0x%lX) after %d tries\n",
                    addr, retries - 1);
        /* Report any SDIO error as a general i/o error */
        r = CSR_RESULT_FAILURE;
    }

    return r;
} /* retrying_write16() */
322
323
324/*
325 * ---------------------------------------------------------------------------
326 * sdio_read_f0
327 *
328 * Reads a byte value from the CCCR (func 0) area of UniFi.
329 *
330 * Arguments:
331 * card Pointer to card structure.
332 * addr Address to read from
333 * pdata Pointer in which to store the read value.
334 *
335 * Returns:
336 * CSR_RESULT_SUCCESS on success, non-zero error code on error:
337 * CSR_WIFI_HIP_RESULT_NO_DEVICE card was ejected
338 * CSR_RESULT_FAILURE an SDIO error occurred
339 * ---------------------------------------------------------------------------
340 */
341CsrResult sdio_read_f0(card_t *card, u32 addr, u8 *pdata)
342{
343#if defined (CSR_WIFI_HIP_DEBUG_OFFLINE) && defined (CSR_WIFI_HIP_DATA_PLANE_PROFILE)
344 card->cmd_prof.cmd52_f0_r_count++;
345#endif
346 return retrying_read8(card, 0, addr, pdata);
347} /* sdio_read_f0() */
348
349
350/*
351 * ---------------------------------------------------------------------------
352 * sdio_write_f0
353 *
354 * Writes a byte value to the CCCR (func 0) area of UniFi.
355 *
356 * Arguments:
357 * card Pointer to card structure.
 *      addr            Address to write to
359 * data Data value to write.
360 *
361 * Returns:
362 * CSR_RESULT_SUCCESS on success, non-zero error code on error:
363 * CSR_WIFI_HIP_RESULT_NO_DEVICE card was ejected
364 * CSR_RESULT_FAILURE an SDIO error occurred
365 * ---------------------------------------------------------------------------
366 */
367CsrResult sdio_write_f0(card_t *card, u32 addr, u8 data)
368{
369#if defined (CSR_WIFI_HIP_DEBUG_OFFLINE) && defined (CSR_WIFI_HIP_DATA_PLANE_PROFILE)
370 card->cmd_prof.cmd52_f0_w_count++;
371#endif
372 return retrying_write8(card, 0, addr, data);
373} /* sdio_write_f0() */
374
375
376/*
377 * ---------------------------------------------------------------------------
378 * unifi_read_direct_8_or_16
379 *
 *      Read an 8-bit value from the UniFi SDIO interface.
381 *
382 * Arguments:
383 * card Pointer to card structure.
384 * addr Address to read from
385 * pdata Pointer in which to return data.
386 *
387 * Returns:
388 * CSR_RESULT_SUCCESS on success, non-zero error code on error:
389 * ---------------------------------------------------------------------------
390 */
391CsrResult unifi_read_direct_8_or_16(card_t *card, u32 addr, u8 *pdata)
392{
393#ifdef CSR_WIFI_TRANSPORT_CSPI
394 u16 w;
395 CsrResult r;
396
397 r = retrying_read16(card, card->function, addr, &w);
398 *pdata = (u8)(w & 0xFF);
399 return r;
400#else
401 return retrying_read8(card, card->function, addr, pdata);
402#endif
403} /* unifi_read_direct_8_or_16() */
404
405
406/*
407 * ---------------------------------------------------------------------------
408 * unifi_write_direct_8_or_16
409 *
410 * Write a byte value to the UniFi SDIO interface.
411 *
412 * Arguments:
413 * card Pointer to card structure.
414 * addr Address to write to
415 * data Value to write.
416 *
417 * Returns:
418 * CSR_RESULT_SUCCESS on success, non-zero error code on error
419 *
420 * Notes:
421 * If 8-bit write is used, the even address *must* be written second.
422 * This is because writes to odd bytes are cached and not committed
423 * to memory until the preceding even address is written.
424 * ---------------------------------------------------------------------------
425 */
426CsrResult unifi_write_direct_8_or_16(card_t *card, u32 addr, u8 data)
427{
428 if (addr & 1)
429 {
430 unifi_warning(card->ospriv,
431 "Warning: Byte write to an odd address (0x%lX) is dangerous\n",
432 addr);
433 }
434
435#ifdef CSR_WIFI_TRANSPORT_CSPI
436 return retrying_write16(card, card->function, addr, (u16)data);
437#else
438 return retrying_write8(card, card->function, addr, data);
439#endif
440} /* unifi_write_direct_8_or_16() */
441
442
443/*
444 * ---------------------------------------------------------------------------
445 * unifi_read_direct16
446 *
447 * Read a 16-bit value from the UniFi SDIO interface.
448 *
449 * Arguments:
450 * card Pointer to card structure.
451 * addr Address to read from
452 * pdata Pointer in which to return data.
453 *
454 * Returns:
455 * CSR_RESULT_SUCCESS on success, non-zero error code on error:
456 * CSR_WIFI_HIP_RESULT_NO_DEVICE card was ejected
457 * CSR_RESULT_FAILURE an SDIO error occurred
458 *
459 * Notes:
460 * The even address *must* be read first. This is because reads from
461 * odd bytes are cached and read from memory when the preceding
462 * even address is read.
463 * ---------------------------------------------------------------------------
464 */
465CsrResult unifi_read_direct16(card_t *card, u32 addr, u16 *pdata)
466{
467 return retrying_read16(card, card->function, addr, pdata);
468} /* unifi_read_direct16() */
469
470
471/*
472 * ---------------------------------------------------------------------------
473 * unifi_write_direct16
474 *
475 * Write a 16-bit value to the UniFi SDIO interface.
476 *
477 * Arguments:
478 * card Pointer to card structure.
479 * addr Address to write to
480 * data Value to write.
481 *
482 * Returns:
483 * CSR_RESULT_SUCCESS on success, non-zero error code on error:
484 * CSR_WIFI_HIP_RESULT_NO_DEVICE card was ejected
485 * CSR_RESULT_FAILURE an SDIO error occurred
486 *
487 * Notes:
488 * The even address *must* be written second. This is because writes to
489 * odd bytes are cached and not committed to memory until the preceding
490 * even address is written.
491 * ---------------------------------------------------------------------------
492 */
493CsrResult unifi_write_direct16(card_t *card, u32 addr, u16 data)
494{
495 return retrying_write16(card, card->function, addr, data);
496} /* unifi_write_direct16() */
497
498
499/*
500 * ---------------------------------------------------------------------------
501 * unifi_read_direct32
502 *
503 * Read a 32-bit value from the UniFi SDIO interface.
504 *
505 * Arguments:
506 * card Pointer to card structure.
507 * addr Address to read from
508 * pdata Pointer in which to return data.
509 *
510 * Returns:
511 * CSR_RESULT_SUCCESS on success, non-zero error code on error:
512 * CSR_WIFI_HIP_RESULT_NO_DEVICE card was ejected
513 * CSR_RESULT_FAILURE an SDIO error occurred
514 * ---------------------------------------------------------------------------
515 */
516CsrResult unifi_read_direct32(card_t *card, u32 addr, u32 *pdata)
517{
518 CsrResult r;
519 u16 w0, w1;
520
521 r = retrying_read16(card, card->function, addr, &w0);
522 if (r != CSR_RESULT_SUCCESS)
523 {
524 return r;
525 }
526
527 r = retrying_read16(card, card->function, addr + 2, &w1);
528 if (r != CSR_RESULT_SUCCESS)
529 {
530 return r;
531 }
532
533 *pdata = ((u32)w1 << 16) | (u32)w0;
534
535 return CSR_RESULT_SUCCESS;
536} /* unifi_read_direct32() */
537
538
539/*
540 * ---------------------------------------------------------------------------
541 * unifi_read_directn_match
542 *
543 * Read multiple 8-bit values from the UniFi SDIO interface,
544 * stopping when either we have read 'len' bytes or we have read
545 * a octet equal to 'match'. If 'match' is not a valid octet
546 * then this function is the same as 'unifi_read_directn'.
547 *
548 * Arguments:
549 * card Pointer to card structure.
550 * addr Start address to read from.
551 * pdata Pointer to which to write data.
 *      len             Maximum number of bytes to read
553 * match The value to stop reading at.
554 * num Pointer to buffer to write number of bytes read
555 *
556 * Returns:
557 * number of octets read on success, negative error code on error:
558 * CSR_WIFI_HIP_RESULT_NO_DEVICE card was ejected
559 * CSR_RESULT_FAILURE an SDIO error occurred
560 *
561 * Notes:
562 * The even address *must* be read first. This is because reads from
563 * odd bytes are cached and read from memory when the preceding
564 * even address is read.
565 * ---------------------------------------------------------------------------
566 */
static CsrResult unifi_read_directn_match(card_t *card, u32 addr, void *pdata, u16 len, s8 m, u32 *num)
{
    CsrResult r;
    u32 i;
    u8 *cptr;   /* output cursor into pdata */
    u16 w;      /* 16-bit word read from the chip; bytes unpacked below */

    /* *num stays 0 on an error return */
    *num = 0;

    cptr = (u8 *)pdata;
    for (i = 0; i < len; i += 2)
    {
        /* One 16-bit read yields a pair of bytes (low byte = even address) */
        r = retrying_read16(card, card->function, addr, &w);
        if (r != CSR_RESULT_SUCCESS)
        {
            return r;
        }

        /* Store the low byte; stop if it equals the match octet.
         * m < 0 disables matching entirely. */
        *cptr++ = ((u8)w & 0xFF);
        if ((m >= 0) && (((s8)w & 0xFF) == m))
        {
            break;
        }

        if (i + 1 == len)
        {
            /* The len is odd. Ignore the last high byte */
            break;
        }

        /* Store the high byte; again stop on a match */
        *cptr++ = ((u8)(w >> 8) & 0xFF);
        if ((m >= 0) && (((s8)(w >> 8) & 0xFF) == m))
        {
            break;
        }

        addr += 2;
    }

    /* Bytes actually stored, including the matched octet if one was found */
    *num = (s32)(cptr - (u8 *)pdata);
    return CSR_RESULT_SUCCESS;
}
609
610
611/*
612 * ---------------------------------------------------------------------------
613 * unifi_read_directn
614 *
615 * Read multiple 8-bit values from the UniFi SDIO interface.
616 *
617 * Arguments:
618 * card Pointer to card structure.
619 * addr Start address to read from.
620 * pdata Pointer to which to write data.
621 * len Number of bytes to read
622 *
623 * Returns:
624 * 0 on success, non-zero error code on error:
625 * CSR_WIFI_HIP_RESULT_NO_DEVICE card was ejected
626 * CSR_RESULT_FAILURE an SDIO error occurred
627 *
628 * Notes:
629 * The even address *must* be read first. This is because reads from
630 * odd bytes are cached and read from memory when the preceding
631 * even address is read.
632 * ---------------------------------------------------------------------------
633 */
634CsrResult unifi_read_directn(card_t *card, u32 addr, void *pdata, u16 len)
635{
636 u32 num;
637
638 return unifi_read_directn_match(card, addr, pdata, len, -1, &num);
639} /* unifi_read_directn() */
640
641
642/*
643 * ---------------------------------------------------------------------------
644 * unifi_write_directn
645 *
646 * Write multiple 8-bit values to the UniFi SDIO interface.
647 *
648 * Arguments:
649 * card Pointer to card structure.
650 * addr Start address to write to.
651 * pdata Source data pointer.
652 * len Number of bytes to write, must be even.
653 *
654 * Returns:
655 * 0 on success, non-zero error code on error:
656 * CSR_WIFI_HIP_RESULT_NO_DEVICE card was ejected
657 * CSR_RESULT_FAILURE an SDIO error occurred
658 *
659 * Notes:
660 * The UniFi has a peculiar 16-bit bus architecture. Writes are only
661 * committed to memory when an even address is accessed. Writes to
662 * odd addresses are cached and only committed if the next write is
663 * to the preceding address.
664 * This means we must write data as pairs of bytes in reverse order.
665 * ---------------------------------------------------------------------------
666 */
CsrResult unifi_write_directn(card_t *card, u32 addr, void *pdata, u16 len)
{
    CsrResult r;
    u8 *cptr;        /* source cursor into pdata */
    s16 signed_len;  /* signed copy so the > 0 loop test terminates for odd len */

    cptr = (u8 *)pdata;
    signed_len = (s16)len;
    while (signed_len > 0)
    {
        /* This is UniFi-1 specific code. CSPI not supported so 8-bit write allowed */
        /* NOTE(review): only *cptr (one byte, zero-extended to u16) is passed
         * to the 16-bit write, yet cptr advances by 2 — the odd byte of each
         * pair is never read from the buffer. Presumably intentional for
         * UniFi-1's bus behaviour, but worth confirming against the HIP spec. */
        r = retrying_write16(card, card->function, addr, *cptr);
        if (r != CSR_RESULT_SUCCESS)
        {
            return r;
        }

        cptr += 2;
        addr += 2;
        signed_len -= 2;
    }

    return CSR_RESULT_SUCCESS;
} /* unifi_write_directn() */
691
692
693/*
694 * ---------------------------------------------------------------------------
695 * set_dmem_page
696 * set_pmem_page
697 *
698 * Set up the page register for the shared data memory window or program
699 * memory window.
700 *
701 * Arguments:
702 * card Pointer to card structure.
703 * dmem_addr UniFi shared-data-memory address to access.
704 * pmem_addr UniFi program memory address to access. This includes
705 * External FLASH memory at 0x000000
706 * Processor program memory at 0x200000
707 * External SRAM at memory 0x400000
708 * paddr Location to write an SDIO address (24-bit) for
709 * use in a unifi_read_direct or unifi_write_direct call.
710 *
711 * Returns:
712 * CSR_RESULT_SUCCESS on success
713 * CSR_WIFI_HIP_RESULT_NO_DEVICE card was ejected
714 * CSR_RESULT_FAILURE an SDIO error occurred
715 * ---------------------------------------------------------------------------
716 */
717static CsrResult set_dmem_page(card_t *card, u32 dmem_addr, u32 *paddr)
718{
719 u16 page, addr;
720 u32 len;
721 CsrResult r;
722
723 *paddr = 0;
724
725 if (!ChipHelper_DecodeWindow(card->helper,
726 CHIP_HELPER_WINDOW_3,
727 CHIP_HELPER_WT_SHARED,
728 dmem_addr / 2,
729 &page, &addr, &len))
730 {
731 unifi_error(card->ospriv, "Failed to decode SHARED_DMEM_PAGE %08lx\n", dmem_addr);
732 return CSR_WIFI_HIP_RESULT_INVALID_VALUE;
733 }
734
735 if (page != card->dmem_page)
736 {
737 unifi_trace(card->ospriv, UDBG6, "setting dmem page=0x%X, addr=0x%lX\n", page, addr);
738
739 /* change page register */
740 r = unifi_write_direct16(card, ChipHelper_HOST_WINDOW3_PAGE(card->helper) * 2, page);
741 if (r != CSR_RESULT_SUCCESS)
742 {
743 unifi_error(card->ospriv, "Failed to write SHARED_DMEM_PAGE\n");
744 return r;
745 }
746
747 card->dmem_page = page;
748 }
749
750 *paddr = ((s32)addr * 2) + (dmem_addr & 1);
751
752 return CSR_RESULT_SUCCESS;
753} /* set_dmem_page() */
754
755
756static CsrResult set_pmem_page(card_t *card, u32 pmem_addr,
757 enum chip_helper_window_type mem_type, u32 *paddr)
758{
759 u16 page, addr;
760 u32 len;
761 CsrResult r;
762
763 *paddr = 0;
764
765 if (!ChipHelper_DecodeWindow(card->helper,
766 CHIP_HELPER_WINDOW_2,
767 mem_type,
768 pmem_addr / 2,
769 &page, &addr, &len))
770 {
771 unifi_error(card->ospriv, "Failed to decode PROG MEM PAGE %08lx %d\n", pmem_addr, mem_type);
772 return CSR_WIFI_HIP_RESULT_INVALID_VALUE;
773 }
774
775 if (page != card->pmem_page)
776 {
777 unifi_trace(card->ospriv, UDBG6, "setting pmem page=0x%X, addr=0x%lX\n", page, addr);
778
779 /* change page register */
780 r = unifi_write_direct16(card, ChipHelper_HOST_WINDOW2_PAGE(card->helper) * 2, page);
781 if (r != CSR_RESULT_SUCCESS)
782 {
783 unifi_error(card->ospriv, "Failed to write PROG MEM PAGE\n");
784 return r;
785 }
786
787 card->pmem_page = page;
788 }
789
790 *paddr = ((s32)addr * 2) + (pmem_addr & 1);
791
792 return CSR_RESULT_SUCCESS;
793} /* set_pmem_page() */
794
795
796/*
797 * ---------------------------------------------------------------------------
798 * set_page
799 *
800 * Sets up the appropriate page register to access the given address.
801 * Returns the sdio address at which the unifi address can be accessed.
802 *
803 * Arguments:
804 * card Pointer to card structure.
805 * generic_addr UniFi internal address to access, in Generic Pointer
806 * format, i.e. top byte is space indicator.
807 * paddr Location to write page address
808 * SDIO address (24-bit) for use in a unifi_read_direct or
809 * unifi_write_direct call
810 *
811 * Returns:
812 * CSR_WIFI_HIP_RESULT_NO_DEVICE card was ejected
813 * CSR_RESULT_FAILURE an SDIO error occurred
814 * CSR_WIFI_HIP_RESULT_INVALID_VALUE the address is invalid
815 * ---------------------------------------------------------------------------
816 */
817static CsrResult set_page(card_t *card, u32 generic_addr, u32 *paddr)
818{
819 s32 space;
820 u32 addr;
821 CsrResult r = CSR_RESULT_SUCCESS;
822
823 if (!paddr)
824 {
825 return CSR_WIFI_HIP_RESULT_INVALID_VALUE;
826 }
827 *paddr = 0;
828 space = UNIFI_GP_SPACE(generic_addr);
829 addr = UNIFI_GP_OFFSET(generic_addr);
830 switch (space)
831 {
832 case UNIFI_SH_DMEM:
833 /* Shared Data Memory is accessed via the Shared Data Memory window */
834 r = set_dmem_page(card, addr, paddr);
835 if (r != CSR_RESULT_SUCCESS)
836 {
837 return r;
838 }
839 break;
840
841 case UNIFI_EXT_FLASH:
842 if (!ChipHelper_HasFlash(card->helper))
843 {
844 unifi_error(card->ospriv, "Bad address space for chip in generic pointer 0x%08lX (helper=0x%x)\n",
845 generic_addr, card->helper);
846 return CSR_WIFI_HIP_RESULT_INVALID_VALUE;
847 }
848 /* External FLASH is accessed via the Program Memory window */
849 r = set_pmem_page(card, addr, CHIP_HELPER_WT_FLASH, paddr);
850 break;
851
852 case UNIFI_EXT_SRAM:
853 if (!ChipHelper_HasExtSram(card->helper))
854 {
855 unifi_error(card->ospriv, "Bad address space for chip in generic pointer 0x%08l (helper=0x%x)\n",
856 generic_addr, card->helper);
857 return CSR_WIFI_HIP_RESULT_INVALID_VALUE;
858 }
859 /* External SRAM is accessed via the Program Memory window */
860 r = set_pmem_page(card, addr, CHIP_HELPER_WT_EXT_SRAM, paddr);
861 break;
862
863 case UNIFI_REGISTERS:
864 /* Registers are accessed directly */
865 *paddr = addr;
866 break;
867
868 case UNIFI_PHY_DMEM:
869 r = unifi_set_proc_select(card, UNIFI_PROC_PHY);
870 if (r != CSR_RESULT_SUCCESS)
871 {
872 return r;
873 }
874 *paddr = ChipHelper_DATA_MEMORY_RAM_OFFSET(card->helper) * 2 + addr;
875 break;
876
877 case UNIFI_MAC_DMEM:
878 r = unifi_set_proc_select(card, UNIFI_PROC_MAC);
879 if (r != CSR_RESULT_SUCCESS)
880 {
881 return r;
882 }
883 *paddr = ChipHelper_DATA_MEMORY_RAM_OFFSET(card->helper) * 2 + addr;
884 break;
885
886 case UNIFI_BT_DMEM:
887 if (!ChipHelper_HasBt(card->helper))
888 {
889 unifi_error(card->ospriv, "Bad address space for chip in generic pointer 0x%08lX (helper=0x%x)\n",
890 generic_addr, card->helper);
891 return CSR_WIFI_HIP_RESULT_INVALID_VALUE;
892 }
893 r = unifi_set_proc_select(card, UNIFI_PROC_BT);
894 if (r != CSR_RESULT_SUCCESS)
895 {
896 return r;
897 }
898 *paddr = ChipHelper_DATA_MEMORY_RAM_OFFSET(card->helper) * 2 + addr;
899 break;
900
901 case UNIFI_PHY_PMEM:
902 r = unifi_set_proc_select(card, UNIFI_PROC_PHY);
903 if (r != CSR_RESULT_SUCCESS)
904 {
905 return r;
906 }
907 r = set_pmem_page(card, addr, CHIP_HELPER_WT_CODE_RAM, paddr);
908 break;
909
910 case UNIFI_MAC_PMEM:
911 r = unifi_set_proc_select(card, UNIFI_PROC_MAC);
912 if (r != CSR_RESULT_SUCCESS)
913 {
914 return r;
915 }
916 r = set_pmem_page(card, addr, CHIP_HELPER_WT_CODE_RAM, paddr);
917 break;
918
919 case UNIFI_BT_PMEM:
920 if (!ChipHelper_HasBt(card->helper))
921 {
922 unifi_error(card->ospriv, "Bad address space for chip in generic pointer 0x%08lX (helper=0x%x)\n",
923 generic_addr, card->helper);
924 return CSR_WIFI_HIP_RESULT_INVALID_VALUE;
925 }
926 r = unifi_set_proc_select(card, UNIFI_PROC_BT);
927 if (r != CSR_RESULT_SUCCESS)
928 {
929 return r;
930 }
931 r = set_pmem_page(card, addr, CHIP_HELPER_WT_CODE_RAM, paddr);
932 break;
933
934 case UNIFI_PHY_ROM:
935 if (!ChipHelper_HasRom(card->helper))
936 {
937 unifi_error(card->ospriv, "Bad address space for chip in generic pointer 0x%08lX (helper=0x%x)\n",
938 generic_addr, card->helper);
939 return CSR_WIFI_HIP_RESULT_INVALID_VALUE;
940 }
941 r = unifi_set_proc_select(card, UNIFI_PROC_PHY);
942 if (r != CSR_RESULT_SUCCESS)
943 {
944 return r;
945 }
946 r = set_pmem_page(card, addr, CHIP_HELPER_WT_ROM, paddr);
947 break;
948
949 case UNIFI_MAC_ROM:
950 if (!ChipHelper_HasRom(card->helper))
951 {
952 unifi_error(card->ospriv, "Bad address space for chip in generic pointer 0x%08lX (helper=0x%x)\n",
953 generic_addr, card->helper);
954 return CSR_WIFI_HIP_RESULT_INVALID_VALUE;
955 }
956 r = unifi_set_proc_select(card, UNIFI_PROC_MAC);
957 if (r != CSR_RESULT_SUCCESS)
958 {
959 return r;
960 }
961 r = set_pmem_page(card, addr, CHIP_HELPER_WT_ROM, paddr);
962 break;
963
964 case UNIFI_BT_ROM:
965 if (!ChipHelper_HasRom(card->helper) || !ChipHelper_HasBt(card->helper))
966 {
967 unifi_error(card->ospriv, "Bad address space for chip in generic pointer 0x%08lX (helper=0x%x)\n",
968 generic_addr, card->helper);
969 return CSR_WIFI_HIP_RESULT_INVALID_VALUE;
970 }
971 r = unifi_set_proc_select(card, UNIFI_PROC_BT);
972 if (r != CSR_RESULT_SUCCESS)
973 {
974 return r;
975 }
976 r = set_pmem_page(card, addr, CHIP_HELPER_WT_ROM, paddr);
977 break;
978
979 default:
980 unifi_error(card->ospriv, "Bad address space %d in generic pointer 0x%08lX (helper=0x%x)\n",
981 space, generic_addr, card->helper);
982 return CSR_WIFI_HIP_RESULT_INVALID_VALUE;
983 }
984
985 return r;
986} /* set_page() */
987
988
989/*
990 * ---------------------------------------------------------------------------
991 * unifi_set_proc_select
992 *
993 *
994 * Arguments:
995 * card Pointer to card structure.
996 * select Which XAP core to select
997 *
998 * Returns:
999 * 0 on success, non-zero error code on error:
1000 * CSR_WIFI_HIP_RESULT_NO_DEVICE card was ejected
1001 * CSR_RESULT_FAILURE an SDIO error occurred
1002 * ---------------------------------------------------------------------------
1003 */
1004CsrResult unifi_set_proc_select(card_t *card, enum unifi_dbg_processors_select select)
1005{
1006 CsrResult r;
1007
1008 /* Verify the the select value is allowed. */
1009 switch (select)
1010 {
1011 case UNIFI_PROC_MAC:
1012 case UNIFI_PROC_PHY:
1013 case UNIFI_PROC_BOTH:
1014 break;
1015
1016
1017 default:
1018 return CSR_WIFI_HIP_RESULT_INVALID_VALUE;
1019 }
1020
1021 if (card->proc_select != (u32)select)
1022 {
1023 r = unifi_write_direct16(card,
1024 ChipHelper_DBG_HOST_PROC_SELECT(card->helper) * 2,
1025 (u8)select);
1026 if (r == CSR_WIFI_HIP_RESULT_NO_DEVICE)
1027 {
1028 return r;
1029 }
1030 if (r != CSR_RESULT_SUCCESS)
1031 {
1032 unifi_error(card->ospriv, "Failed to write to Proc Select register\n");
1033 return r;
1034 }
1035
1036 card->proc_select = (u32)select;
1037 }
1038
1039 return CSR_RESULT_SUCCESS;
1040}
1041
1042
1043/*
1044 * ---------------------------------------------------------------------------
1045 * unifi_read_8_or_16
1046 *
1047 * Performs a byte read of the given address in shared data memory.
1048 * Set up the shared data memory page register as required.
1049 *
1050 * Arguments:
1051 * card Pointer to card structure.
1052 * unifi_addr UniFi shared-data-memory address to access.
1053 * pdata Pointer to a byte variable for the value read.
1054 *
1055 * Returns:
1056 * CSR_RESULT_SUCCESS on success, non-zero error code on error:
1057 * CSR_WIFI_HIP_RESULT_NO_DEVICE card was ejected
1058 * CSR_RESULT_FAILURE an SDIO error occurred
1059 * CSR_WIFI_HIP_RESULT_INVALID_VALUE a bad generic pointer was specified
1060 * ---------------------------------------------------------------------------
1061 */
1062CsrResult unifi_read_8_or_16(card_t *card, u32 unifi_addr, u8 *pdata)
1063{
1064 u32 sdio_addr;
1065 CsrResult r;
1066#ifdef CSR_WIFI_TRANSPORT_CSPI
1067 u16 w;
1068#endif
1069
1070 r = set_page(card, unifi_addr, &sdio_addr);
1071 if (r != CSR_RESULT_SUCCESS)
1072 {
1073 return r;
1074 }
1075
1076#if defined (CSR_WIFI_HIP_DEBUG_OFFLINE) && defined (CSR_WIFI_HIP_DATA_PLANE_PROFILE)
1077 card->cmd_prof.cmd52_r8or16_count++;
1078#endif
1079#ifdef CSR_WIFI_TRANSPORT_CSPI
1080 r = retrying_read16(card, card->function, sdio_addr, &w);
1081 *pdata = (u8)(w & 0xFF);
1082 return r;
1083#else
1084 return retrying_read8(card, card->function, sdio_addr, pdata);
1085#endif
1086} /* unifi_read_8_or_16() */
1087
1088
1089/*
1090 * ---------------------------------------------------------------------------
1091 * unifi_write_8_or_16
1092 *
1093 * Performs a byte write of the given address in shared data memory.
1094 * Set up the shared data memory page register as required.
1095 *
1096 * Arguments:
1097 * card Pointer to card context struct.
1098 * unifi_addr UniFi shared-data-memory address to access.
1099 * data Value to write.
1100 *
1101 * Returns:
1102 * CSR_RESULT_SUCCESS on success, non-zero error code on error:
1103 * CSR_WIFI_HIP_RESULT_NO_DEVICE card was ejected
1104 * CSR_RESULT_FAILURE an SDIO error occurred
1105 * CSR_WIFI_HIP_RESULT_INVALID_VALUE a bad generic pointer was specified
1106 *
1107 * Notes:
1108 * Beware using unifi_write8() because byte writes are not safe on UniFi.
1109 * Writes to odd bytes are cached, writes to even bytes perform a 16-bit
1110 * write with the previously cached odd byte.
1111 * ---------------------------------------------------------------------------
1112 */
1113CsrResult unifi_write_8_or_16(card_t *card, u32 unifi_addr, u8 data)
1114{
1115 u32 sdio_addr;
1116 CsrResult r;
1117#ifdef CSR_WIFI_TRANSPORT_CSPI
1118 u16 w;
1119#endif
1120
1121 r = set_page(card, unifi_addr, &sdio_addr);
1122 if (r != CSR_RESULT_SUCCESS)
1123 {
1124 return r;
1125 }
1126
1127 if (sdio_addr & 1)
1128 {
1129 unifi_warning(card->ospriv,
1130 "Warning: Byte write to an odd address (0x%lX) is dangerous\n",
1131 sdio_addr);
1132 }
1133
1134#if defined (CSR_WIFI_HIP_DEBUG_OFFLINE) && defined (CSR_WIFI_HIP_DATA_PLANE_PROFILE)
1135 card->cmd_prof.cmd52_w8or16_count++;
1136#endif
1137#ifdef CSR_WIFI_TRANSPORT_CSPI
1138 w = data;
1139 return retrying_write16(card, card->function, sdio_addr, w);
1140#else
1141 return retrying_write8(card, card->function, sdio_addr, data);
1142#endif
1143} /* unifi_write_8_or_16() */
1144
1145
1146/*
1147 * ---------------------------------------------------------------------------
1148 * unifi_card_read16
1149 *
1150 * Performs a 16-bit read of the given address in shared data memory.
1151 * Set up the shared data memory page register as required.
1152 *
1153 * Arguments:
1154 * card Pointer to card structure.
1155 * unifi_addr UniFi shared-data-memory address to access.
1156 * pdata Pointer to a 16-bit int variable for the value read.
1157 *
1158 * Returns:
1159 * CSR_RESULT_SUCCESS on success, non-zero error code on error:
1160 * CSR_WIFI_HIP_RESULT_NO_DEVICE card was ejected
1161 * CSR_RESULT_FAILURE an SDIO error occurred
1162 * CSR_WIFI_HIP_RESULT_INVALID_VALUE a bad generic pointer was specified
1163 * ---------------------------------------------------------------------------
1164 */
1165CsrResult unifi_card_read16(card_t *card, u32 unifi_addr, u16 *pdata)
1166{
1167 u32 sdio_addr;
1168 CsrResult r;
1169
1170 r = set_page(card, unifi_addr, &sdio_addr);
1171 if (r != CSR_RESULT_SUCCESS)
1172 {
1173 return r;
1174 }
1175
1176#if defined (CSR_WIFI_HIP_DEBUG_OFFLINE) && defined (CSR_WIFI_HIP_DATA_PLANE_PROFILE)
1177 card->cmd_prof.cmd52_r16_count++;
1178#endif
1179 return unifi_read_direct16(card, sdio_addr, pdata);
1180} /* unifi_card_read16() */
1181
1182
1183/*
1184 * ---------------------------------------------------------------------------
1185 * unifi_card_write16
1186 *
1187 * Performs a 16-bit write of the given address in shared data memory.
1188 * Set up the shared data memory page register as required.
1189 *
1190 * Arguments:
1191 * card Pointer to card structure.
1192 * unifi_addr UniFi shared-data-memory address to access.
1193 * pdata Pointer to a byte variable for the value write.
1194 *
1195 * Returns:
1196 * CSR_RESULT_SUCCESS on success, non-zero error code on error:
1197 * CSR_WIFI_HIP_RESULT_NO_DEVICE card was ejected
1198 * CSR_RESULT_FAILURE an SDIO error occurred
1199 * CSR_WIFI_HIP_RESULT_INVALID_VALUE a bad generic pointer was specified
1200 * ---------------------------------------------------------------------------
1201 */
1202CsrResult unifi_card_write16(card_t *card, u32 unifi_addr, u16 data)
1203{
1204 u32 sdio_addr;
1205 CsrResult r;
1206
1207 r = set_page(card, unifi_addr, &sdio_addr);
1208 if (r != CSR_RESULT_SUCCESS)
1209 {
1210 return r;
1211 }
1212
1213#if defined (CSR_WIFI_HIP_DEBUG_OFFLINE) && defined (CSR_WIFI_HIP_DATA_PLANE_PROFILE)
1214 card->cmd_prof.cmd52_w16_count++;
1215#endif
1216 return unifi_write_direct16(card, sdio_addr, data);
1217} /* unifi_card_write16() */
1218
1219
1220/*
1221 * ---------------------------------------------------------------------------
1222 * unifi_read32
1223 *
1224 * Performs a 32-bit read of the given address in shared data memory.
1225 * Set up the shared data memory page register as required.
1226 *
1227 * Arguments:
1228 * card Pointer to card structure.
1229 * unifi_addr UniFi shared-data-memory address to access.
1230 * pdata Pointer to a int variable for the value read.
1231 *
1232 * Returns:
1233 * CSR_RESULT_SUCCESS on success, non-zero error code on error:
1234 * CSR_WIFI_HIP_RESULT_NO_DEVICE card was ejected
1235 * CSR_RESULT_FAILURE an SDIO error occurred
1236 * CSR_WIFI_HIP_RESULT_INVALID_VALUE a bad generic pointer was specified
1237 * ---------------------------------------------------------------------------
1238 */
1239CsrResult unifi_read32(card_t *card, u32 unifi_addr, u32 *pdata)
1240{
1241 u32 sdio_addr;
1242 CsrResult r;
1243
1244 r = set_page(card, unifi_addr, &sdio_addr);
1245 if (r != CSR_RESULT_SUCCESS)
1246 {
1247 return r;
1248 }
1249
1250#if defined (CSR_WIFI_HIP_DEBUG_OFFLINE) && defined (CSR_WIFI_HIP_DATA_PLANE_PROFILE)
1251 card->cmd_prof.cmd52_r32_count++;
1252#endif
1253 return unifi_read_direct32(card, sdio_addr, pdata);
1254} /* unifi_read32() */
1255
1256
1257/*
1258 * ---------------------------------------------------------------------------
1259 * unifi_card_readn
1260 * unifi_readnz
1261 *
1262 * Read multiple 8-bit values from the UniFi SDIO interface.
1263 * This function interprets the address as a GenericPointer as
1264 * defined in the UniFi Host Interface Protocol Specification.
1265 * The readnz version of this function will stop when it reads a
1266 * zero octet.
1267 *
1268 * Arguments:
1269 * card Pointer to card structure.
1270 * unifi_addr UniFi shared-data-memory address to access.
1271 * pdata Pointer to which to write data.
1272 * len Number of bytes to read
1273 *
1274 * Returns:
1275 * CSR_RESULT_SUCCESS on success, non-zero error code on error:
1276 * CSR_WIFI_HIP_RESULT_NO_DEVICE card was ejected
1277 * CSR_RESULT_FAILURE an SDIO error occurred
1278 * CSR_WIFI_HIP_RESULT_INVALID_VALUE a bad generic pointer was specified
1279 * ---------------------------------------------------------------------------
1280 */
1281CsrResult unifi_readn_match(card_t *card, u32 unifi_addr, void *pdata, u16 len, s8 match)
1282{
1283 u32 sdio_addr;
1284 CsrResult r;
1285 u32 num;
1286
1287 r = set_page(card, unifi_addr, &sdio_addr);
1288 if (r != CSR_RESULT_SUCCESS)
1289 {
1290 return r;
1291 }
1292
1293 r = unifi_read_directn_match(card, sdio_addr, pdata, len, match, &num);
1294 return r;
1295} /* unifi_readn_match() */
1296
1297
1298CsrResult unifi_card_readn(card_t *card, u32 unifi_addr, void *pdata, u16 len)
1299{
1300 return unifi_readn_match(card, unifi_addr, pdata, len, -1);
1301} /* unifi_card_readn() */
1302
1303
1304CsrResult unifi_readnz(card_t *card, u32 unifi_addr, void *pdata, u16 len)
1305{
1306 return unifi_readn_match(card, unifi_addr, pdata, len, 0);
1307} /* unifi_readnz() */
1308
1309
1310/*
1311 * ---------------------------------------------------------------------------
1312 * unifi_read_shared_count
1313 *
1314 * Read signal count locations, checking for an SDIO error. The
1315 * signal count locations only contain a valid number if the
1316 * highest bit isn't set.
1317 *
1318 * Arguments:
1319 * card Pointer to card context structure.
1320 * addr Shared-memory address to read.
1321 *
1322 * Returns:
1323 * Value read from memory (0-127) or -1 on error
1324 * ---------------------------------------------------------------------------
1325 */
1326s32 unifi_read_shared_count(card_t *card, u32 addr)
1327{
1328 u8 b;
1329 /* I've increased this count, because I have seen cases where
1330 * there were three reads in a row with the top bit set. I'm not
1331 * sure why this might have happened, but I can't see a problem
1332 * with increasing this limit. It's better to take a while to
1333 * recover than to fail. */
1334#define SHARED_READ_RETRY_LIMIT 10
1335 s32 i;
1336
1337 /*
1338 * Get the to-host-signals-written count.
1339 * The top-bit will be set if the firmware was in the process of
1340 * changing the value, in which case we read again.
1341 */
1342 /* Limit the number of repeats so we don't freeze */
1343 for (i = 0; i < SHARED_READ_RETRY_LIMIT; i++)
1344 {
1345 CsrResult r;
1346 r = unifi_read_8_or_16(card, addr, &b);
1347 if (r != CSR_RESULT_SUCCESS)
1348 {
1349 return -1;
1350 }
1351 if (!(b & 0x80))
1352 {
1353 /* There is a chance that the MSB may have contained invalid data
1354 * (overflow) at the time it was read. Therefore mask off the MSB.
1355 * This avoids a race between driver read and firmware write of the
1356 * word, the value we need is in the lower 8 bits anway.
1357 */
1358 return (s32)(b & 0xff);
1359 }
1360 }
1361
1362 return -1; /* this function has changed in WMM mods */
1363} /* unifi_read_shared_count() */
1364
1365
1366/*
1367 * ---------------------------------------------------------------------------
1368 * unifi_writen
1369 *
1370 * Write multiple 8-bit values to the UniFi SDIO interface using CMD52
1371 * This function interprets the address as a GenericPointer as
1372 * defined in the UniFi Host Interface Protocol Specification.
1373 *
1374 * Arguments:
1375 * card Pointer to card structure.
1376 * unifi_addr UniFi shared-data-memory address to access.
1377 * pdata Pointer to which to write data.
1378 * len Number of bytes to write
1379 *
1380 * Returns:
1381 * 0 on success, non-zero error code on error:
1382 * CSR_WIFI_HIP_RESULT_NO_DEVICE card was ejected
1383 * CSR_RESULT_FAILURE an SDIO error occurred
1384 * CSR_WIFI_HIP_RESULT_INVALID_VALUE an odd length or length too big.
1385 * ---------------------------------------------------------------------------
1386 */
1387CsrResult unifi_writen(card_t *card, u32 unifi_addr, void *pdata, u16 len)
1388{
1389 u32 sdio_addr;
1390 CsrResult r;
1391
1392 r = set_page(card, unifi_addr, &sdio_addr);
1393 if (r != CSR_RESULT_SUCCESS)
1394 {
1395 return r;
1396 }
1397
1398 return unifi_write_directn(card, sdio_addr, pdata, len);
1399} /* unifi_writen() */
1400
1401
/*
 * csr_sdio_block_rw
 *
 * Perform a single CMD53 block transfer in the direction selected by
 * dir_is_write (UNIFI_SDIO_READ or UNIFI_SDIO_WRITE), with optional
 * offline trace / data-plane profiling instrumentation compiled in
 * around the transfer.
 *
 * Returns the raw CSR SDIO result code (NOT a HIP error code); callers
 * such as unifi_bulk_rw() are responsible for retries and translation.
 *
 * NOTE(review): funcnum is accepted but never passed to CsrSdioRead/
 * CsrSdioWrite -- presumably the function number is implicit in
 * card->sdio_if; confirm against the CsrSdio API.
 */
static CsrResult csr_sdio_block_rw(card_t *card, s16 funcnum,
                                   u32 addr, u8 *pdata,
                                   u16 count, s16 dir_is_write)
{
    CsrResult csrResult;

    if (dir_is_write == UNIFI_SDIO_READ)
    {
#if defined (CSR_WIFI_HIP_DEBUG_OFFLINE) && defined (CSR_WIFI_HIP_SDIO_TRACE)
        unifi_debug_log_to_buf("r@%02X#%X=", addr, count);
#endif
#if defined (CSR_WIFI_HIP_DEBUG_OFFLINE) && defined (CSR_WIFI_HIP_DATA_PLANE_PROFILE)
        unifi_debug_log_to_buf("R");
#endif
        csrResult = CsrSdioRead(card->sdio_if, addr, pdata, count);
#if defined (CSR_WIFI_HIP_DEBUG_OFFLINE) && defined (CSR_WIFI_HIP_DATA_PLANE_PROFILE)
        unifi_debug_log_to_buf("<");
#endif
    }
    else
    {
        /* For writes the payload is traced before the transfer. */
#if defined (CSR_WIFI_HIP_DEBUG_OFFLINE) && defined (CSR_WIFI_HIP_SDIO_TRACE)
        unifi_debug_log_to_buf("w@%02X#%X=", addr, count);
        unifi_debug_hex_to_buf(pdata, count > CSR_WIFI_HIP_SDIO_TRACE_DATA_LENGTH?CSR_WIFI_HIP_SDIO_TRACE_DATA_LENGTH : count);
#endif
#if defined (CSR_WIFI_HIP_DEBUG_OFFLINE) && defined (CSR_WIFI_HIP_DATA_PLANE_PROFILE)
        unifi_debug_log_to_buf("W");
#endif
        csrResult = CsrSdioWrite(card->sdio_if, addr, pdata, count);
#if defined (CSR_WIFI_HIP_DEBUG_OFFLINE) && defined (CSR_WIFI_HIP_DATA_PLANE_PROFILE)
        unifi_debug_log_to_buf(">");
#endif
    }
#ifdef CSR_WIFI_HIP_DATA_PLANE_PROFILE
    card->cmd_prof.cmd53_count++;
#endif
    /* For reads the payload (truncated to the trace limit) is logged
     * after the transfer, once we know it succeeded. */
#if defined (CSR_WIFI_HIP_DEBUG_OFFLINE) && defined (CSR_WIFI_HIP_SDIO_TRACE)
    if (csrResult != CSR_RESULT_SUCCESS)
    {
        unifi_debug_log_to_buf("error=%X", csrResult);
    }
    else if (dir_is_write == UNIFI_SDIO_READ)
    {
        unifi_debug_hex_to_buf(pdata, count > CSR_WIFI_HIP_SDIO_TRACE_DATA_LENGTH?CSR_WIFI_HIP_SDIO_TRACE_DATA_LENGTH : count);
    }
    unifi_debug_string_to_buf("\n");
#endif
    return csrResult; /* CSR SDIO (not HIP) error code */
}
1451
1452
1453/*
1454 * ---------------------------------------------------------------------------
1455 * unifi_bulk_rw
1456 *
1457 * Transfer bulk data to or from the UniFi SDIO interface.
1458 * This function is used to read or write signals and bulk data.
1459 *
1460 * Arguments:
1461 * card Pointer to card structure.
1462 * handle Value to put in the Register Address field of the CMD53 req.
1463 * data Pointer to data to write.
1464 * direction One of UNIFI_SDIO_READ or UNIFI_SDIO_WRITE
1465 *
1466 * Returns:
1467 * CSR_RESULT_SUCCESS on success, non-zero error code on error:
1468 * CSR_WIFI_HIP_RESULT_NO_DEVICE card was ejected
1469 * CSR_RESULT_FAILURE an SDIO error occurred
1470 *
1471 * Notes:
1472 * This function uses SDIO CMD53, which is the block transfer mode.
1473 * ---------------------------------------------------------------------------
1474 */
CsrResult unifi_bulk_rw(card_t *card, u32 handle, void *pdata,
                        u32 len, s16 direction)
{
#define CMD53_RETRIES 3
    /*
     * Ideally instead of sleeping, we want to busy wait.
     * Currently there is no framework API to do this. When it becomes available,
     * we can use it to busy wait using usecs
     */
#define REWIND_RETRIES 15 /* when REWIND_DELAY==1msec, or 250 when REWIND_DELAY==50usecs */
#define REWIND_POLLING_RETRIES 5
#define REWIND_DELAY 1 /* msec or 50usecs */
    CsrResult csrResult;              /* SDIO error code */
    CsrResult r = CSR_RESULT_SUCCESS; /* HIP error code */
    s16 retries = CMD53_RETRIES;
    s16 stat_retries;
    u8 stat;
    s16 dump_read;
#ifdef UNIFI_DEBUG
    /* NOTE(review): this inspects a byte of the pointer *value* (at
     * byte index card->lsb of its in-memory representation) to detect
     * odd, i.e. unaligned, buffer addresses -- endian-dependent by
     * design. It does not dereference the buffer. */
    u8 *pdata_lsb = ((u8 *)&pdata) + card->lsb;
#endif
#ifdef CSR_WIFI_MAKE_FAKE_CMD53_ERRORS
    static s16 fake_error;
#endif

    dump_read = 0;
#ifdef UNIFI_DEBUG
    /* Warn about unaligned buffers; dump writes now, reads after the
     * transfer completes. */
    if (*pdata_lsb & 1)
    {
        unifi_notice(card->ospriv, "CD53 request on a unaligned buffer (addr: 0x%X) dir %s-Host\n",
                     pdata, (direction == UNIFI_SDIO_READ)?"To" : "From");
        if (direction == UNIFI_SDIO_WRITE)
        {
            dump(pdata, (u16)len);
        }
        else
        {
            dump_read = 1;
        }
    }
#endif

    /* Defensive checks: CMD53 lengths must be even and fit in 16 bits. */
    if (!pdata)
    {
        unifi_error(card->ospriv, "Null pdata for unifi_bulk_rw() len: %d\n", len);
        return CSR_WIFI_HIP_RESULT_INVALID_VALUE;
    }
    if ((len & 1) || (len > 0xffff))
    {
        unifi_error(card->ospriv, "Impossible CMD53 length requested: %d\n", len);
        return CSR_WIFI_HIP_RESULT_INVALID_VALUE;
    }

    /* Transfer/retry loop: on a retryable CMD53 failure, ask the chip
     * to rewind the transfer and try again, up to CMD53_RETRIES. */
    while (1)
    {
        csrResult = csr_sdio_block_rw(card, card->function, handle,
                                      (u8 *)pdata, (u16)len,
                                      direction);
        if (csrResult == CSR_SDIO_RESULT_NO_DEVICE)
        {
            return CSR_WIFI_HIP_RESULT_NO_DEVICE;
        }
#ifdef CSR_WIFI_MAKE_FAKE_CMD53_ERRORS
        /* Test hook: periodically turn a success into a failure to
         * exercise the rewind/retry path. */
        if (++fake_error > 100)
        {
            fake_error = 90;
            unifi_warning(card->ospriv, "Faking a CMD53 error,\n");
            if (csrResult == CSR_RESULT_SUCCESS)
            {
                csrResult = CSR_RESULT_FAILURE;
            }
        }
#endif
        if (csrResult == CSR_RESULT_SUCCESS)
        {
            if (dump_read)
            {
                dump(pdata, (u16)len);
            }
            break;
        }

        /*
         * At this point the SDIO driver should have written the I/O Abort
         * register to notify UniFi that the command has failed.
         * UniFi-1 and UniFi-2 (not UF6xxx) use the same register to store the
         * Deep Sleep State. This means we have to restore the Deep Sleep
         * State (AWAKE in any case since we can not perform a CMD53 in any other
         * state) by rewriting the I/O Abort register to its previous value.
         */
        if (card->chip_id <= SDIO_CARD_ID_UNIFI_2)
        {
            (void)unifi_set_host_state(card, UNIFI_HOST_STATE_AWAKE);
        }

        /* If csr_sdio_block_rw() failed in a non-retryable way, or retries exhausted
         * then stop retrying
         */
        if (!retryable_sdio_error(csrResult))
        {
            unifi_error(card->ospriv, "Fatal error in a CMD53 transfer\n");
            break;
        }

        /*
         * These happen from time to time, try again
         */
        if (--retries == 0)
        {
            break;
        }

        unifi_trace(card->ospriv, UDBG4,
                    "Error in a CMD53 transfer, retrying (h:%d,l:%u)...\n",
                    (s16)handle & 0xff, len);

        /* The transfer failed, rewind and try again: write the bulk
         * data handle to the rewind register in the SDIO control area. */
        r = unifi_write_8_or_16(card, card->sdio_ctrl_addr + 8,
                                (u8)(handle & 0xff));
        if (r == CSR_WIFI_HIP_RESULT_NO_DEVICE)
        {
            return r;
        }
        if (r != CSR_RESULT_SUCCESS)
        {
            /*
             * If we can't even do CMD52 (register read/write) then
             * stop here.
             */
            unifi_error(card->ospriv, "Failed to write REWIND cmd\n");
            return r;
        }

        /* Signal the UniFi to look for the rewind request. */
        r = CardGenInt(card);
        if (r != CSR_RESULT_SUCCESS)
        {
            return r;
        }

        /* Wait for UniFi to acknowledge the rewind: the firmware clears
         * the rewind register (reads back as 0) when it is ready. */
        stat_retries = REWIND_RETRIES;
        while (1)
        {
            r = unifi_read_8_or_16(card, card->sdio_ctrl_addr + 8, &stat);
            if (r == CSR_WIFI_HIP_RESULT_NO_DEVICE)
            {
                return r;
            }
            if (r != CSR_RESULT_SUCCESS)
            {
                unifi_error(card->ospriv, "Failed to read REWIND status\n");
                return CSR_RESULT_FAILURE;
            }

            if (stat == 0)
            {
                break;
            }
            if (--stat_retries == 0)
            {
                unifi_error(card->ospriv, "Timeout waiting for REWIND ready\n");
                return CSR_RESULT_FAILURE;
            }

            /* Poll for the ack a few times before falling back to
             * sleeping between attempts. */
            if (stat_retries < REWIND_RETRIES - REWIND_POLLING_RETRIES)
            {
                CsrThreadSleep(REWIND_DELAY);
            }
        }
    }

    /* The call to csr_sdio_block_rw() still failed after retrying */
    if (csrResult != CSR_RESULT_SUCCESS)
    {
        unifi_error(card->ospriv, "Block %s failed after %d retries\n",
                    (direction == UNIFI_SDIO_READ)?"read" : "write",
                    CMD53_RETRIES - retries);
        /* Report any SDIO error as a general i/o error */
        return CSR_RESULT_FAILURE;
    }

    /* Collect some stats */
    if (direction == UNIFI_SDIO_READ)
    {
        card->sdio_bytes_read += len;
    }
    else
    {
        card->sdio_bytes_written += len;
    }

    return CSR_RESULT_SUCCESS;
} /* unifi_bulk_rw() */
1671
1672
1673/*
1674 * ---------------------------------------------------------------------------
1675 * unifi_bulk_rw_noretry
1676 *
1677 * Transfer bulk data to or from the UniFi SDIO interface.
1678 * This function is used to read or write signals and bulk data.
1679 *
1680 * Arguments:
1681 * card Pointer to card structure.
1682 * handle Value to put in the Register Address field of
1683 * the CMD53 req.
1684 * data Pointer to data to write.
1685 * direction One of UNIFI_SDIO_READ or UNIFI_SDIO_WRITE
1686 *
1687 * Returns:
1688 * 0 on success, non-zero error code on error:
1689 * CSR_WIFI_HIP_RESULT_NO_DEVICE card was ejected
1690 * CSR_RESULT_FAILURE an SDIO error occurred
1691 *
1692 * Notes:
1693 * This function uses SDIO CMD53, which is the block transfer mode.
1694 * ---------------------------------------------------------------------------
1695 */
1696CsrResult unifi_bulk_rw_noretry(card_t *card, u32 handle, void *pdata,
1697 u32 len, s16 direction)
1698{
1699 CsrResult csrResult;
1700
1701 csrResult = csr_sdio_block_rw(card, card->function, handle,
1702 (u8 *)pdata, (u16)len, direction);
1703 if (csrResult != CSR_RESULT_SUCCESS)
1704 {
1705 unifi_error(card->ospriv, "Block %s failed\n",
1706 (direction == UNIFI_SDIO_READ)?"read" : "write");
1707 return csrResult;
1708 }
1709
1710 return CSR_RESULT_SUCCESS;
1711} /* unifi_bulk_rw_noretry() */
1712
1713
diff --git a/drivers/staging/csr/csr_wifi_hip_chiphelper.c b/drivers/staging/csr/csr_wifi_hip_chiphelper.c
deleted file mode 100644
index 5cf5b8a5a1e1..000000000000
--- a/drivers/staging/csr/csr_wifi_hip_chiphelper.c
+++ /dev/null
@@ -1,793 +0,0 @@
1/*****************************************************************************
2
3 (c) Cambridge Silicon Radio Limited 2011
4 All rights reserved and confidential information of CSR
5
6 Refer to LICENSE.txt included with this source for details
7 on the license terms.
8
9*****************************************************************************/
10
11#include "csr_macro.h"
12#include "csr_wifi_hip_chiphelper_private.h"
13
14#ifndef nelem
15#define nelem(a) (sizeof(a) / sizeof(a[0]))
16#endif
17
18#define counted(foo) { nelem(foo), foo }
19#define null_counted() { 0, NULL }
20
21/* The init values are a set of register writes that we must
22 perform when we first connect to the chip to get it working.
   They switch on the correct clocks and possibly set the host
   interface as a wakeup source. They should not be used if
   proper HIP operation is required, but are useful before we
26 do a code download. */
27static const struct chip_helper_init_values init_vals_v1[] = {
28 { 0xFDBB, 0xFFFF },
29 { 0xFDB6, 0x03FF },
30 { 0xFDB1, 0x01E3 },
31 { 0xFDB3, 0x0FFF },
32 { 0xFEE3, 0x08F0 },
33 { 0xFEE7, 0x3C3F },
34 { 0xFEE6, 0x0050 },
35 { 0xFDBA, 0x0000 }
36};
37
38static const struct chip_helper_init_values init_vals_v2[] = {
39 { 0xFDB6, 0x0FFF },
40 { 0xF023, 0x3F3F },
41 { 0xFDB1, 0x01E3 },
42 { 0xFDB3, 0x0FFF },
43 { 0xF003, 0x08F0 },
44 { 0xF007, 0x3C3F },
45 { 0xF006, 0x0050 }
46};
47
48
49static const struct chip_helper_init_values init_vals_v22_v23[] = {
50 { 0xF81C, 0x00FF },
51 /*{ 0x????, 0x???? }, */
52 { 0xF80C, 0x1FFF },
53 { 0xFA25, 0x001F },
54 { 0xF804, 0x00FF },
55 { 0xF802, 0x0FFF },
56 /*{ 0x????, 0x???? },
57 { 0x????, 0x???? },
58 { 0x????, 0x???? }*/
59};
60
61static const u16 reset_program_a_v1_or_v2[] = {
62 0x0000
63};
64static const u16 reset_program_b_v1_or_v2[] = {
65 0x0010, 0xFE00, 0xA021, 0xFF00, 0x8111, 0x0009, 0x0CA4, 0x0114,
66 0x0280, 0x04F8, 0xFE00, 0x6F25, 0x06E0, 0x0010, 0xFC00, 0x0121,
67 0xFC00, 0x0225, 0xFE00, 0x7125, 0xFE00, 0x6D11, 0x03F0, 0xFE00,
68 0x6E25, 0x0008, 0x00E0
69};
70
71static const struct chip_helper_reset_values reset_program_v1_or_v2[] =
72{
73 {
74 MAKE_GP(REGISTERS, 0x000C),
75 nelem(reset_program_a_v1_or_v2),
76 reset_program_a_v1_or_v2
77 },
78 {
79 MAKE_GP(MAC_PMEM, 0x000000),
80 nelem(reset_program_b_v1_or_v2),
81 reset_program_b_v1_or_v2
82 }
83};
84
85static const struct chip_map_address_t unifi_map_address_v1_v2[] =
86{
87 { 0xFE9F, 0xFE7B }, /* PM1_BANK_SELECT */
88 { 0xFE9E, 0xFE78 }, /* PM2_BANK_SELECT */
89 { 0xFE9D, 0xFE7E }, /* SHARED_DMEM_PAGE */
90 { 0xFE91, 0xFE90 }, /* PROC_SELECT */
91 { 0xFE8D, 0xFE8C }, /* STOP_STATUS */
92};
93
94static const struct chip_map_address_t unifi_map_address_v22_v23[] =
95{
96 { 0xF8F9, 0xF8AC }, /* GW1_CONFIG */
97 { 0xF8FA, 0xF8AD }, /* GW2_CONFIG */
98 { 0xF8FB, 0xF8AE }, /* GW3_CONFIG */
99 { 0xF830, 0xF81E }, /* PROC_SELECT */
100 { 0xF831, 0xF81F }, /* STOP_STATUS */
101 { 0xF8FC, 0xF8AF }, /* IO_LOG_ADDRESS */
102};
103
104static const struct chip_device_regs_t unifi_device_regs_null =
105{
106 0xFE81, /* GBL_CHIP_VERSION */
107 0x0000, /* GBL_MISC_ENABLES */
108 0x0000, /* DBG_EMU_CMD */
109 {
110 0x0000, /* HOST.DBG_PROC_SELECT */
111 0x0000, /* HOST.DBG_STOP_STATUS */
112 0x0000, /* HOST.WINDOW1_PAGE */
113 0x0000, /* HOST.WINDOW2_PAGE */
114 0x0000, /* HOST.WINDOW3_PAGE */
115 0x0000 /* HOST.IO_LOG_ADDR */
116 },
117 {
118 0x0000, /* SPI.DBG_PROC_SELECT */
119 0x0000, /* SPI.DBG_STOP_STATUS */
120 0x0000, /* SPI.WINDOW1_PAGE */
121 0x0000, /* SPI.WINDOW2_PAGE */
122 0x0000, /* SPI.WINDOW3_PAGE */
123 0x0000 /* SPI.IO_LOG_ADDR */
124 },
125 0x0000, /* DBG_RESET */
126 0x0000, /* > DBG_RESET_VALUE */
127 0x0000, /* DBG_RESET_WARN */
128 0x0000, /* DBG_RESET_WARN_VALUE */
129 0x0000, /* DBG_RESET_RESULT */
130 0xFFE9, /* XAP_PCH */
131 0xFFEA, /* XAP_PCL */
132 0x0000, /* PROC_PC_SNOOP */
133 0x0000, /* WATCHDOG_DISABLE */
134 0x0000, /* MAILBOX0 */
135 0x0000, /* MAILBOX1 */
136 0x0000, /* MAILBOX2 */
137 0x0000, /* MAILBOX3 */
138 0x0000, /* SDIO_HOST_INT */
139 0x0000, /* SHARED_IO_INTERRUPT */
140 0x0000, /* SDIO HIP HANDSHAKE */
141 0x0000 /* COEX_STATUS */
142};
143
144/* UF105x */
145static const struct chip_device_regs_t unifi_device_regs_v1 =
146{
147 0xFE81, /* GBL_CHIP_VERSION */
148 0xFE87, /* GBL_MISC_ENABLES */
149 0xFE9C, /* DBG_EMU_CMD */
150 {
151 0xFE90, /* HOST.DBG_PROC_SELECT */
152 0xFE8C, /* HOST.DBG_STOP_STATUS */
153 0xFE7B, /* HOST.WINDOW1_PAGE */
154 0xFE78, /* HOST.WINDOW2_PAGE */
155 0xFE7E, /* HOST.WINDOW3_PAGE */
156 0x0000 /* HOST.IO_LOG_ADDR */
157 },
158 {
159 0xFE91, /* SPI.DBG_PROC_SELECT */
160 0xFE8D, /* SPI.DBG_STOP_STATUS */
161 0xFE9F, /* SPI.WINDOW1_PAGE */
162 0xFE9E, /* SPI.WINDOW2_PAGE */
163 0xFE9D, /* SPI.WINDOW3_PAGE */
164 0x0000 /* SPI.IO_LOG_ADDR */
165 },
166 0xFE92, /* DBG_RESET */
167 0x0001, /* > DBG_RESET_VALUE */
168 0xFDA0, /* DBG_RESET_WARN (HOST_SELECT) */
169 0x0000, /* DBG_RESET_WARN_VALUE */
170 0xFE92, /* DBG_RESET_RESULT */
171 0xFFE9, /* XAP_PCH */
172 0xFFEA, /* XAP_PCL */
173 0x0051, /* PROC_PC_SNOOP */
174 0xFE70, /* WATCHDOG_DISABLE */
175 0xFE6B, /* MAILBOX0 */
176 0xFE6A, /* MAILBOX1 */
177 0xFE69, /* MAILBOX2 */
178 0xFE68, /* MAILBOX3 */
179 0xFE67, /* SDIO_HOST_INT */
180 0xFE65, /* SHARED_IO_INTERRUPT */
181 0xFDE9, /* SDIO HIP HANDSHAKE */
182 0x0000 /* COEX_STATUS */
183};
184
185/* UF2... */
186static const struct chip_device_regs_t unifi_device_regs_v2 =
187{
188 0xFE81, /* GBL_CHIP_VERSION */
189 0xFE87, /* GBL_MISC_ENABLES */
190 0xFE9C, /* DBG_EMU_CMD */
191 {
192 0xFE90, /* HOST.DBG_PROC_SELECT */
193 0xFE8C, /* HOST.DBG_STOP_STATUS */
194 0xFE7B, /* HOST.WINDOW1_PAGE */
195 0xFE78, /* HOST.WINDOW2_PAGE */
196 0xFE7E, /* HOST.WINDOW3_PAGE */
197 0x0000 /* HOST.IO_LOG_ADDR */
198 },
199 {
200 0xFE91, /* SPI.DBG_PROC_SELECT */
201 0xFE8D, /* SPI.DBG_STOP_STATUS */
202 0xFE9F, /* SPI.WINDOW1_PAGE */
203 0xFE9E, /* SPI.WINDOW2_PAGE */
204 0xFE9D, /* SPI.WINDOW3_PAGE */
205 0x0000 /* SPI.IO_LOG_ADDR */
206 },
207 0xFE92, /* DBG_RESET */
208 0x0000, /* > DBG_RESET_VALUE */
209 0xFDE9, /* DBG_RESET_WARN (TEST_FLASH_DATA - SHARED_MAILBOX2B) */
210 0xFFFF, /* DBG_RESET_WARN_VALUE */
211 0xFDE9, /* DBG_RESET_RESULT (TEST_FLASH_DATA) */
212 0xFFE9, /* XAP_PCH */
213 0xFFEA, /* XAP_PCL */
214 0x0051, /* PROC_PC_SNOOP */
215 0xFE70, /* WATCHDOG_DISABLE */
216 0xFE6B, /* MAILBOX0 */
217 0xFE6A, /* MAILBOX1 */
218 0xFE69, /* MAILBOX2 */
219 0xFE68, /* MAILBOX3 */
220 0xFE67, /* SDIO_HOST_INT */
221 0xFE65, /* SHARED_IO_INTERRUPT */
222 0xFE69, /* SDIO HIP HANDSHAKE */
223 0x0000 /* COEX_STATUS */
224};
225
226/* UF60xx */
227static const struct chip_device_regs_t unifi_device_regs_v22_v23 =
228{
229 0xFE81, /* GBL_CHIP_VERSION */
230 0xF84F, /* GBL_MISC_ENABLES */
231 0xF81D, /* DBG_EMU_CMD */
232 {
233 0xF81E, /* HOST.DBG_PROC_SELECT */
234 0xF81F, /* HOST.DBG_STOP_STATUS */
235 0xF8AC, /* HOST.WINDOW1_PAGE */
236 0xF8AD, /* HOST.WINDOW2_PAGE */
237 0xF8AE, /* HOST.WINDOW3_PAGE */
238 0xF8AF /* HOST.IO_LOG_ADDR */
239 },
240 {
241 0xF830, /* SPI.DBG_PROC_SELECT */
242 0xF831, /* SPI.DBG_STOP_STATUS */
243 0xF8F9, /* SPI.WINDOW1_PAGE */
244 0xF8FA, /* SPI.WINDOW2_PAGE */
245 0xF8FB, /* SPI.WINDOW3_PAGE */
246 0xF8FC /* SPI.IO_LOG_ADDR */
247 },
248 0xF82F, /* DBG_RESET */
249 0x0001, /* > DBG_RESET_VALUE */
250 0x0000, /* DBG_RESET_WARN */
251 0x0000, /* DBG_RESET_WARN_VALUE */
252 0xF82F, /* DBG_RESET_RESULT */
253 0xFFE9, /* XAP_PCH */
254 0xFFEA, /* XAP_PCL */
255 0x001B, /* PROC_PC_SNOOP */
256 0x0055, /* WATCHDOG_DISABLE */
257 0xF84B, /* MAILBOX0 */
258 0xF84C, /* MAILBOX1 */
259 0xF84D, /* MAILBOX2 */
260 0xF84E, /* MAILBOX3 */
261 0xF92F, /* SDIO_HOST_INT */
262 0xF92B, /* SDIO_FROMHOST_SCRTACH0 / SHARED_IO_INTERRUPT */
263 0xF84D, /* SDIO HIP HANDSHAKE (MAILBOX2) */
264 0xF9FB /* COEX_STATUS */
265};
266
267/* Program memory window on UF105x. */
268static const struct window_shift_info_t prog_window_array_unifi_v1_v2[CHIP_HELPER_WT_COUNT] =
269{
270 { TRUE, 11, 0x0200 }, /* CODE RAM */
271 { TRUE, 11, 0x0000 }, /* FLASH */
272 { TRUE, 11, 0x0400 }, /* External SRAM */
273 { FALSE, 0, 0 }, /* ROM */
274 { FALSE, 0, 0 } /* SHARED */
275};
276
277/* Shared memory window on UF105x. */
278static const struct window_shift_info_t shared_window_array_unifi_v1_v2[CHIP_HELPER_WT_COUNT] =
279{
280 { FALSE, 0, 0 }, /* CODE RAM */
281 { FALSE, 0, 0 }, /* FLASH */
282 { FALSE, 0, 0 }, /* External SRAM */
283 { FALSE, 0, 0 }, /* ROM */
284 { TRUE, 11, 0x0000 } /* SHARED */
285};
286
287/* One of the Generic Windows on UF60xx and later. */
288static const struct window_shift_info_t generic_window_array_unifi_v22_v23[CHIP_HELPER_WT_COUNT] =
289{
290 { TRUE, 11, 0x3800 }, /* CODE RAM */
291 { FALSE, 0, 0 }, /* FLASH */
292 { FALSE, 0, 0 }, /* External SRAM */
293 { TRUE, 11, 0x2000 }, /* ROM */
294 { TRUE, 11, 0x0000 } /* SHARED */
295};
296
297/* The three windows on UF105x. */
298static const struct window_info_t prog1_window_unifi_v1_v2 = { 0x0000, 0x2000, 0x0080, prog_window_array_unifi_v1_v2 };
299static const struct window_info_t prog2_window_unifi_v1_v2 = { 0x2000, 0x2000, 0x0000, prog_window_array_unifi_v1_v2 };
300static const struct window_info_t shared_window_unifi_v1_v2 = { 0x4000, 0x2000, 0x0000, shared_window_array_unifi_v1_v2 };
301
302/* The three windows on UF60xx and later. */
303static const struct window_info_t generic1_window_unifi_v22_v23 = { 0x0000, 0x2000, 0x0080, generic_window_array_unifi_v22_v23 };
304static const struct window_info_t generic2_window_unifi_v22_v23 = { 0x2000, 0x2000, 0x0000, generic_window_array_unifi_v22_v23 };
305static const struct window_info_t generic3_window_unifi_v22_v23 = { 0x4000, 0x2000, 0x0000, generic_window_array_unifi_v22_v23 };
306
307static const struct chip_device_desc_t chip_device_desc_null =
308{
309 { FALSE, 0x0000, 0x0000, 0x00 },
310 "",
311 "",
312 null_counted(), /* init */
313 null_counted(), /* reset_prog */
314 &unifi_device_regs_null, /* regs */
315 {
316 FALSE, /* has_flash */
317 FALSE, /* has_ext_sram */
318 FALSE, /* has_rom */
319 FALSE, /* has_bt */
320 FALSE, /* has_wlan */
321 },
322 null_counted(),
323 /* prog_offset */
324 {
325 0x00000000,
326 0x00000000,
327 0x00000000,
328 0x00000000
329 },
330 /* data_offset */
331 {
332 0x0000 /* ram */
333 },
334 /* windows */
335 {
336 NULL,
337 NULL,
338 NULL
339 }
340};
341
342static const struct chip_device_desc_t unifi_device_desc_v1 =
343{
344 { FALSE, 0xf0ff, 0x1001, 0x01 }, /* UF105x R01 */
345 "UF105x",
346 "UniFi-1",
347 counted(init_vals_v1), /* init */
348 counted(reset_program_v1_or_v2), /* reset_prog */
349 &unifi_device_regs_v1, /* regs */
350 {
351 TRUE, /* has_flash */
352 TRUE, /* has_ext_sram */
353 FALSE, /* has_rom */
354 FALSE, /* has_bt */
355 TRUE, /* has_wlan */
356 },
357 counted(unifi_map_address_v1_v2), /* map */
358 /* prog_offset */
359 {
360 0x00100000, /* ram */
361 0x00000000, /* rom (invalid) */
362 0x00000000, /* flash */
363 0x00200000, /* ext_ram */
364 },
365 /* data_offset */
366 {
367 0x8000 /* ram */
368 },
369 /* windows */
370 {
371 &prog1_window_unifi_v1_v2,
372 &prog2_window_unifi_v1_v2,
373 &shared_window_unifi_v1_v2
374 }
375};
376
377static const struct chip_device_desc_t unifi_device_desc_v2 =
378{
379 { FALSE, 0xf0ff, 0x2001, 0x02 }, /* UF2... R02 */
380 "UF2...",
381 "UniFi-2",
382 counted(init_vals_v2), /* init */
383 counted(reset_program_v1_or_v2), /* reset_prog */
384 &unifi_device_regs_v2, /* regs */
385 {
386 TRUE, /* has_flash */
387 TRUE, /* has_ext_sram */
388 FALSE, /* has_rom */
389 FALSE, /* has_bt */
390 TRUE, /* has_wlan */
391 },
392 counted(unifi_map_address_v1_v2), /* map */
393 /* prog_offset */
394 {
395 0x00100000, /* ram */
396 0x00000000, /* rom (invalid) */
397 0x00000000, /* flash */
398 0x00200000, /* ext_ram */
399 },
400 /* data_offset */
401 {
402 0x8000 /* ram */
403 },
404 /* windows */
405 {
406 &prog1_window_unifi_v1_v2,
407 &prog2_window_unifi_v1_v2,
408 &shared_window_unifi_v1_v2
409 }
410};
411
412static const struct chip_device_desc_t unifi_device_desc_v3 =
413{
414 { FALSE, 0xf0ff, 0x3001, 0x02 }, /* UF2... R03 */
415 "UF2...",
416 "UniFi-3",
417 counted(init_vals_v2), /* init */
418 counted(reset_program_v1_or_v2), /* reset_prog */
419 &unifi_device_regs_v2, /* regs */
420 {
421 TRUE, /* has_flash */
422 TRUE, /* has_ext_sram */
423 FALSE, /* has_rom */
424 FALSE, /* has_bt */
425 TRUE, /* has_wlan */
426 },
427 counted(unifi_map_address_v1_v2), /* map */
428 /* prog_offset */
429 {
430 0x00100000, /* ram */
431 0x00000000, /* rom (invalid) */
432 0x00000000, /* flash */
433 0x00200000, /* ext_ram */
434 },
435 /* data_offset */
436 {
437 0x8000 /* ram */
438 },
439 /* windows */
440 {
441 &prog1_window_unifi_v1_v2,
442 &prog2_window_unifi_v1_v2,
443 &shared_window_unifi_v1_v2
444 }
445};
446
447static const struct chip_device_desc_t unifi_device_desc_v22 =
448{
449 { FALSE, 0x00ff, 0x0022, 0x07 }, /* UF60xx */
450 "UF60xx",
451 "UniFi-4",
452 counted(init_vals_v22_v23), /* init */
453 null_counted(), /* reset_prog */
454 &unifi_device_regs_v22_v23, /* regs */
455 {
456 FALSE, /* has_flash */
457 FALSE, /* has_ext_sram */
458 TRUE, /* has_rom */
459 FALSE, /* has_bt */
460 TRUE, /* has_wlan */
461 },
462 counted(unifi_map_address_v22_v23), /* map */
463 /* prog_offset */
464 {
465 0x00C00000, /* ram */
466 0x00000000, /* rom */
467 0x00000000, /* flash (invalid) */
468 0x00000000, /* ext_ram (invalid) */
469 },
470 /* data_offset */
471 {
472 0x8000 /* ram */
473 },
474 /* windows */
475 {
476 &generic1_window_unifi_v22_v23,
477 &generic2_window_unifi_v22_v23,
478 &generic3_window_unifi_v22_v23
479 }
480};
481
482static const struct chip_device_desc_t unifi_device_desc_v23 =
483{
484 { FALSE, 0x00ff, 0x0023, 0x08 }, /* UF.... */
485 "UF....",
486 "UF.... (5)",
487 counted(init_vals_v22_v23), /* init */
488 null_counted(), /* reset_prog */
489 &unifi_device_regs_v22_v23, /* regs */
490 {
491 FALSE, /* has_flash */
492 FALSE, /* has_ext_sram */
493 TRUE, /* has_rom */
494 TRUE, /* has_bt */
495 TRUE, /* has_wlan */
496 },
497 counted(unifi_map_address_v22_v23),
498 /* prog_offset */
499 {
500 0x00C00000, /* ram */
501 0x00000000, /* rom */
502 0x00000000, /* flash (invalid) */
503 0x00000000, /* ext_sram (invalid) */
504 },
505 /* data_offset */
506 {
507 0x8000 /* ram */
508 },
509 /* windows */
510 {
511 &generic1_window_unifi_v22_v23,
512 &generic2_window_unifi_v22_v23,
513 &generic3_window_unifi_v22_v23
514 }
515};
516
517static const struct chip_device_desc_t hyd_wlan_subsys_desc_v1 =
518{
519 { FALSE, 0x00ff, 0x0044, 0x00 }, /* UF.... */
520 "HYD...",
521 "HYD... ",
522 counted(init_vals_v22_v23), /* init */
523 null_counted(), /* reset_prog */
524 &unifi_device_regs_v22_v23, /* regs */
525 {
526 FALSE, /* has_flash */
527 FALSE, /* has_ext_sram */
528 TRUE, /* has_rom */
529 FALSE, /* has_bt */
530 TRUE, /* has_wlan */
531 },
532 counted(unifi_map_address_v22_v23),
533 /* prog_offset */
534 {
535 0x00C00000, /* ram */
536 0x00000000, /* rom */
537 0x00000000, /* flash (invalid) */
538 0x00000000, /* ext_sram (invalid) */
539 },
540 /* data_offset */
541 {
542 0x8000 /* ram */
543 },
544 /* windows */
545 {
546 &generic1_window_unifi_v22_v23,
547 &generic2_window_unifi_v22_v23,
548 &generic3_window_unifi_v22_v23
549 }
550};
551
552
553/* This is the list of all chips that we know about. I'm
554 assuming that the order here will be important - we
   might have multiple entries with the same SDIO id for
556 instance. The first one in this list will be the one
557 that is returned if a search is done on only that id.
558 The client will then have to call GetVersionXXX again
559 but with more detailed info.
560
561 I don't know if we need to signal this up to the client
562 in some way?
563
564 (We get the SDIO id before we know anything else about
565 the chip. We might not be able to read any of the other
566 registers at first, but we still need to know about the
567 chip). */
568static const struct chip_device_desc_t *chip_ver_to_desc[] =
569{
570 &unifi_device_desc_v1, /* UF105x R01 */
571 &unifi_device_desc_v2, /* UF2... R02 */
572 &unifi_device_desc_v3, /* UF2... R03 */
573 &unifi_device_desc_v22, /* UF60xx */
574 &unifi_device_desc_v23, /* UF.... */
575 &hyd_wlan_subsys_desc_v1
576};
577
578ChipDescript* ChipHelper_GetVersionSdio(u8 sdio_ver)
579{
580 u32 i;
581
582 for (i = 0; i < nelem(chip_ver_to_desc); i++)
583 {
584 if (chip_ver_to_desc[i]->chip_version.sdio == sdio_ver)
585 {
586 return chip_ver_to_desc[i];
587 }
588 }
589
590 return &chip_device_desc_null;
591}
592
593
594ChipDescript* ChipHelper_GetVersionAny(u16 from_FF9A, u16 from_FE81)
595{
596 u32 i;
597
598 if ((from_FF9A & 0xFF00) != 0)
599 {
600 for (i = 0; i < nelem(chip_ver_to_desc); i++)
601 {
602 if (chip_ver_to_desc[i]->chip_version.pre_bc7 &&
603 ((from_FF9A & chip_ver_to_desc[i]->chip_version.mask) ==
604 chip_ver_to_desc[i]->chip_version.result))
605 {
606 return chip_ver_to_desc[i];
607 }
608 }
609 }
610 else
611 {
612 for (i = 0; i < nelem(chip_ver_to_desc); i++)
613 {
614 if (!chip_ver_to_desc[i]->chip_version.pre_bc7 &&
615 ((from_FE81 & chip_ver_to_desc[i]->chip_version.mask) ==
616 chip_ver_to_desc[i]->chip_version.result))
617 {
618 return chip_ver_to_desc[i];
619 }
620 }
621 }
622
623 return &chip_device_desc_null;
624}
625
626
627ChipDescript* ChipHelper_GetVersionUniFi(u16 ver)
628{
629 return ChipHelper_GetVersionAny(0x0000, ver);
630}
631
632
633ChipDescript *ChipHelper_Null(void)
634{
635 return &chip_device_desc_null;
636}
637
638
639ChipDescript* ChipHelper_GetVersionBlueCore(enum chip_helper_bluecore_age bc_age, u16 version)
640{
641 if (bc_age == chip_helper_bluecore_pre_bc7)
642 {
643 return ChipHelper_GetVersionAny(version, 0x0000);
644 }
645 else
646 {
647 return ChipHelper_GetVersionAny(0x0000, version);
648 }
649}
650
651
652/* Expand the DEF0 functions into simple code to return the
653 correct thing. The DEF1 functions expand to nothing in
654 this X macro expansion. */
655#define CHIP_HELPER_DEF0_C_DEF(ret_type, name, info) \
656 ret_type ChipHelper_ ## name(ChipDescript * chip_help) \
657 { \
658 return chip_help->info; \
659 }
660#define CHIP_HELPER_DEF1_C_DEF(ret_type, name, type1, name1)
661
662CHIP_HELPER_LIST(C_DEF)
663
664/*
665 * Map register addresses between HOST and SPI access.
666 */
667u16 ChipHelper_MapAddress_SPI2HOST(ChipDescript *chip_help, u16 addr)
668{
669 u32 i;
670 for (i = 0; i < chip_help->map.len; i++)
671 {
672 if (chip_help->map.vals[i].spi == addr)
673 {
674 return chip_help->map.vals[i].host;
675 }
676 }
677 return addr;
678}
679
680
681u16 ChipHelper_MapAddress_HOST2SPI(ChipDescript *chip_help, u16 addr)
682{
683 u32 i;
684 for (i = 0; i < chip_help->map.len; i++)
685 {
686 if (chip_help->map.vals[i].host == addr)
687 {
688 return chip_help->map.vals[i].spi;
689 }
690 }
691 return addr;
692}
693
694
695/* The address returned by this function is the start of the
696 window in the address space, that is where we can start
697 accessing data from. If a section of the window at the
698 start is unusable because something else is cluttering up
699 the address map then that is taken into account and this
   function returns that address just past that. */
701u16 ChipHelper_WINDOW_ADDRESS(ChipDescript *chip_help,
702 enum chip_helper_window_index window)
703{
704 if (window < CHIP_HELPER_WINDOW_COUNT &&
705 chip_help->windows[window] != NULL)
706 {
707 return chip_help->windows[window]->address + chip_help->windows[window]->blocked;
708 }
709 return 0;
710}
711
712
713/* This returns the size of the window minus any blocked section */
714u16 ChipHelper_WINDOW_SIZE(ChipDescript *chip_help,
715 enum chip_helper_window_index window)
716{
717 if (window < CHIP_HELPER_WINDOW_COUNT &&
718 chip_help->windows[window] != NULL)
719 {
720 return chip_help->windows[window]->size - chip_help->windows[window]->blocked;
721 }
722 return 0;
723}
724
725
726/* Get the register writes we should do to make sure that
727 the chip is running with most clocks on. */
728u32 ChipHelper_ClockStartupSequence(ChipDescript *chip_help,
729 const struct chip_helper_init_values **val)
730{
731 *val = chip_help->init.vals;
732 return chip_help->init.len;
733}
734
735
/* Get the set of values that we should write to the chip to perform a reset. */
737u32 ChipHelper_HostResetSequence(ChipDescript *chip_help,
738 const struct chip_helper_reset_values **val)
739{
740 *val = chip_help->reset_prog.vals;
741 return chip_help->reset_prog.len;
742}
743
744
745/* Decode a windowed access to the chip. */
746s32 ChipHelper_DecodeWindow(ChipDescript *chip_help,
747 enum chip_helper_window_index window,
748 enum chip_helper_window_type type,
749 u32 offset,
750 u16 *page, u16 *addr, u32 *len)
751{
752 const struct window_info_t *win;
753 const struct window_shift_info_t *mode;
754 u16 of, pg;
755
756 if (window >= CHIP_HELPER_WINDOW_COUNT)
757 {
758 return FALSE;
759 }
760 if ((win = chip_help->windows[window]) == NULL)
761 {
762 return FALSE;
763 }
764 if (type >= CHIP_HELPER_WT_COUNT)
765 {
766 return FALSE;
767 }
768 if ((mode = &win->mode[type]) == NULL)
769 {
770 return FALSE;
771 }
772 if (!mode->allowed)
773 {
774 return FALSE;
775 }
776
777 pg = (u16)(offset >> mode->page_shift) + mode->page_offset;
778 of = (u16)(offset & ((1 << mode->page_shift) - 1));
779 /* If 'blocked' is zero this does nothing, else decrease
780 the page register and increase the offset until we aren't
781 in the blocked region of the window. */
782 while (of < win->blocked)
783 {
784 of += 1 << mode->page_shift;
785 pg--;
786 }
787 *page = pg;
788 *addr = win->address + of;
789 *len = win->size - of;
790 return TRUE;
791}
792
793
diff --git a/drivers/staging/csr/csr_wifi_hip_chiphelper.h b/drivers/staging/csr/csr_wifi_hip_chiphelper.h
deleted file mode 100644
index 09b3aefcbced..000000000000
--- a/drivers/staging/csr/csr_wifi_hip_chiphelper.h
+++ /dev/null
@@ -1,407 +0,0 @@
1/*****************************************************************************
2
3 (c) Cambridge Silicon Radio Limited 2011
4 All rights reserved and confidential information of CSR
5
6 Refer to LICENSE.txt included with this source for details
7 on the license terms.
8
9*****************************************************************************/
10
11#ifndef CSR_WIFI_HIP_CHIPHELPER_H__
12#define CSR_WIFI_HIP_CHIPHELPER_H__
13
14
15#include <linux/types.h>
16
/* The age of the BlueCore chip.  This is probably not useful; if
   you know the age then you can probably work out the version directly. */
enum chip_helper_bluecore_age
{
    chip_helper_bluecore_pre_bc7,      /* BlueCore earlier than BC7 */
    chip_helper_bluecore_bc7_or_later  /* BC7 or any later BlueCore */
};
24
/* We support up to three windowed regions at the moment.
   Don't reorder these - they're used to index into an array
   (the windows[] table in the chip descriptor). */
enum chip_helper_window_index
{
    CHIP_HELPER_WINDOW_1 = 0,
    CHIP_HELPER_WINDOW_2 = 1,
    CHIP_HELPER_WINDOW_3 = 2,
    CHIP_HELPER_WINDOW_COUNT = 3  /* number of windows, not a real index */
};
34
/* These are the things that we can access through a window.
   Don't reorder these - they're used to index into an array
   (the per-window mode[] table). */
enum chip_helper_window_type
{
    CHIP_HELPER_WT_CODE_RAM = 0,
    CHIP_HELPER_WT_FLASH    = 1,
    CHIP_HELPER_WT_EXT_SRAM = 2,
    CHIP_HELPER_WT_ROM      = 3,
    CHIP_HELPER_WT_SHARED   = 4,
    CHIP_HELPER_WT_COUNT    = 5  /* number of types, not a real type */
};
46
/* Commands to stop and start the XAP (bit masks for the DBG_EMU_CMD register). */
enum chip_helper_dbg_emu_cmd_enum
{
    CHIP_HELPER_DBG_EMU_CMD_XAP_STEP_MASK   = 0x0001,
    CHIP_HELPER_DBG_EMU_CMD_XAP_RUN_B_MASK  = 0x0002,
    CHIP_HELPER_DBG_EMU_CMD_XAP_BRK_MASK    = 0x0004,
    CHIP_HELPER_DBG_EMU_CMD_XAP_WAKEUP_MASK = 0x0008
};
55
/* Bitmasks for Stop and sleep status: DBG_SPI_STOP_STATUS & DBG_HOST_STOP_STATUS */
enum chip_helper_dbg_stop_status_enum
{
    CHIP_HELPER_DBG_STOP_STATUS_NONE_MASK = 0x0000,
    CHIP_HELPER_DBG_STOP_STATUS_P0_MASK = 0x0001,
    CHIP_HELPER_DBG_STOP_STATUS_P1_MASK = 0x0002,
    CHIP_HELPER_DBG_STOP_STATUS_P2_MASK = 0x0004,
    CHIP_HELPER_DBG_STOP_STATUS_SLEEP_STATUS_P0_MASK = 0x0008,
    CHIP_HELPER_DBG_STOP_STATUS_SLEEP_STATUS_P1_MASK = 0x0010,
    CHIP_HELPER_DBG_STOP_STATUS_SLEEP_STATUS_P2_MASK = 0x0020,
    /* Legacy names/alias: deliberately the same bit values as the
       P0/P1/P2 names above, for code that calls the processors
       MAC/PHY/BT instead. */
    CHIP_HELPER_DBG_STOP_STATUS_MAC_MASK = 0x0001,
    CHIP_HELPER_DBG_STOP_STATUS_PHY_MASK = 0x0002,
    CHIP_HELPER_DBG_STOP_STATUS_BT_MASK = 0x0004,
    CHIP_HELPER_DBG_STOP_STATUS_SLEEP_STATUS_MAC_MASK = 0x0008,
    CHIP_HELPER_DBG_STOP_STATUS_SLEEP_STATUS_PHY_MASK = 0x0010,
    CHIP_HELPER_DBG_STOP_STATUS_SLEEP_STATUS_BT_MASK = 0x0020
};
74
/* Codes to disable the watchdog (written to the WATCHDOG_DISABLE
   register; exact write protocol is chip-specific — see the users of
   ChipHelper_WATCHDOG_DISABLE). */
enum chip_helper_watchdog_disable_enum
{
    CHIP_HELPER_WATCHDOG_DISABLE_CODE1 = 0x6734,
    CHIP_HELPER_WATCHDOG_DISABLE_CODE2 = 0xD6BF,
    CHIP_HELPER_WATCHDOG_DISABLE_CODE3 = 0xC31E
};
82
/* Bits of the GBL_MISC_ENABLES register that are stable; other bits
   have changed between chip versions. */
enum chip_helper_gbl_misc_enum
{
    CHIP_HELPER_GBL_MISC_SPI_STOP_OUT_EN_MASK = 0x0001,
    CHIP_HELPER_GBL_MISC_MMU_INIT_DONE_MASK   = 0x0004
};
89
/* Coex status register, contains interrupt status and reset pullup status.
 * CHIP_HELPER_COEX_STATUS_RST_PULLS_MSB_MASK can be used to check
 * for WAPI on R03 chips and later. */
enum chip_helper_coex_status_mask_enum
{
    CHIP_HELPER_COEX_STATUS_RST_PULLS_LSB_MASK   = 0x0001,
    CHIP_HELPER_COEX_STATUS_RST_PULLS_MSB_MASK   = 0x0008,
    CHIP_HELPER_COEX_STATUS_WL_FEC_PINS_LSB_MASK = 0x0010,
    CHIP_HELPER_COEX_STATUS_WL_FEC_PINS_MSB_MASK = 0x0080,
    CHIP_HELPER_COEX_STATUS_INT_UART_MASK        = 0x0100,
    CHIP_HELPER_COEX_STATUS_INT_BT_LEG_MASK      = 0x0200
};
102
/* How to select the different CPUs (values for the DBG_*_PROC_SELECT
   registers).  Note that NONE and BT share the value 2. */
enum chip_helper_dbg_proc_sel_enum
{
    CHIP_HELPER_DBG_PROC_SEL_MAC  = 0,
    CHIP_HELPER_DBG_PROC_SEL_PHY  = 1,
    CHIP_HELPER_DBG_PROC_SEL_BT   = 2,
    CHIP_HELPER_DBG_PROC_SEL_NONE = 2,
    CHIP_HELPER_DBG_PROC_SEL_BOTH = 3
};
112
/* These are the only registers that we have to know the
   address of before we know the chip version. */
enum chip_helper_fixed_registers
{
    /* This is the address of GBL_CHIP_VERISON on BC7,
       UF105x, UF60xx and
       anything later than that. */
    CHIP_HELPER_UNIFI_GBL_CHIP_VERSION = 0xFE81,

    /* The old (pre-BC7) GBL_CHIP_VERSION register. */
    CHIP_HELPER_OLD_BLUECORE_GBL_CHIP_VERSION = 0xFF9A

    /* This isn't used at the moment (but might be needed
       to distinguish the BlueCore sub version?) */
    /* CHIP_HELPER_OLD_BLUECORE_ANA_VERSION_ID = 0xFF7D */
};
128
/* Address-value pairs for defining initialisation values */
struct chip_helper_init_values
{
    u16 addr;   /* register address to write */
    u16 value;  /* value to write there */
};
135
/* A block of data that should be written to the device */
struct chip_helper_reset_values
{
    u32 gp_address;   /* generic-pointer destination for the block */
    u32 len;          /* number of u16 words in 'data' */
    const u16 *data;  /* the words to write */
};
143
144/*
145 * This is the C API.
146 */
147
148/* opaque type */
149typedef const struct chip_device_desc_t ChipDescript;
150
151/* Return a NULL descriptor */
152ChipDescript* ChipHelper_Null(void);
153
154/* This should get the correct version for any CSR chip.
155 The two parameters are what is read from addresses
156 0xFF9A and 0xFE81 (OLD_BLUECORE_GBL_CHIP_VERSION and
157 UNIFI_GBL_CHIP_VERSION). These should give a unique identity
158 for most (all?) chips.
159
160 FF9A is the old GBL_CHIP_VERSION register. If the high
161 eight bits are zero then the chip is a new (BC7 +) one
162 and FE81 is the _new_ GBL_CHIP_VERSION register. */
163ChipDescript* ChipHelper_GetVersionAny(u16 from_FF9A, u16 from_FE81);
164
165/* The chip is a UniFi, but we don't know which type
166 The parameter is the value of UNIFI_GBL_CHIP_VERSION (0xFE81) */
167ChipDescript* ChipHelper_GetVersionUniFi(u16 version);
168
169/* This gets the version from the SDIO device id. This only
170 gives quite a coarse grained version, so we should update once
171 we have access to the function N registers. */
172ChipDescript* ChipHelper_GetVersionSdio(u8 sdio_version);
173
174/* The chip is some sort of BlueCore. If "age" is "pre_bc7" then
175 "version" is what was read from FF9A. If "age" is bc7_or_later
176 then "version" is read from FE81. If we don't know if we're pre
177 or post BC7 then we should use "GetVersionAny". */
178ChipDescript* ChipHelper_GetVersionBlueCore(enum chip_helper_bluecore_age age,
179 u16 version);
180
/* The main functions of this class are built with an X macro.  This
   means we can generate the C and C++ versions from the same source
   without the two diverging.

   The DEF0 functions are simple and take no parameters.  The first
   parameter to the macro is the return type.  The second parameter
   is the function name and the third parameter is where to get the
   info from (this is hidden from the user).

   The DEF1 functions take one parameter.  This time the third macro
   parameter is the type of this parameter, and the fourth macro
   parameter is the name of the parameter.  The bodies of these
   functions are hand written.

   To use it, define CHIP_HELPER_DEF0_<m> and CHIP_HELPER_DEF1_<m>
   for some tag <m> and invoke CHIP_HELPER_LIST(m) — see the C_DEC
   prototype generators below for an example. */
#define CHIP_HELPER_LIST(m) \
    CHIP_HELPER_DEF0(m, (const char *, FriendlyName, friendly_name)) \
    CHIP_HELPER_DEF0(m, (const char *, MarketingName, marketing_name)) \
    CHIP_HELPER_DEF0(m, (u16, DBG_EMU_CMD, regs->dbg_emu_cmd)) \
    CHIP_HELPER_DEF0(m, (u16, DBG_HOST_PROC_SELECT, regs->host.dbg_proc_select)) \
    CHIP_HELPER_DEF0(m, (u16, DBG_HOST_STOP_STATUS, regs->host.dbg_stop_status)) \
    CHIP_HELPER_DEF0(m, (u16, HOST_WINDOW1_PAGE, regs->host.window1_page)) \
    CHIP_HELPER_DEF0(m, (u16, HOST_WINDOW2_PAGE, regs->host.window2_page)) \
    CHIP_HELPER_DEF0(m, (u16, HOST_WINDOW3_PAGE, regs->host.window3_page)) \
    CHIP_HELPER_DEF0(m, (u16, HOST_IO_LOG_ADDR, regs->host.io_log_addr)) \
    CHIP_HELPER_DEF0(m, (u16, DBG_SPI_PROC_SELECT, regs->spi.dbg_proc_select)) \
    CHIP_HELPER_DEF0(m, (u16, DBG_SPI_STOP_STATUS, regs->spi.dbg_stop_status)) \
    CHIP_HELPER_DEF0(m, (u16, SPI_WINDOW1_PAGE, regs->spi.window1_page)) \
    CHIP_HELPER_DEF0(m, (u16, SPI_WINDOW2_PAGE, regs->spi.window2_page)) \
    CHIP_HELPER_DEF0(m, (u16, SPI_WINDOW3_PAGE, regs->spi.window3_page)) \
    CHIP_HELPER_DEF0(m, (u16, SPI_IO_LOG_ADDR, regs->spi.io_log_addr)) \
    CHIP_HELPER_DEF0(m, (u16, DBG_RESET, regs->dbg_reset)) \
    CHIP_HELPER_DEF0(m, (u16, DBG_RESET_VALUE, regs->dbg_reset_value)) \
    CHIP_HELPER_DEF0(m, (u16, DBG_RESET_WARN, regs->dbg_reset_warn)) \
    CHIP_HELPER_DEF0(m, (u16, DBG_RESET_WARN_VALUE, regs->dbg_reset_warn_value)) \
    CHIP_HELPER_DEF0(m, (u16, DBG_RESET_RESULT, regs->dbg_reset_result)) \
    CHIP_HELPER_DEF0(m, (u16, WATCHDOG_DISABLE, regs->watchdog_disable)) \
    CHIP_HELPER_DEF0(m, (u16, PROC_PC_SNOOP, regs->proc_pc_snoop)) \
    CHIP_HELPER_DEF0(m, (u16, GBL_CHIP_VERSION, regs->gbl_chip_version)) \
    CHIP_HELPER_DEF0(m, (u16, GBL_MISC_ENABLES, regs->gbl_misc_enables)) \
    CHIP_HELPER_DEF0(m, (u16, XAP_PCH, regs->xap_pch)) \
    CHIP_HELPER_DEF0(m, (u16, XAP_PCL, regs->xap_pcl)) \
    CHIP_HELPER_DEF0(m, (u16, MAILBOX0, regs->mailbox0)) \
    CHIP_HELPER_DEF0(m, (u16, MAILBOX1, regs->mailbox1)) \
    CHIP_HELPER_DEF0(m, (u16, MAILBOX2, regs->mailbox2)) \
    CHIP_HELPER_DEF0(m, (u16, MAILBOX3, regs->mailbox3)) \
    CHIP_HELPER_DEF0(m, (u16, SDIO_HIP_HANDSHAKE, regs->sdio_hip_handshake)) \
    CHIP_HELPER_DEF0(m, (u16, SDIO_HOST_INT, regs->sdio_host_int)) \
    CHIP_HELPER_DEF0(m, (u16, COEX_STATUS, regs->coex_status)) \
    CHIP_HELPER_DEF0(m, (u16, SHARED_IO_INTERRUPT, regs->shared_io_interrupt)) \
    CHIP_HELPER_DEF0(m, (u32, PROGRAM_MEMORY_RAM_OFFSET, prog_offset.ram)) \
    CHIP_HELPER_DEF0(m, (u32, PROGRAM_MEMORY_ROM_OFFSET, prog_offset.rom)) \
    CHIP_HELPER_DEF0(m, (u32, PROGRAM_MEMORY_FLASH_OFFSET, prog_offset.flash)) \
    CHIP_HELPER_DEF0(m, (u32, PROGRAM_MEMORY_EXT_SRAM_OFFSET, prog_offset.ext_sram)) \
    CHIP_HELPER_DEF0(m, (u16, DATA_MEMORY_RAM_OFFSET, data_offset.ram)) \
    CHIP_HELPER_DEF0(m, (s32, HasFlash, bools.has_flash)) \
    CHIP_HELPER_DEF0(m, (s32, HasExtSram, bools.has_ext_sram)) \
    CHIP_HELPER_DEF0(m, (s32, HasRom, bools.has_rom)) \
    CHIP_HELPER_DEF0(m, (s32, HasBt, bools.has_bt)) \
    CHIP_HELPER_DEF0(m, (s32, HasWLan, bools.has_wlan)) \
    CHIP_HELPER_DEF1(m, (u16, WINDOW_ADDRESS, enum chip_helper_window_index, window)) \
    CHIP_HELPER_DEF1(m, (u16, WINDOW_SIZE, enum chip_helper_window_index, window)) \
    CHIP_HELPER_DEF1(m, (u16, MapAddress_SPI2HOST, u16, addr)) \
    CHIP_HELPER_DEF1(m, (u16, MapAddress_HOST2SPI, u16, addr)) \
    CHIP_HELPER_DEF1(m, (u32, ClockStartupSequence, const struct chip_helper_init_values **, val)) \
    CHIP_HELPER_DEF1(m, (u32, HostResetSequence, const struct chip_helper_reset_values **, val))
245
/* Some magic to help the expansion: paste the expansion tag (e.g. C_DEC)
   onto DEF0_/DEF1_ and apply the resulting macro to the bracketed
   argument list. */
#define CHIP_HELPER_DEF0(a, b) \
    CHIP_HELPER_DEF0_ ## a b
#define CHIP_HELPER_DEF1(a, b) \
    CHIP_HELPER_DEF1_ ## a b

/* Macros so that when we expand the list we get "C" function prototypes. */
#define CHIP_HELPER_DEF0_C_DEC(ret_type, name, info) \
    ret_type ChipHelper_ ## name(ChipDescript * chip_help);
#define CHIP_HELPER_DEF1_C_DEC(ret_type, name, type1, name1) \
    ret_type ChipHelper_ ## name(ChipDescript * chip_help, type1 name1);

/* Emit a prototype for every accessor in the list. */
CHIP_HELPER_LIST(C_DEC)
259
260/* FriendlyName
261 MarketingName
262
263 These two functions return human readable strings that describe
264 the chip. FriendlyName returns something that a software engineer
265 at CSR might understand. MarketingName returns something more like
266 an external name for a CSR chip.
267*/
268/* DBG_EMU_CMD
269 WATCHDOG_DISABLE
270 PROC_PC_SNOOP
271 GBL_CHIP_VERSION
272 XAP_PCH
273 XAP_PCL
274
275 These registers are used to control the XAPs.
276*/
277/* DBG_HOST_PROC_SELECT DBG_HOST_STOP_STATUS
278 HOST_WINDOW1_PAGE HOST_WINDOW2_PAGE HOST_WINDOW3_PAGE
279 HOST_IO_LOG_ADDR
280 DBG_SPI_PROC_SELECT DBG_SPI_STOP_STATUS
281 SPI_WINDOW1_PAGE SPI_WINDOW2_PAGE SPI_WINDOW3_PAGE
282 SPI_IO_LOG_ADDR
283
284 These register are used to control the XAPs and the memory
285 windows, normally while debugging the code on chip. There
286 are two versions of these registers, one for access via SPI
287 and another for access via the host interface.
288*/
289/* DBG_RESET
290 DBG_RESET_VALUE
291 DBG_RESET_WARN
292 DBG_RESET_WARN_VALUE
293 DBG_RESET_RESULT
294
295 These registers are used to reset the XAP. This can be
296 quite complex for some chips. If DBG_RESET_WARN is non
297 zero the DBG_RESET_WARN_VALUE should be written to address
298 DBG_RESET_WARN before the reset is performed. DBG_RESET_VALUE
299 should then be written to DBG_RESET to make the reset happen.
300 The DBG_RESET_RESULT register should contain 0 if the reset
301 was successful.
302*/
303/* GBL_MISC_ENABLES
304
305 This register controls some special chip features. It
306 should be used with care is it changes quite a lot between
307 chip versions.
308*/
309/* MAILBOX0
310 MAILBOX1
311 MAILBOX2
312 MAILBOX3
313
314 The mailbox registers are for communication between the host
315 and the firmware. Their use is described in part by the host
316 interface protocol specification.
317*/
318/* SDIO_HIP_HANDSHAKE
319
320 This is one of the more important SDIO HIP registers. On some
321 chips it has the same value as one of the mailbox registers
322 and on other chips it is different.
323*/
324/* SDIO_HOST_INT
325 SHARED_IO_INTERRUPT
326
327 These registers are used by some versions of the host interface
328 protocol specification. Their names should probably be changed
329 to hide the registers and to expose the functions more.
330*/
331/* COEX_STATUS
332
333 Coex status register, contains interrupt status and reset
334 pullup status. The latter is used to detect WAPI.
335*/
336/* PROGRAM_MEMORY_RAM_OFFSET
337 PROGRAM_MEMORY_ROM_OFFSET
338 PROGRAM_MEMORY_FLASH_OFFSET
339 PROGRAM_MEMORY_EXT_SRAM_OFFSET
340 DATA_MEMORY_RAM_OFFSET
341
342 These are constants that describe the offset of the different
343 memory types in the two different address spaces.
344*/
345/* HasFlash HasExtSram HasRom
346 HasBt HasWLan
347
348 These are a set of bools describing the chip.
349*/
350/* WINDOW_ADDRESS WINDOW_SIZE
351
352 These two functions return the size and address of the windows.
353 The address is the address of the lowest value in the address
354 map that is part of the window and the size is the number of
355 visible words.
356
357 Some of the windows have their lowest portion covered by
358 registers. For these windows address is the first address
359 after the registers and size is the size excluding the part
360 covered by registers.
361*/
362/* MapAddress_SPI2HOST
363 MapAddress_HOST2SPI
364
365 The debugging interface is duplicated on UniFi and later chips
366 so that there are two versions - one over the SPI interface and
367 the other over the SDIO interface. These functions map the
368 registers between these two interfaces.
369*/
370/* ClockStartupSequence
371
372 This function returns the list of register value pairs that
373 should be forced into UniFi to enable SPI communication. This
374 set of registers is not needed if the firmware is running, but
375 will be needed if the device is being booted from cold. These
376 register writes enable the clocks and setup the PLL to a basic
377 working state. SPI access might be unreliable until these writes
378 have occurred (and they may take multiple goes).
379*/
380/* HostResetSequence
381
382 This returns a number of chunks of data and generic pointers.
383 All of the XAPs should be stopped. The data should be written
384 to the generic pointers. The instruction pointer for the MAC
385 should then be set to the start of program memory and then the
386 MAC should be "go"d. This will reset the chip in a reliable
387 and orderly manner without resetting the SDIO interface. It
388 is therefore not needed if the chip is being accessed by the
389 SPI interface (the DBG_RESET_ mechanism can be used instead).
390*/
391
392/* The Decode Window function is more complex. For the window
393 'window' it tries to return the address and page register
394 value needed to see offset 'offset' of memory type 'type'.
395
396 It return 1 on success and 0 on failure. 'page' is what
397 should be written to the page register. 'addr' is the
398 address in the XAPs 16 address map to read from. 'len'
399 is the length that we can read without having to change
400 the page registers. */
401s32 ChipHelper_DecodeWindow(ChipDescript *chip_help,
402 enum chip_helper_window_index window,
403 enum chip_helper_window_type type,
404 u32 offset,
405 u16 *page, u16 *addr, u32 *len);
406
407#endif
diff --git a/drivers/staging/csr/csr_wifi_hip_chiphelper_private.h b/drivers/staging/csr/csr_wifi_hip_chiphelper_private.h
deleted file mode 100644
index e5e579912550..000000000000
--- a/drivers/staging/csr/csr_wifi_hip_chiphelper_private.h
+++ /dev/null
@@ -1,200 +0,0 @@
1/*****************************************************************************
2
3 (c) Cambridge Silicon Radio Limited 2011
4 All rights reserved and confidential information of CSR
5
6 Refer to LICENSE.txt included with this source for details
7 on the license terms.
8
9*****************************************************************************/
10
11#ifndef CSR_WIFI_HIP_CHIPHELPER_PRIVATE_H__
12#define CSR_WIFI_HIP_CHIPHELPER_PRIVATE_H__
13
14
15#include "csr_wifi_hip_chiphelper.h"
16
/* This GP stuff should be somewhere else? */

/* Memory spaces encoded in top byte of Generic Pointer type */
#define UNIFI_SH_DMEM   0x01    /* Shared Data Memory */
#define UNIFI_EXT_FLASH 0x02    /* External FLASH */
#define UNIFI_EXT_SRAM  0x03    /* External SRAM */
#define UNIFI_REGISTERS 0x04    /* Registers */
#define UNIFI_PHY_DMEM  0x10    /* PHY Data Memory */
#define UNIFI_PHY_PMEM  0x11    /* PHY Program Memory */
#define UNIFI_PHY_ROM   0x12    /* PHY ROM */
#define UNIFI_MAC_DMEM  0x20    /* MAC Data Memory */
#define UNIFI_MAC_PMEM  0x21    /* MAC Program Memory */
#define UNIFI_MAC_ROM   0x22    /* MAC ROM */
#define UNIFI_BT_DMEM   0x30    /* BT Data Memory */
#define UNIFI_BT_PMEM   0x31    /* BT Program Memory */
#define UNIFI_BT_ROM    0x32    /* BT ROM */

/* Build a generic pointer from a space tag R (without the UNIFI_ prefix)
   and a 24-bit offset O; GP_OFFSET/GP_SPACE take it apart again. */
#define MAKE_GP(R, O)  (((UNIFI_ ## R) << 24) | (O))
#define GP_OFFSET(GP)  ((GP) & 0xFFFFFF)
#define GP_SPACE(GP)   (((GP) >> 24) & 0xFF)
37
38
/* Address value pairs */
struct val_array_t
{
    u32 len;                                     /* number of entries in 'vals' */
    const struct chip_helper_init_values *vals;  /* the (address, value) pairs */
};
45
/* Just a (counted) u16 array */
struct data_array_t
{
    u32 len;          /* number of words in 'vals' */
    const u16 *vals;  /* the data words */
};
52
/* A counted list of reset-program chunks (see chip_helper_reset_values). */
struct reset_prog_t
{
    u32 len;                                      /* number of entries in 'vals' */
    const struct chip_helper_reset_values *vals;  /* the chunks to download */
};
58
/* The addresses of registers that are equivalent but on
   different host transports. */
struct chip_map_address_t
{
    u16 spi, host;  /* same register as seen over SPI and over the host (SDIO) interface */
};
65
/* A counted table of SPI<->host register address mappings. */
struct map_array_t
{
    u32 len;                                  /* number of entries in 'vals' */
    const struct chip_map_address_t *vals;    /* the address pairs */
};
71
/* Debug/window registers that exist once per host transport
   (one instance for SDIO/host access, one for SPI access). */
struct chip_device_regs_per_transport_t
{
    u16 dbg_proc_select;
    u16 dbg_stop_status;
    u16 window1_page; /* PROG_PMEM1 or GW1 */
    u16 window2_page; /* PROG_PMEM2 or GW2 */
    u16 window3_page; /* SHARED or GW3 */
    u16 io_log_addr;
};
81
/* The full register map for one chip version.  Each field holds the
   16-bit address of the named register on that chip. */
struct chip_device_regs_t
{
    u16 gbl_chip_version;
    u16 gbl_misc_enables;
    u16 dbg_emu_cmd;
    struct chip_device_regs_per_transport_t host;  /* registers as seen over SDIO/host */
    struct chip_device_regs_per_transport_t spi;   /* registers as seen over SPI */
    u16 dbg_reset;
    u16 dbg_reset_value;
    u16 dbg_reset_warn;
    u16 dbg_reset_warn_value;
    u16 dbg_reset_result;
    u16 xap_pch;
    u16 xap_pcl;
    u16 proc_pc_snoop;
    u16 watchdog_disable;
    u16 mailbox0;
    u16 mailbox1;
    u16 mailbox2;
    u16 mailbox3;
    u16 sdio_host_int;
    u16 shared_io_interrupt;
    u16 sdio_hip_handshake;
    u16 coex_status; /* Allows WAPI detection */
};
107
/* If allowed is false then this window does not provide this
   type of access.
   This describes how addresses should be shifted to make the
   "page" address.  The address is shifted right by 'page_shift'
   and then has 'page_offset' added.  This value should then be
   written to the page register (see ChipHelper_DecodeWindow). */
struct window_shift_info_t
{
    s32 allowed;      /* non-zero if this window can map this memory type */
    u32 page_shift;   /* bits of offset covered by one page */
    u16 page_offset;  /* added to the shifted offset to form the page value */
};
120
/* Each window has an address and size.  These are obvious.  It then
   has a description for each type of memory that might be accessed
   through it.  There might also be a start to the offset of the window.
   This means that that number of addresses at the start of the window
   are unusable. */
struct window_info_t
{
    u16 address;  /* first usable XAP address of the window */
    u16 size;     /* usable size of the window */
    u16 blocked;  /* offsets below this are covered by registers (unusable) */
    const struct window_shift_info_t *mode;  /* indexed by enum chip_helper_window_type */
};
133
/* If GBL_CHIP_VERSION and'ed with 'mask' is equal to 'result'
   then this is the correct set of info.  If pre_bc7 is true then the
   address of GBL_CHIP_VERSION is FF9A, else it's FE81. */
struct chip_version_t
{
    s32 pre_bc7;  /* non-zero: match against the old (FF9A) version register */
    u16 mask;     /* bits of GBL_CHIP_VERSION to compare */
    u16 result;   /* expected value after masking */
    u8  sdio;     /* SDIO device id for this chip (0 if not matchable) */
};
144
/* Everything the helper knows about one chip version: how to identify
   it, its register map, memory windows and misc capabilities.  This is
   the concrete type behind the opaque ChipDescript handle. */
struct chip_device_desc_t
{
    struct chip_version_t chip_version;

    /* This is a text string that a human might find useful (BC02, UF105x) */
    const char *friendly_name;
    /* This is what we show to customers */
    const char *marketing_name;

    /* Initialisation values to write following a reset */
    struct val_array_t init;

    /* Binary sequence for hard reset */
    struct reset_prog_t reset_prog;

    /* The register map */
    const struct chip_device_regs_t *regs;

    /* Some misc. info on the chip */
    struct
    {
        u32 has_flash    : 1;
        u32 has_ext_sram : 1;
        u32 has_rom      : 1;
        u32 has_bt       : 1;
        u32 has_wlan     : 1;
    } bools;

    /* This table is used to remap register addresses depending on what
       host interface is used.  On the BC7 and later chips there are
       multiple sets of memory window registers, one for each host
       interface (SDIO / SPI).  The correct one is needed. */
    struct map_array_t map;

    /* The offsets into the program address space of the different types of memory.
       The RAM offset is probably the most useful. */
    struct
    {
        u32 ram;
        u32 rom;
        u32 flash;
        u32 ext_sram;
    } prog_offset;

    /* The offsets into the data address space of interesting things. */
    struct
    {
        u16 ram;
        /* maybe add shared / page tables? */
    } data_offset;

    /* Information on the different windows */
    const struct window_info_t *windows[CHIP_HELPER_WINDOW_COUNT];
};
199
200#endif /* CSR_WIFI_HIP_CHIPHELPER_PRIVATE_H__ */
diff --git a/drivers/staging/csr/csr_wifi_hip_conversions.h b/drivers/staging/csr/csr_wifi_hip_conversions.h
deleted file mode 100644
index bf7a52e82995..000000000000
--- a/drivers/staging/csr/csr_wifi_hip_conversions.h
+++ /dev/null
@@ -1,73 +0,0 @@
1/*****************************************************************************
2
3 (c) Cambridge Silicon Radio Limited 2011
4 All rights reserved and confidential information of CSR
5
6 Refer to LICENSE.txt included with this source for details
7 on the license terms.
8
9*****************************************************************************/
10
11/*
12 * ---------------------------------------------------------------------------
13 *
14 * FILE: csr_wifi_hip_conversions.h
15 *
16 * PURPOSE:
17 * This header file provides the macros for converting to and from
18 * wire format.
19 * These macros *MUST* work for little-endian AND big-endian hosts.
20 *
21 * ---------------------------------------------------------------------------
22 */
23#ifndef __CSR_WIFI_HIP_CONVERSIONS_H__
24#define __CSR_WIFI_HIP_CONVERSIONS_H__
25
/* Sizes (in octets) of the fixed-width fields in the wire format. */
#define SIZEOF_UINT16 2
#define SIZEOF_UINT32 4
#define SIZEOF_UINT64 8

#define SIZEOF_SIGNAL_HEADER 6
#define SIZEOF_DATAREF       4


/*
 * Macro to retrieve the signal ID from a wire-format signal.
 */
#define GET_SIGNAL_ID(_buf) CSR_GET_UINT16_FROM_LITTLE_ENDIAN((_buf))

/*
 * Macros to retrieve and set the DATAREF fields in a packed (i.e. wire-format)
 * HIP signal.  Each DATAREF is a (slot, len) pair of u16s located at
 * SIZEOF_SIGNAL_HEADER + _ref * SIZEOF_DATAREF in the buffer.
 */
#define GET_PACKED_DATAREF_SLOT(_buf, _ref) \
    CSR_GET_UINT16_FROM_LITTLE_ENDIAN(((_buf) + SIZEOF_SIGNAL_HEADER + ((_ref) * SIZEOF_DATAREF) + 0))

#define GET_PACKED_DATAREF_LEN(_buf, _ref) \
    CSR_GET_UINT16_FROM_LITTLE_ENDIAN(((_buf) + SIZEOF_SIGNAL_HEADER + ((_ref) * SIZEOF_DATAREF) + 2))

#define SET_PACKED_DATAREF_SLOT(_buf, _ref, _slot) \
    CSR_COPY_UINT16_TO_LITTLE_ENDIAN((_slot), ((_buf) + SIZEOF_SIGNAL_HEADER + ((_ref) * SIZEOF_DATAREF) + 0))

#define SET_PACKED_DATAREF_LEN(_buf, _ref, _len) \
    CSR_COPY_UINT16_TO_LITTLE_ENDIAN((_len), ((_buf) + SIZEOF_SIGNAL_HEADER + ((_ref) * SIZEOF_DATAREF) + 2))

/* Field accessors for MA-PACKET request/confirm signals; the fixed
   offsets (8, 4, 2) address fields that follow the data references. */
#define GET_PACKED_MA_PACKET_REQUEST_FRAME_PRIORITY(_buf) \
    CSR_GET_UINT16_FROM_LITTLE_ENDIAN(((_buf) + SIZEOF_SIGNAL_HEADER + UNIFI_MAX_DATA_REFERENCES * SIZEOF_DATAREF + 8))

#define GET_PACKED_MA_PACKET_REQUEST_HOST_TAG(_buf) \
    CSR_GET_UINT32_FROM_LITTLE_ENDIAN(((_buf) + SIZEOF_SIGNAL_HEADER + UNIFI_MAX_DATA_REFERENCES * SIZEOF_DATAREF + 4))

#define GET_PACKED_MA_PACKET_CONFIRM_HOST_TAG(_buf) \
    CSR_GET_UINT32_FROM_LITTLE_ENDIAN(((_buf) + SIZEOF_SIGNAL_HEADER + UNIFI_MAX_DATA_REFERENCES * SIZEOF_DATAREF + 8))

#define GET_PACKED_MA_PACKET_CONFIRM_TRANSMISSION_STATUS(_buf) \
    CSR_GET_UINT16_FROM_LITTLE_ENDIAN(((_buf) + SIZEOF_SIGNAL_HEADER + UNIFI_MAX_DATA_REFERENCES * SIZEOF_DATAREF + 2))
66
67
68s32 get_packed_struct_size(const u8 *buf);
69CsrResult read_unpack_signal(const u8 *ptr, CSR_SIGNAL *sig);
70CsrResult write_pack(const CSR_SIGNAL *sig, u8 *ptr, u16 *sig_len);
71
72#endif /* __CSR_WIFI_HIP_CONVERSIONS_H__ */
73
diff --git a/drivers/staging/csr/csr_wifi_hip_download.c b/drivers/staging/csr/csr_wifi_hip_download.c
deleted file mode 100644
index 2f44a383d2cf..000000000000
--- a/drivers/staging/csr/csr_wifi_hip_download.c
+++ /dev/null
@@ -1,819 +0,0 @@
1/*****************************************************************************
2
3 (c) Cambridge Silicon Radio Limited 2012
4 All rights reserved and confidential information of CSR
5
6 Refer to LICENSE.txt included with this source for details
7 on the license terms.
8
9*****************************************************************************/
10
11/*
12 * ---------------------------------------------------------------------------
13 * FILE: csr_wifi_hip_download.c
14 *
15 * PURPOSE:
16 * Routines for downloading firmware to UniFi.
17 *
18 * ---------------------------------------------------------------------------
19 */
20#include <linux/slab.h>
21#include "csr_wifi_hip_unifi.h"
22#include "csr_wifi_hip_unifiversion.h"
23#include "csr_wifi_hip_card.h"
24#include "csr_wifi_hip_xbv.h"
25
26#undef CSR_WIFI_IGNORE_PATCH_VERSION_MISMATCH
27
28static CsrResult do_patch_download(card_t *card, void *dlpriv,
29 xbv1_t *pfwinfo, u32 boot_ctrl_addr);
30
31static CsrResult do_patch_convert_download(card_t *card,
32 void *dlpriv, xbv1_t *pfwinfo);
33
34/*
35 * ---------------------------------------------------------------------------
36 * _find_in_slut
37 *
38 * Find the offset of the appropriate object in the SLUT of a card
39 *
40 * Arguments:
41 * card Pointer to card struct
42 * psym Pointer to symbol object.
43 * id set up by caller
44 * obj will be set up by this function
45 * pslut Pointer to SLUT address, if 0xffffffff then it must be
46 * read from the chip.
47 * Returns:
48 * CSR_RESULT_SUCCESS on success
49 * Non-zero on error,
50 * CSR_WIFI_HIP_RESULT_NOT_FOUND if not found
51 * ---------------------------------------------------------------------------
52 */
static CsrResult _find_in_slut(card_t *card, symbol_t *psym, u32 *pslut)
{
    u32 slut_address;
    u16 finger_print;
    CsrResult r;
    CsrResult csrResult;

    /* Get SLUT address.  0xffffffff means "not yet discovered": wait for
       the firmware to start and learn the table's address from it. */
    if (*pslut == 0xffffffff)
    {
        r = card_wait_for_firmware_to_start(card, &slut_address);
        if (r == CSR_WIFI_HIP_RESULT_NO_DEVICE)
        {
            return r;
        }
        if (r != CSR_RESULT_SUCCESS)
        {
            unifi_error(card->ospriv, "Firmware hasn't started\n");
            return r;
        }
        /* Cache the address so subsequent calls skip the wait above. */
        *pslut = slut_address;

        /*
         * Firmware has started so set the SDIO bus clock to the initial speed,
         * faster than UNIFI_SDIO_CLOCK_SAFE_HZ, to speed up the f/w download.
         */
        csrResult = CsrSdioMaxBusClockFrequencySet(card->sdio_if, UNIFI_SDIO_CLOCK_INIT_HZ);
        if (csrResult != CSR_RESULT_SUCCESS)
        {
            r = ConvertCsrSdioToCsrHipResult(card, csrResult);
            return r;
        }
        card->sdio_clock_speed = UNIFI_SDIO_CLOCK_INIT_HZ;
    }
    else
    {
        slut_address = *pslut; /* Use previously discovered address */
    }
    unifi_trace(card->ospriv, UDBG4, "SLUT addr: 0x%lX\n", slut_address);

    /*
     * Check the SLUT fingerprint.
     * The slut_address is a generic pointer so we must use unifi_card_read16().
     */
    unifi_trace(card->ospriv, UDBG4, "Looking for SLUT finger print\n");
    finger_print = 0;
    r = unifi_card_read16(card, slut_address, &finger_print);
    if (r == CSR_WIFI_HIP_RESULT_NO_DEVICE)
    {
        return r;
    }
    if (r != CSR_RESULT_SUCCESS)
    {
        unifi_error(card->ospriv, "Failed to read SLUT finger print\n");
        return r;
    }

    if (finger_print != SLUT_FINGERPRINT)
    {
        unifi_error(card->ospriv, "Failed to find SLUT fingerprint\n");
        return CSR_RESULT_FAILURE;
    }

    /* Symbol table starts immediately after the fingerprint */
    slut_address += 2;

    /* Walk the (u16 id, u32 object) records until psym->id matches or
       the CSR_SLT_END terminator is reached. */
    while (1)
    {
        u16 id;
        u32 obj;

        r = unifi_card_read16(card, slut_address, &id);
        if (r != CSR_RESULT_SUCCESS)
        {
            return r;
        }
        slut_address += 2;

        if (id == CSR_SLT_END)
        {
            /* End of table reached: not found */
            r = CSR_WIFI_HIP_RESULT_RANGE;
            break;
        }

        r = unifi_read32(card, slut_address, &obj);
        if (r != CSR_RESULT_SUCCESS)
        {
            return r;
        }
        slut_address += 4;

        unifi_trace(card->ospriv, UDBG3, "  found SLUT id %02d.%08lx\n", id, obj);

        r = CSR_WIFI_HIP_RESULT_NOT_FOUND;
        /* Found search term? */
        if (id == psym->id)
        {
            unifi_trace(card->ospriv, UDBG1, " matched SLUT id %02d.%08lx\n", id, obj);
            psym->obj = obj;
            r = CSR_RESULT_SUCCESS;
            break;
        }
    }

    return r;
}
160
161
162/*
163 * ---------------------------------------------------------------------------
164 * do_patch_convert_download
165 *
166 * Download the given firmware image to the UniFi, converting from FWDL
167 * to PTDL XBV format.
168 *
169 * Arguments:
170 * card Pointer to card struct
171 * dlpriv Pointer to source firmware image
172 * fwinfo Pointer to source firmware info struct
173 *
174 * Returns:
175 * CSR_RESULT_SUCCESS on success, CSR error code on error
176 *
177 * Notes:
178 * ---------------------------------------------------------------------------
179 */
static CsrResult do_patch_convert_download(card_t *card, void *dlpriv, xbv1_t *pfwinfo)
{
    CsrResult r;
    u32 slut_base = 0xffffffff;
    void *pfw;
    u32 psize;
    symbol_t sym;

    /* Reset the chip to guarantee that the ROM loader is running */
    r = unifi_init(card);
    if (r != CSR_RESULT_SUCCESS)
    {
        unifi_error(card->ospriv,
                    "do_patch_convert_download: failed to re-init UniFi\n");
        return r;
    }

    /* If no unifi_helper is running, the firmware version must be read
       from the chip's SLUT before a patch can be built against it. */
    if (card->build_id == 0)
    {
        u32 ver = 0;
        sym.id = CSR_SLT_BUILD_ID_NUMBER;
        sym.obj = 0; /* To be updated by _find_in_slut() */

        unifi_trace(card->ospriv, UDBG1, "Need f/w version\n");

        /* Find chip build id entry in SLUT */
        r = _find_in_slut(card, &sym, &slut_base);
        if (r != CSR_RESULT_SUCCESS)
        {
            unifi_error(card->ospriv, "Failed to find CSR_SLT_BUILD_ID_NUMBER\n");
            return CSR_RESULT_FAILURE;
        }

        /* Read running f/w version */
        r = unifi_read32(card, sym.obj, &ver);
        if (r == CSR_WIFI_HIP_RESULT_NO_DEVICE)
        {
            return r;
        }
        if (r != CSR_RESULT_SUCCESS)
        {
            unifi_error(card->ospriv, "Failed to read f/w id\n");
            return CSR_RESULT_FAILURE;
        }
        card->build_id = ver;
    }

    /* Convert the ptest firmware to a patch against the running firmware.
       xbv_to_patch() allocates 'pfw'; every path below must kfree() it. */
    pfw = xbv_to_patch(card, unifi_fw_read, dlpriv, pfwinfo, &psize);
    if (!pfw)
    {
        unifi_error(card->ospriv, "Failed to convert f/w to patch");
        return CSR_WIFI_HIP_RESULT_NO_MEMORY;
    }
    else
    {
        void *desc;
        sym.id = CSR_SLT_BOOT_LOADER_CONTROL;
        sym.obj = 0; /* To be updated by _find_in_slut() */

        /* Find boot loader control entry in SLUT */
        r = _find_in_slut(card, &sym, &slut_base);
        if (r != CSR_RESULT_SUCCESS)
        {
            unifi_error(card->ospriv, "Failed to find BOOT_LOADER_CONTROL\n");
            kfree(pfw);
            return CSR_RESULT_FAILURE;
        }

        /* Wake failure is deliberately non-fatal: the download below is
           still attempted and reports its own errors. */
        r = unifi_set_host_state(card, UNIFI_HOST_STATE_AWAKE);
        if (r != CSR_RESULT_SUCCESS)
        {
            unifi_error(card->ospriv, "Failed to wake UniFi\n");
        }

        /* Get a dlpriv for the patch buffer so that unifi_fw_read() can
         * access it.
         */
        desc = unifi_fw_open_buffer(card->ospriv, pfw, psize);
        if (!desc)
        {
            kfree(pfw);
            return CSR_WIFI_HIP_RESULT_NO_MEMORY;
        }

        /* Download the patch */
        unifi_info(card->ospriv, "Downloading converted f/w as patch\n");
        r = unifi_dl_patch(card, desc, sym.obj);
        kfree(pfw);
        unifi_fw_close_buffer(card->ospriv, desc);

        if (r != CSR_RESULT_SUCCESS)
        {
            unifi_error(card->ospriv, "Converted patch download failed\n");
            return r;
        }
        else
        {
            unifi_trace(card->ospriv, UDBG1, "Converted patch downloaded\n");
        }

        /* This command starts the firmware; sym.obj + 6 is the loader
           operation word within the boot loader control block. */
        r = unifi_do_loader_op(card, sym.obj + 6, UNIFI_BOOT_LOADER_RESTART);
        if (r != CSR_RESULT_SUCCESS)
        {
            unifi_error(card->ospriv, "Failed to write loader restart cmd\n");
        }

        return r;
    }
}
292
293
294/*
295 * ---------------------------------------------------------------------------
296 * unifi_dl_firmware
297 *
298 * Download the given firmware image to the UniFi.
299 *
300 * Arguments:
301 * card Pointer to card struct
302 * dlpriv A context pointer from the calling function to be
303 * passed when calling unifi_fw_read().
304 *
305 * Returns:
306 * CSR_RESULT_SUCCESS on success,
307 * CSR_WIFI_HIP_RESULT_NO_MEMORY memory allocation failed
308 * CSR_WIFI_HIP_RESULT_INVALID_VALUE error in XBV file
309 * CSR_RESULT_FAILURE SDIO error
310 *
311 * Notes:
312 * Stops and resets the chip, does the download and runs the new
313 * firmware.
314 * ---------------------------------------------------------------------------
315 */
316CsrResult unifi_dl_firmware(card_t *card, void *dlpriv)
317{
318 xbv1_t *fwinfo;
319 CsrResult r;
320
321 fwinfo = kmalloc(sizeof(xbv1_t), GFP_KERNEL);
322 if (fwinfo == NULL)
323 {
324 unifi_error(card->ospriv, "Failed to allocate memory for firmware\n");
325 return CSR_WIFI_HIP_RESULT_NO_MEMORY;
326 }
327
328 /*
329 * Scan the firmware file to find the TLVs we are interested in.
330 * These are:
331 * - check we support the file format version in VERF
332 * - SLTP Symbol Lookup Table Pointer
333 * - FWDL firmware download segments
334 * - FWOV firmware overlay segment
335 * - VMEQ Register probe tests to verify matching h/w
336 */
337 r = xbv1_parse(card, unifi_fw_read, dlpriv, fwinfo);
338 if (r != CSR_RESULT_SUCCESS || fwinfo->mode != xbv_firmware)
339 {
340 unifi_error(card->ospriv, "File type is %s, expected firmware.\n",
341 fwinfo->mode == xbv_patch?"patch" : "unknown");
342 kfree(fwinfo);
343 return CSR_WIFI_HIP_RESULT_INVALID_VALUE;
344 }
345
346 /* UF6xxx doesn't accept firmware, only patches. Therefore we convert
347 * the file to patch format with version numbers matching the current
348 * running firmware, and then download via the patch mechanism.
349 * The sole purpose of this is to support production test firmware across
350 * different ROM releases, the test firmware being provided in non-patch
351 * format.
352 */
353 if (card->chip_id > SDIO_CARD_ID_UNIFI_2)
354 {
355 unifi_info(card->ospriv, "Must convert f/w to patch format\n");
356 r = do_patch_convert_download(card, dlpriv, fwinfo);
357 }
358 else
359 {
360 /* Older UniFi chips allowed firmware to be directly loaded onto the
361 * chip, which is no longer supported.
362 */
363 unifi_error(card->ospriv, "Only patch downloading supported\n");
364 r = CSR_WIFI_HIP_RESULT_INVALID_VALUE;
365 }
366
367 kfree(fwinfo);
368 return r;
369} /* unifi_dl_firmware() */
370
371
372/*
373 * ---------------------------------------------------------------------------
374 * unifi_dl_patch
375 *
376 * Load the given patch set into UniFi.
377 *
378 * Arguments:
379 * card Pointer to card struct
380 * dlpriv The os specific handle to the firmware file.
381 * boot_ctrl The address of the boot loader control structure.
382 *
383 * Returns:
384 * CSR_RESULT_SUCCESS on success,
385 * CSR_WIFI_HIP_RESULT_NO_MEMORY memory allocation failed
386 * CSR_WIFI_HIP_RESULT_INVALID_VALUE error in XBV file
387 * CSR_RESULT_FAILURE SDIO error
388 *
389 * Notes:
390 * This ends up telling UniFi to restart.
391 * ---------------------------------------------------------------------------
392 */
393CsrResult unifi_dl_patch(card_t *card, void *dlpriv, u32 boot_ctrl)
394{
395 xbv1_t *fwinfo;
396 CsrResult r;
397
398 unifi_info(card->ospriv, "unifi_dl_patch %p %08x\n", dlpriv, boot_ctrl);
399
400 fwinfo = kmalloc(sizeof(xbv1_t), GFP_KERNEL);
401 if (fwinfo == NULL)
402 {
403 unifi_error(card->ospriv, "Failed to allocate memory for patches\n");
404 return CSR_WIFI_HIP_RESULT_NO_MEMORY;
405 }
406
407 /*
408 * Scan the firmware file to find the TLVs we are interested in.
409 * These are:
410 * - check we support the file format version in VERF
411 * - FWID The build ID of the ROM that we can patch
412 * - PTDL patch download segments
413 */
414 r = xbv1_parse(card, unifi_fw_read, dlpriv, fwinfo);
415 if (r != CSR_RESULT_SUCCESS || fwinfo->mode != xbv_patch)
416 {
417 kfree(fwinfo);
418 unifi_error(card->ospriv, "Failed to read in patch file\n");
419 return CSR_WIFI_HIP_RESULT_INVALID_VALUE;
420 }
421
422 /*
423 * We have to check the build id read from the SLUT against that
424 * for the patch file. They have to match exactly.
425 * "card->build_id" == XBV1.PTCH.FWID
426 */
427 if (card->build_id != fwinfo->build_id)
428 {
429 unifi_error(card->ospriv, "Wrong patch file for chip (chip = %lu, file = %lu)\n",
430 card->build_id, fwinfo->build_id);
431 kfree(fwinfo);
432#ifndef CSR_WIFI_IGNORE_PATCH_VERSION_MISMATCH
433 return CSR_WIFI_HIP_RESULT_INVALID_VALUE;
434#else
435 fwinfo = NULL;
436 dlpriv = NULL;
437 return CSR_RESULT_SUCCESS;
438#endif
439 }
440
441 r = do_patch_download(card, dlpriv, fwinfo, boot_ctrl);
442 if (r != CSR_RESULT_SUCCESS)
443 {
444 unifi_error(card->ospriv, "Failed to patch image\n");
445 }
446
447 kfree(fwinfo);
448
449 return r;
450} /* unifi_dl_patch() */
451
452
453void* unifi_dl_fw_read_start(card_t *card, s8 is_fw)
454{
455 card_info_t card_info;
456
457 unifi_card_info(card, &card_info);
458 unifi_trace(card->ospriv, UDBG5,
459 "id=%d, ver=0x%x, fw_build=%u, fw_hip=0x%x, block_size=%d\n",
460 card_info.chip_id, card_info.chip_version,
461 card_info.fw_build, card_info.fw_hip_version,
462 card_info.sdio_block_size);
463
464 return unifi_fw_read_start(card->ospriv, is_fw, &card_info);
465}
466
467
468/*
469 * ---------------------------------------------------------------------------
470 * safe_read_shared_location
471 *
472 * Read a shared memory location repeatedly until we get two readings
473 * the same.
474 *
475 * Arguments:
476 * card Pointer to card context struct.
477 * unifi_addr UniFi shared-data-memory address to access.
478 * pdata Pointer to a byte variable for the value read.
479 *
480 *
481 * Returns:
482 * CSR_RESULT_SUCCESS on success, CSR error code on failure
483 * ---------------------------------------------------------------------------
484 */
485static CsrResult safe_read_shared_location(card_t *card, u32 address, u8 *pdata)
486{
487 CsrResult r;
488 u16 limit = 1000;
489 u8 b, b2;
490
491 *pdata = 0;
492
493 r = unifi_read_8_or_16(card, address, &b);
494 if (r != CSR_RESULT_SUCCESS)
495 {
496 return r;
497 }
498
499 while (limit--)
500 {
501 r = unifi_read_8_or_16(card, address, &b2);
502 if (r != CSR_RESULT_SUCCESS)
503 {
504 return r;
505 }
506
507 /* When we have a stable value, return it */
508 if (b == b2)
509 {
510 *pdata = b;
511 return CSR_RESULT_SUCCESS;
512 }
513
514 b = b2;
515 }
516
517 return CSR_RESULT_FAILURE;
518} /* safe_read_shared_location() */
519
520
521/*
522 * ---------------------------------------------------------------------------
523 * unifi_do_loader_op
524 *
525 * Send a loader / boot_loader command to the UniFi and wait for
526 * it to complete.
527 *
528 * Arguments:
529 * card Pointer to card context struct.
530 * op_addr The address of the loader operation control word.
531 * opcode The operation to perform.
532 *
533 * Returns:
534 * CSR_RESULT_SUCCESS on success
535 * CSR_RESULT_FAILURE SDIO error or SDIO/XAP timeout
536 * ---------------------------------------------------------------------------
537 */
538
539/*
540 * Ideally instead of sleeping, we want to busy wait.
541 * Currently there is no framework API to do this. When it becomes available,
542 * we can use it to busy wait using usecs
543 */
544#define OPERATION_TIMEOUT_LOOPS (100) /* when OPERATION_TIMEOUT_DELAY==1, (500) otherwise */
545#define OPERATION_TIMEOUT_DELAY 1 /* msec, or 200usecs */
546
CsrResult unifi_do_loader_op(card_t *card, u32 op_addr, u8 opcode)
{
    CsrResult r;
    s16 op_retries;     /* poll iterations so far, bounded by OPERATION_TIMEOUT_LOOPS */

    unifi_trace(card->ospriv, UDBG4, "Loader cmd 0x%0x -> 0x%08x\n", opcode, op_addr);

    /* Set the Operation command byte to the opcode */
    r = unifi_write_8_or_16(card, op_addr, opcode);
    if (r != CSR_RESULT_SUCCESS)
    {
        unifi_error(card->ospriv, "Failed to write loader copy command\n");
        return r;
    }

    /* Wait for Operation command byte to be Idle */
    /* Typically takes ~100us */
    op_retries = 0;
    r = CSR_RESULT_SUCCESS;
    while (1)
    {
        u8 op;

        /*
         * Read the memory location until two successive reads give
         * the same value.
         * Then handle it.
         */
        r = safe_read_shared_location(card, op_addr, &op);
        if (r != CSR_RESULT_SUCCESS)
        {
            unifi_error(card->ospriv, "Failed to read loader status\n");
            break;
        }

        /* The loader writes UNIFI_LOADER_IDLE back when the op completes */
        if (op == UNIFI_LOADER_IDLE)
        {
            /* Success */
            break;
        }

        /* Any value other than the opcode we wrote (still pending) or IDLE
         * (done) is an error code reported by the loader. */
        if (op != opcode)
        {
            unifi_error(card->ospriv, "Error reported by loader: 0x%X\n", op);
            r = CSR_RESULT_FAILURE;
            break;
        }

        /* Allow 500us timeout */
        if (++op_retries >= OPERATION_TIMEOUT_LOOPS)
        {
            unifi_error(card->ospriv, "Timeout waiting for loader to ack transfer\n");
            /* Stop XAPs to aid post-mortem */
            r = unifi_card_stop_processor(card, UNIFI_PROC_BOTH);
            if (r != CSR_RESULT_SUCCESS)
            {
                unifi_error(card->ospriv, "Failed to stop UniFi processors\n");
            }
            else
            {
                /* Stopping succeeded, but the op itself still timed out */
                r = CSR_RESULT_FAILURE;
            }
            break;
        }
        CsrThreadSleep(OPERATION_TIMEOUT_DELAY);
    } /* Loop exits with r != CSR_RESULT_SUCCESS on error */

    return r;
} /* unifi_do_loader_op() */
616
617
618/*
619 * ---------------------------------------------------------------------------
620 * send_ptdl_to_unifi
621 *
622 * Copy a patch block from userland to the UniFi.
623 * This function reads data, 2K at a time, from userland and writes
624 * it to the UniFi.
625 *
626 * Arguments:
627 * card A pointer to the card structure
628 * dlpriv The os specific handle for the firmware file
629 * ptdl A pointer ot the PTDL block
630 * handle The buffer handle to use for the xfer
631 * op_addr The address of the loader operation control word
632 *
633 * Returns:
 *      CSR_RESULT_SUCCESS on success, or one of the following
 *      error codes:
636 * CSR_WIFI_HIP_RESULT_NO_MEMORY memory allocation failed
637 * CSR_WIFI_HIP_RESULT_INVALID_VALUE error in XBV file
638 * CSR_RESULT_FAILURE SDIO error
639 * ---------------------------------------------------------------------------
640 */
641static CsrResult send_ptdl_to_unifi(card_t *card, void *dlpriv,
642 const struct PTDL *ptdl, u32 handle,
643 u32 op_addr)
644{
645 u32 offset;
646 u8 *buf;
647 s32 data_len;
648 u32 write_len;
649 CsrResult r;
650 const u16 buf_size = 2 * 1024;
651
652 offset = ptdl->dl_offset;
653 data_len = ptdl->dl_size;
654
655 if (data_len > buf_size)
656 {
657 unifi_error(card->ospriv, "PTDL block is too large (%u)\n",
658 ptdl->dl_size);
659 return CSR_WIFI_HIP_RESULT_INVALID_VALUE;
660 }
661
662 buf = kmalloc(buf_size, GFP_KERNEL);
663 if (buf == NULL)
664 {
665 unifi_error(card->ospriv, "Failed to allocate transfer buffer for firmware download\n");
666 return CSR_WIFI_HIP_RESULT_NO_MEMORY;
667 }
668
669 r = CSR_RESULT_SUCCESS;
670
671 if (unifi_fw_read(card->ospriv, dlpriv, offset, buf, data_len) != data_len)
672 {
673 unifi_error(card->ospriv, "Failed to read from file\n");
674 }
675 else
676 {
677 /* We can always round these if the host wants to */
678 if (card->sdio_io_block_pad)
679 {
680 write_len = (data_len + (card->sdio_io_block_size - 1)) &
681 ~(card->sdio_io_block_size - 1);
682
683 /* Zero out the rest of the buffer (This isn't needed, but it
684 * makes debugging things later much easier). */
685 memset(buf + data_len, 0, write_len - data_len);
686 }
687 else
688 {
689 write_len = data_len;
690 }
691
692 r = unifi_bulk_rw_noretry(card, handle, buf, write_len, UNIFI_SDIO_WRITE);
693 if (r != CSR_RESULT_SUCCESS)
694 {
695 unifi_error(card->ospriv, "CMD53 failed writing %d bytes to handle %ld\n",
696 data_len, handle);
697 }
698 else
699 {
700 /*
701 * Can change the order of things to overlap read from file
702 * with copy to unifi
703 */
704 r = unifi_do_loader_op(card, op_addr, UNIFI_BOOT_LOADER_PATCH);
705 }
706 }
707
708 kfree(buf);
709
710 if (r != CSR_RESULT_SUCCESS && r != CSR_WIFI_HIP_RESULT_NO_DEVICE)
711 {
712 unifi_error(card->ospriv, "Failed to copy block of %u bytes to UniFi\n",
713 ptdl->dl_size);
714 }
715
716 return r;
717} /* send_ptdl_to_unifi() */
718
719
720/*
721 * ---------------------------------------------------------------------------
722 * do_patch_download
723 *
724 * This function downloads a set of patches to UniFi and then
725 * causes it to restart.
726 *
727 * Arguments:
728 * card Pointer to card struct.
729 * dlpriv A context pointer from the calling function to be
730 * used when reading the XBV file. This can be NULL
 *                      in which case no patches are applied.
732 * pfwinfo Pointer to a fwinfo struct describing the f/w
733 * XBV file.
734 * boot_ctrl_addr The address of the boot loader control structure.
735 *
736 * Returns:
737 * 0 on success, or an error code
 *      CSR_WIFI_HIP_RESULT_INVALID_VALUE for a bad loader version number
739 * ---------------------------------------------------------------------------
740 */
static CsrResult do_patch_download(card_t *card, void *dlpriv, xbv1_t *pfwinfo, u32 boot_ctrl_addr)
{
    CsrResult r;
    s32 i;
    u16 loader_version;
    u16 handle;          /* CMD53 transfer handle read from the control structure */
    u32 total_bytes;     /* running count of patch bytes sent, for error reporting */

    /*
     * Read info from the SDIO Loader Control Data Structure.
     * Layout, as derived from the accesses below (byte offsets):
     *   +0  loader version
     *   +2  LED flash mask
     *   +4  CMD53 transfer handle
     *   +6  loader operation control word
     */
    /* Check the loader version */
    r = unifi_card_read16(card, boot_ctrl_addr, &loader_version);
    if (r != CSR_RESULT_SUCCESS)
    {
        unifi_error(card->ospriv, "Patch download: Failed to read loader version\n");
        return r;
    }
    unifi_trace(card->ospriv, UDBG2, "Patch download: boot loader version 0x%04X\n", loader_version);
    switch (loader_version)
    {
        case 0x0000:
            /* Only version 0 of the control structure is understood */
            break;

        default:
            unifi_error(card->ospriv, "Patch loader version (0x%04X) is not supported by this driver\n",
                        loader_version);
            return CSR_WIFI_HIP_RESULT_INVALID_VALUE;
    }

    /* Retrieve the handle to use with CMD53 */
    r = unifi_card_read16(card, boot_ctrl_addr + 4, &handle);
    if (r != CSR_RESULT_SUCCESS)
    {
        unifi_error(card->ospriv, "Patch download: Failed to read loader handle\n");
        return r;
    }

    /* Set the mask of LEDs to flash */
    if (card->loader_led_mask)
    {
        r = unifi_card_write16(card, boot_ctrl_addr + 2,
                               (u16)card->loader_led_mask);
        if (r != CSR_RESULT_SUCCESS)
        {
            unifi_error(card->ospriv, "Patch download: Failed to write LED mask\n");
            return r;
        }
    }

    total_bytes = 0;

    /* Copy download data to UniFi memory, one PTDL segment at a time */
    for (i = 0; i < pfwinfo->num_ptdl; i++)
    {
        unifi_trace(card->ospriv, UDBG3, "Patch download: %d Downloading for %d from offset %d\n",
                    i,
                    pfwinfo->ptdl[i].dl_size,
                    pfwinfo->ptdl[i].dl_offset);

        r = send_ptdl_to_unifi(card, dlpriv, &pfwinfo->ptdl[i],
                               handle, boot_ctrl_addr + 6);
        if (r == CSR_WIFI_HIP_RESULT_NO_DEVICE)
        {
            /* Device has gone away: propagate unchanged, no point logging */
            return r;
        }
        if (r != CSR_RESULT_SUCCESS)
        {
            unifi_error(card->ospriv, "Patch failed after %u bytes\n",
                        total_bytes);
            return r;
        }
        total_bytes += pfwinfo->ptdl[i].dl_size;
    }

    return CSR_RESULT_SUCCESS;
} /* do_patch_download() */
818
819
diff --git a/drivers/staging/csr/csr_wifi_hip_dump.c b/drivers/staging/csr/csr_wifi_hip_dump.c
deleted file mode 100644
index 7b7eec49d028..000000000000
--- a/drivers/staging/csr/csr_wifi_hip_dump.c
+++ /dev/null
@@ -1,837 +0,0 @@
1/*****************************************************************************
2
3 (c) Cambridge Silicon Radio Limited 2012
4 All rights reserved and confidential information of CSR
5
6 Refer to LICENSE.txt included with this source for details
7 on the license terms.
8
9*****************************************************************************/
10
11/*
12 * ---------------------------------------------------------------------------
13 * FILE: csr_wifi_hip_dump.c
14 *
15 * PURPOSE:
16 * Routines for retrieving and buffering core status from the UniFi
17 *
18 * ---------------------------------------------------------------------------
19 */
20#include <linux/slab.h>
21#include "csr_wifi_hip_unifi.h"
22#include "csr_wifi_hip_unifiversion.h"
23#include "csr_wifi_hip_card.h"
24
25/* Locations to capture in dump (XAP words) */
26#define HIP_CDUMP_FIRST_CPUREG (0xFFE0) /* First CPU register */
27#define HIP_CDUMP_FIRST_LO (0) /* Start of low address range */
28#define HIP_CDUMP_FIRST_HI_MAC (0x3C00) /* Start of MAC high area */
29#define HIP_CDUMP_FIRST_HI_PHY (0x1C00) /* Start of PHY high area */
30#define HIP_CDUMP_FIRST_SH (0) /* Start of shared memory area */
31
32#define HIP_CDUMP_NCPUREGS (10) /* No. of 16-bit XAP registers */
33#define HIP_CDUMP_NWORDS_LO (0x0100) /* Low area size in 16-bit words */
34#define HIP_CDUMP_NWORDS_HI (0x0400) /* High area size in 16-bit words */
35#define HIP_CDUMP_NWORDS_SH (0x0500) /* Shared memory area size, 16-bit words */
36
37#define HIP_CDUMP_NUM_ZONES 7 /* Number of UniFi memory areas to capture */
38
/* Mini-coredump state: one captured dump image. Buffers are linked into a
 * circular list, so new captures overwrite the oldest slots. */
typedef struct coredump_buf
{
    u16 count;           /* serial number of dump; 0 means slot not in use */
    u32 timestamp;       /* host's system time at capture */
    s16 requestor;       /* request: 0=auto dump, 1=manual */
    u16 chip_ver;        /* chip version recorded at capture time */
    u32 fw_ver;          /* firmware version recorded at capture time */
    u16 *zone[HIP_CDUMP_NUM_ZONES];   /* captured data, one buffer per zonedef_table entry */

    struct coredump_buf *next; /* circular list */
    struct coredump_buf *prev; /* circular list */
} coredump_buffer;
52
/* Structure used to describe a zone of chip memory captured by mini-coredump.
 * Each entry defines where on the XAP to read from and how much to read. */
struct coredump_zone
{
    unifi_coredump_space_t space; /* XAP memory space this zone covers */
    enum unifi_dbg_processors_select cpu; /* XAP CPU core selector (UNIFI_PROC_INVALID = no select needed) */
    u32 gp; /* Generic Pointer to memory zone on XAP */
    u16 offset; /* 16-bit XAP word offset of zone in memory space */
    u16 length; /* Length of zone in XAP words */
};
62
63static CsrResult unifi_coredump_from_sdio(card_t *card, coredump_buffer *dump_buf);
64static CsrResult unifi_coredump_read_zones(card_t *card, coredump_buffer *dump_buf);
65static CsrResult unifi_coredump_read_zone(card_t *card, u16 *zone,
66 const struct coredump_zone *def);
67static s32 get_value_from_coredump(const coredump_buffer *dump,
68 const unifi_coredump_space_t space, const u16 offset);
69
/* Table of chip memory zones we capture on mini-coredump.
 * NOTE: entry order matters — get_value_from_coredump() indexes a
 * coredump_buffer's zone[] array by position in this table, so entries
 * must stay aligned with the captured zone buffers. */
static const struct coredump_zone zonedef_table[HIP_CDUMP_NUM_ZONES] = {
    { UNIFI_COREDUMP_MAC_REG, UNIFI_PROC_MAC, UNIFI_MAKE_GP(REGISTERS, HIP_CDUMP_FIRST_CPUREG * 2), HIP_CDUMP_FIRST_CPUREG, HIP_CDUMP_NCPUREGS },
    { UNIFI_COREDUMP_PHY_REG, UNIFI_PROC_PHY, UNIFI_MAKE_GP(REGISTERS, HIP_CDUMP_FIRST_CPUREG * 2), HIP_CDUMP_FIRST_CPUREG, HIP_CDUMP_NCPUREGS },
    { UNIFI_COREDUMP_SH_DMEM, UNIFI_PROC_INVALID, UNIFI_MAKE_GP(SH_DMEM, HIP_CDUMP_FIRST_SH * 2), HIP_CDUMP_FIRST_SH, HIP_CDUMP_NWORDS_SH },
    { UNIFI_COREDUMP_MAC_DMEM, UNIFI_PROC_MAC, UNIFI_MAKE_GP(MAC_DMEM, HIP_CDUMP_FIRST_LO * 2), HIP_CDUMP_FIRST_LO, HIP_CDUMP_NWORDS_LO },
    { UNIFI_COREDUMP_MAC_DMEM, UNIFI_PROC_MAC, UNIFI_MAKE_GP(MAC_DMEM, HIP_CDUMP_FIRST_HI_MAC * 2), HIP_CDUMP_FIRST_HI_MAC, HIP_CDUMP_NWORDS_HI },
    { UNIFI_COREDUMP_PHY_DMEM, UNIFI_PROC_PHY, UNIFI_MAKE_GP(PHY_DMEM, HIP_CDUMP_FIRST_LO * 2), HIP_CDUMP_FIRST_LO, HIP_CDUMP_NWORDS_LO },
    { UNIFI_COREDUMP_PHY_DMEM, UNIFI_PROC_PHY, UNIFI_MAKE_GP(PHY_DMEM, HIP_CDUMP_FIRST_HI_PHY * 2), HIP_CDUMP_FIRST_HI_PHY, HIP_CDUMP_NWORDS_HI },
};
80
81/*
82 * ---------------------------------------------------------------------------
83 * unifi_coredump_request_at_next_reset
84 *
85 * Request that a mini-coredump is performed when the driver has
86 * completed resetting the UniFi device.
87 *
88 * Arguments:
89 * card Pointer to card struct
90 * enable If non-zero, sets the request.
91 * If zero, cancels any pending request.
92 *
93 * Returns:
94 * CSR_RESULT_SUCCESS or CSR HIP error code
95 *
96 * Notes:
97 * This function is typically called once the driver has detected that
98 * the UniFi device has become unresponsive due to crash, or internal
99 * watchdog reset. The driver must reset it to regain communication and,
100 * immediately after that, the mini-coredump can be captured.
101 * ---------------------------------------------------------------------------
102 */
103CsrResult unifi_coredump_request_at_next_reset(card_t *card, s8 enable)
104{
105 CsrResult r;
106
107 if (enable)
108 {
109 unifi_trace(card->ospriv, UDBG2, "Mini-coredump requested after reset\n");
110 }
111
112 if (card == NULL)
113 {
114 r = CSR_WIFI_HIP_RESULT_INVALID_VALUE;
115 }
116 else
117 {
118 card->request_coredump_on_reset = enable?1 : 0;
119 r = CSR_RESULT_SUCCESS;
120 }
121
122 return r;
123}
124
125
126/*
127 * ---------------------------------------------------------------------------
128 * unifi_coredump_handle_request
129 *
130 * Performs a coredump now, if one was requested, and clears the request.
131 *
132 * Arguments:
133 * card Pointer to card struct
134 *
135 * Returns:
136 * CSR_RESULT_SUCCESS or CSR HIP error code
137 *
138 * Notes:
139 * ---------------------------------------------------------------------------
140 */
141CsrResult unifi_coredump_handle_request(card_t *card)
142{
143 CsrResult r = CSR_RESULT_SUCCESS;
144
145 if (card == NULL)
146 {
147 r = CSR_WIFI_HIP_RESULT_INVALID_VALUE;
148 }
149 else
150 {
151 if (card->request_coredump_on_reset == 1)
152 {
153 card->request_coredump_on_reset = 0;
154 r = unifi_coredump_capture(card, NULL);
155 }
156 }
157
158 return r;
159}
160
161
162/*
163 * ---------------------------------------------------------------------------
164 * unifi_coredump_capture
165 *
166 * Capture the current status of the UniFi device.
167 * Various registers are buffered for future offline inspection.
168 *
169 * Arguments:
170 * card Pointer to card struct
171 * req Pointer to request struct, or NULL:
172 * A coredump requested manually by the user app
173 * will have a request struct pointer, an automatic
174 * coredump will have a NULL pointer.
175 * Returns:
176 * CSR_RESULT_SUCCESS on success,
177 * CSR_RESULT_FAILURE SDIO error
178 * CSR_WIFI_HIP_RESULT_INVALID_VALUE Initialisation not complete
179 *
180 * Notes:
181 * The result is a filled entry in the circular buffer of core dumps,
182 * values from which can be extracted to userland via an ioctl.
183 * ---------------------------------------------------------------------------
184 */
CsrResult unifi_coredump_capture(card_t *card, struct unifi_coredump_req *req)
{
    CsrResult r = CSR_RESULT_SUCCESS;
    /* Dump serial counter; static, so assumes captures are serialised by
     * the caller — NOTE(review): confirm no concurrent captures occur. */
    static u16 dump_seq_no = 1;
    u32 time_of_capture;

    /* No dump buffers allocated => coredumping disabled; not an error.
     * NOTE(review): card is dereferenced without a NULL check here, unlike
     * unifi_coredump_get_value() — callers must pass a valid card. */
    if (card->dump_next_write == NULL)
    {
        r = CSR_RESULT_SUCCESS;
        goto done;
    }

    /* Reject forced capture before initialisation has happened */
    if (card->helper == NULL)
    {
        r = CSR_WIFI_HIP_RESULT_INVALID_VALUE;
        goto done;
    }


    /*
     * Force a mini-coredump capture right now
     */
    time_of_capture = CsrTimeGet(NULL);
    unifi_info(card->ospriv, "Mini-coredump capture at t=%u\n", time_of_capture);

    /* Wake up the processors so we can talk to them */
    r = unifi_set_host_state(card, UNIFI_HOST_STATE_AWAKE);
    if (r != CSR_RESULT_SUCCESS)
    {
        unifi_error(card->ospriv, "Failed to wake UniFi\n");
        goto done;
    }
    /* Give the chip time to wake before stopping the XAPs */
    CsrThreadSleep(20);

    /* Stop both XAPs */
    unifi_trace(card->ospriv, UDBG4, "Stopping XAPs for coredump capture\n");
    r = unifi_card_stop_processor(card, UNIFI_PROC_BOTH);
    if (r != CSR_RESULT_SUCCESS)
    {
        unifi_error(card->ospriv, "Failed to stop UniFi XAPs\n");
        goto done;
    }

    /* Dump core into the next available slot in the circular list */
    r = unifi_coredump_from_sdio(card, card->dump_next_write);
    if (r == CSR_RESULT_SUCCESS)
    {
        /* Record whether the dump was manual or automatic */
        card->dump_next_write->requestor = (req?1 : 0);
        card->dump_next_write->timestamp = time_of_capture;
        /* Advance to the next buffer */
        card->dump_next_write->count = dump_seq_no++;
        card->dump_cur_read = card->dump_next_write;
        card->dump_next_write = card->dump_next_write->next;

        /* Sequence no. of zero indicates slot not in use, so handle wrap */
        if (dump_seq_no == 0)
        {
            dump_seq_no = 1;
        }

        unifi_trace(card->ospriv, UDBG3,
                    "Coredump (%p), SeqNo=%d, cur_read=%p, next_write=%p\n",
                    req,
                    card->dump_cur_read->count,
                    card->dump_cur_read, card->dump_next_write);
    }

    /* Start both XAPs (restart even if the dump itself failed) */
    unifi_trace(card->ospriv, UDBG4, "Restart XAPs after coredump\n");
    r = card_start_processor(card, UNIFI_PROC_BOTH);
    if (r != CSR_RESULT_SUCCESS)
    {
        unifi_error(card->ospriv, "Failed to start UniFi XAPs\n");
        goto done;
    }

done:
    return r;
} /* unifi_coredump_capture() */
266
267
268/*
269 * ---------------------------------------------------------------------------
270 * get_value_from_coredump
271 *
272 *
273 *
274 * Arguments:
275 * dump Pointer to buffered coredump data
276 * offset_in_space XAP memory space to retrieve from the buffer (there
277 * may be more than one zone covering the same memory
278 * space, but starting from different offsets).
279 * offset Offset within the XAP memory space to be retrieved
280 *
281 * Returns:
282 * >=0 Register value on success
283 * <0 Register out of range of any captured zones
284 *
285 * Notes:
286 * ---------------------------------------------------------------------------
287 */
288static s32 get_value_from_coredump(const coredump_buffer *coreDump,
289 const unifi_coredump_space_t space,
290 const u16 offset_in_space)
291{
292 s32 r = -1;
293 u16 offset_in_zone;
294 u32 zone_end_offset;
295 s32 i;
296 const struct coredump_zone *def = &zonedef_table[0];
297
298 /* Search zone def table for a match with the requested memory space */
299 for (i = 0; i < HIP_CDUMP_NUM_ZONES; i++, def++)
300 {
301 if (space == def->space)
302 {
303 zone_end_offset = def->offset + def->length;
304
305 /* Is the space offset contained in this zone? */
306 if (offset_in_space < zone_end_offset &&
307 offset_in_space >= def->offset)
308 {
309 /* Calculate the offset of data within the zone buffer */
310 offset_in_zone = offset_in_space - def->offset;
311 r = (s32) * (coreDump->zone[i] + offset_in_zone);
312
313 unifi_trace(NULL, UDBG6,
314 "sp %d, offs 0x%04x = 0x%04x (in z%d 0x%04x->0x%04x)\n",
315 space, offset_in_space, r,
316 i, def->offset, zone_end_offset - 1);
317 break;
318 }
319 }
320 }
321 return r;
322}
323
324
325/*
326 * ---------------------------------------------------------------------------
327 * unifi_coredump_get_value
328 *
329 * Retrieve the value of a register buffered from a previous core dump,
330 * so that it may be reported back to application code.
331 *
332 * Arguments:
333 * card Pointer to card struct
334 * req_reg Pointer to request parameter partially filled. This
335 * function puts in the values retrieved from the dump.
336 *
337 * Returns:
338 * CSR_RESULT_SUCCESS on success, or:
339 * CSR_WIFI_HIP_RESULT_INVALID_VALUE Null parameter error
340 * CSR_WIFI_HIP_RESULT_RANGE Register out of range
341 * CSR_WIFI_HIP_RESULT_NOT_FOUND Dump index not (yet) captured
342 *
343 * Notes:
344 * ---------------------------------------------------------------------------
345 */
346CsrResult unifi_coredump_get_value(card_t *card, struct unifi_coredump_req *req)
347{
348 CsrResult r;
349 s32 i = 0;
350 coredump_buffer *find_dump = NULL;
351
352 if (req == NULL || card == NULL)
353 {
354 r = CSR_WIFI_HIP_RESULT_INVALID_VALUE;
355 goto done;
356 }
357 req->value = -1;
358 if (card->dump_buf == NULL)
359 {
360 unifi_trace(card->ospriv, UDBG2, "No coredump buffers\n");
361 r = CSR_WIFI_HIP_RESULT_NOT_FOUND; /* Coredumping disabled */
362 goto done;
363 }
364 if (card->dump_cur_read == NULL)
365 {
366 unifi_trace(card->ospriv, UDBG4, "No coredumps captured\n");
367 r = CSR_WIFI_HIP_RESULT_NOT_FOUND; /* No coredump yet captured */
368 goto done;
369 }
370
371 /* Find the requested dump buffer */
372 switch (req->index)
373 {
374 case 0: /* Newest */
375 find_dump = card->dump_cur_read;
376 break;
377 case -1: /* Oldest: The next used slot forward */
378 for (find_dump = card->dump_cur_read->next;
379 (find_dump->count == 0) && (find_dump != card->dump_cur_read);
380 find_dump = card->dump_cur_read->next)
381 {
382 }
383 break;
384 default: /* Number of steps back from current read position */
385 for (i = 0, find_dump = card->dump_cur_read;
386 i < req->index;
387 i++, find_dump = find_dump->prev)
388 {
389 /* Walk the list for the index'th entry, but
390 * stop when about to wrap. */
391 unifi_trace(card->ospriv, UDBG6,
392 "%d: %d, @%p, p=%p, n=%p, cr=%p, h=%p\n",
393 i, find_dump->count, find_dump, find_dump->prev,
394 find_dump->next, card->dump_cur_read, card->dump_buf);
395 if (find_dump->prev == card->dump_cur_read)
396 {
397 /* Wrapped but still not found, index out of range */
398 if (i != req->index)
399 {
400 unifi_trace(card->ospriv, UDBG6,
401 "Dump index %d not found %d\n", req->index, i);
402 r = CSR_WIFI_HIP_RESULT_NOT_FOUND;
403 goto done;
404 }
405 break;
406 }
407 }
408 break;
409 }
410
411 /* Check if the slot is actually filled with a core dump */
412 if (find_dump->count == 0)
413 {
414 unifi_trace(card->ospriv, UDBG4, "Not captured %d\n", req->index);
415 r = CSR_WIFI_HIP_RESULT_NOT_FOUND;
416 goto done;
417 }
418
419 unifi_trace(card->ospriv, UDBG6, "Req index %d, found seq %d at step %d\n",
420 req->index, find_dump->count, i);
421
422 /* Find the appropriate entry in the buffer */
423 req->value = get_value_from_coredump(find_dump, req->space, (u16)req->offset);
424 if (req->value < 0)
425 {
426 r = CSR_WIFI_HIP_RESULT_RANGE; /* Un-captured register */
427 unifi_trace(card->ospriv, UDBG4,
428 "Can't read space %d, reg 0x%x from coredump buffer %d\n",
429 req->space, req->offset, req->index);
430 }
431 else
432 {
433 r = CSR_RESULT_SUCCESS;
434 }
435
436 /* Update the private request structure with the found values */
437 req->chip_ver = find_dump->chip_ver;
438 req->fw_ver = find_dump->fw_ver;
439 req->timestamp = find_dump->timestamp;
440 req->requestor = find_dump->requestor;
441 req->serial = find_dump->count;
442
443done:
444 return r;
445} /* unifi_coredump_get_value() */
446
447
448/*
449 * ---------------------------------------------------------------------------
450 * unifi_coredump_read_zone
451 *
452 * Captures a UniFi memory zone into a buffer on the host
453 *
454 * Arguments:
455 * card Pointer to card struct
456 * zonebuf Pointer to on-host buffer to dump the memory zone into
457 * def Pointer to description of the memory zone to read from UniFi.
458 *
459 * Returns:
460 * CSR_RESULT_SUCCESS on success, or:
461 * CSR_RESULT_FAILURE SDIO error
462 * CSR_WIFI_HIP_RESULT_INVALID_VALUE Parameter error
463 *
464 * Notes:
465 * It is assumed that the caller has already stopped the XAPs
466 * ---------------------------------------------------------------------------
467 */
468static CsrResult unifi_coredump_read_zone(card_t *card, u16 *zonebuf, const struct coredump_zone *def)
469{
470 CsrResult r;
471
472 if (zonebuf == NULL || def == NULL)
473 {
474 r = CSR_WIFI_HIP_RESULT_INVALID_VALUE;
475 goto done;
476 }
477
478 /* Select XAP CPU if necessary */
479 if (def->cpu != UNIFI_PROC_INVALID)
480 {
481 if (def->cpu != UNIFI_PROC_MAC && def->cpu != UNIFI_PROC_PHY)
482 {
483 r = CSR_WIFI_HIP_RESULT_INVALID_VALUE;
484 goto done;
485 }
486 r = unifi_set_proc_select(card, def->cpu);
487 if (r != CSR_RESULT_SUCCESS)
488 {
489 goto done;
490 }
491 }
492
493 unifi_trace(card->ospriv, UDBG4,
494 "Dump sp %d, offs 0x%04x, 0x%04x words @GP=%08x CPU %d\n",
495 def->space, def->offset, def->length, def->gp, def->cpu);
496
497 /* Read on-chip RAM (byte-wise) */
498 r = unifi_card_readn(card, def->gp, zonebuf, (u16)(def->length * 2));
499 if (r == CSR_WIFI_HIP_RESULT_NO_DEVICE)
500 {
501 goto done;
502 }
503 if (r != CSR_RESULT_SUCCESS)
504 {
505 unifi_error(card->ospriv, "Can't read UniFi shared data area\n");
506 goto done;
507 }
508
509done:
510 return r;
511}
512
513
514/*
515 * ---------------------------------------------------------------------------
516 * unifi_coredump_read_zones
517 *
518 * Walks through the table of on-chip memory zones defined in zonedef_table,
519 * and reads each of them from the UniFi chip
520 *
521 * Arguments:
522 * card Pointer to card struct
523 * dump_buf Buffer into which register values will be dumped
524 *
525 * Returns:
526 * CSR_RESULT_SUCCESS on success, or:
527 * CSR_RESULT_FAILURE SDIO error
528 * CSR_WIFI_HIP_RESULT_INVALID_VALUE Parameter error
529 *
530 * Notes:
531 * It is assumed that the caller has already stopped the XAPs
532 * ---------------------------------------------------------------------------
533 */
534static CsrResult unifi_coredump_read_zones(card_t *card, coredump_buffer *dump_buf)
535{
536 CsrResult r = CSR_RESULT_SUCCESS;
537 s32 i;
538
539 /* Walk the table of coredump zone definitions and read them from the chip */
540 for (i = 0;
541 (i < HIP_CDUMP_NUM_ZONES) && (r == 0);
542 i++)
543 {
544 r = unifi_coredump_read_zone(card, dump_buf->zone[i], &zonedef_table[i]);
545 }
546
547 return r;
548}
549
550
551/*
552 * ---------------------------------------------------------------------------
553 * unifi_coredump_from_sdio
554 *
555 * Capture the status of the UniFi processors, over SDIO
556 *
557 * Arguments:
558 * card Pointer to card struct
559 * reg_buffer Buffer into which register values will be dumped
560 *
561 * Returns:
562 * CSR_RESULT_SUCCESS on success, or:
563 * CSR_RESULT_FAILURE SDIO error
564 * CSR_WIFI_HIP_RESULT_INVALID_VALUE Parameter error
565 *
566 * Notes:
567 * ---------------------------------------------------------------------------
568 */
569static CsrResult unifi_coredump_from_sdio(card_t *card, coredump_buffer *dump_buf)
570{
571 u16 val;
572 CsrResult r;
573 u32 sdio_addr;
574
575 if (dump_buf == NULL)
576 {
577 r = CSR_WIFI_HIP_RESULT_INVALID_VALUE;
578 goto done;
579 }
580
581
582 /* Chip and firmware version */
583 unifi_trace(card->ospriv, UDBG4, "Get chip version\n");
584 sdio_addr = 2 * ChipHelper_GBL_CHIP_VERSION(card->helper);
585 if (sdio_addr != 0)
586 {
587 r = unifi_read_direct16(card, sdio_addr, &val);
588 if (r == CSR_WIFI_HIP_RESULT_NO_DEVICE)
589 {
590 goto done;
591 }
592 if (r != CSR_RESULT_SUCCESS)
593 {
594 unifi_error(card->ospriv, "Can't read GBL_CHIP_VERSION\n");
595 goto done;
596 }
597 }
598 dump_buf->chip_ver = val;
599 dump_buf->fw_ver = card->build_id;
600
601 unifi_trace(card->ospriv, UDBG4, "chip_ver 0x%04x, fw_ver %u\n",
602 dump_buf->chip_ver, dump_buf->fw_ver);
603
604 /* Capture the memory zones required from UniFi */
605 r = unifi_coredump_read_zones(card, dump_buf);
606 if (r == CSR_WIFI_HIP_RESULT_NO_DEVICE)
607 {
608 goto done;
609 }
610 if (r != CSR_RESULT_SUCCESS)
611 {
612 unifi_error(card->ospriv, "Can't read UniFi memory areas\n");
613 goto done;
614 }
615
616done:
617 return r;
618} /* unifi_coredump_from_sdio() */
619
620
621#ifndef UNIFI_DISABLE_COREDUMP
622/*
623 * ---------------------------------------------------------------------------
624 * new_coredump_node
625 *
626 * Allocates a coredump linked-list node, and links it to the previous.
627 *
628 * Arguments:
629 * ospriv OS context
630 * prevnode Previous node to link into
631 *
632 * Returns:
633 * Pointer to valid coredump_buffer on success
634 * NULL on memory allocation failure
635 *
636 * Notes:
637 * Allocates "all or nothing"
638 * ---------------------------------------------------------------------------
639 */
640static
641coredump_buffer* new_coredump_node(void *ospriv, coredump_buffer *prevnode)
642{
643 coredump_buffer *newnode = NULL;
644 u16 *newzone = NULL;
645 s32 i;
646 u32 zone_size;
647
648 /* Allocate node header */
649 newnode = kzalloc(sizeof(coredump_buffer), GFP_KERNEL);
650 if (newnode == NULL)
651 {
652 return NULL;
653 }
654
655 /* Allocate chip memory zone capture buffers */
656 for (i = 0; i < HIP_CDUMP_NUM_ZONES; i++)
657 {
658 zone_size = sizeof(u16) * zonedef_table[i].length;
659 newzone = kzalloc(zone_size, GFP_KERNEL);
660 newnode->zone[i] = newzone;
661 if (newzone == NULL)
662 {
663 unifi_error(ospriv, "Out of memory on coredump zone %d (%d words)\n",
664 i, zonedef_table[i].length);
665 break;
666 }
667 }
668
669 /* Clean up if any zone alloc failed */
670 if (newzone == NULL)
671 {
672 for (i = 0; newnode->zone[i] != NULL; i++)
673 {
674 kfree(newnode->zone[i]);
675 newnode->zone[i] = NULL;
676 }
677 }
678
679 /* Link to previous node */
680 newnode->prev = prevnode;
681 if (prevnode)
682 {
683 prevnode->next = newnode;
684 }
685 newnode->next = NULL;
686
687 return newnode;
688}
689
690
691#endif /* UNIFI_DISABLE_COREDUMP */
692
693/*
694 * ---------------------------------------------------------------------------
695 * unifi_coredump_init
696 *
697 * Allocates buffers for the automatic SDIO core dump
698 *
699 * Arguments:
700 * card Pointer to card struct
701 * num_dump_buffers Number of buffers to reserve for coredumps
702 *
703 * Returns:
704 * CSR_RESULT_SUCCESS on success, or:
705 * CSR_WIFI_HIP_RESULT_NO_MEMORY memory allocation failed
706 *
707 * Notes:
708 * Allocates space in advance, to be used for the last n coredump buffers
709 * the intention being that the size is sufficient for at least one dump,
710 * probably several.
711 * It's probably advisable to have at least 2 coredump buffers to allow
712 * one to be enquired with the unifi_coredump tool, while leaving another
713 * free for capturing.
714 * ---------------------------------------------------------------------------
715 */
CsrResult unifi_coredump_init(card_t *card, u16 num_dump_buffers)
{
#ifndef UNIFI_DISABLE_COREDUMP
    void *ospriv = card->ospriv;
    coredump_buffer *prev = NULL;
    coredump_buffer *newnode = NULL;
    u32 i = 0;
#endif

    /* Reset coredump state unconditionally, even in the
     * UNIFI_DISABLE_COREDUMP build where no buffers are allocated. */
    card->request_coredump_on_reset = 0;
    card->dump_next_write = NULL;
    card->dump_cur_read = NULL;
    card->dump_buf = NULL;

#ifndef UNIFI_DISABLE_COREDUMP
    unifi_trace(ospriv, UDBG1,
                "Allocate buffers for %d core dumps\n", num_dump_buffers);
    if (num_dump_buffers == 0)
    {
        /* Zero buffers requested: nothing to allocate, report success */
        goto done;
    }

    /* Root node */
    card->dump_buf = new_coredump_node(ospriv, NULL);
    if (card->dump_buf == NULL)
    {
        goto fail;
    }
    prev = card->dump_buf;
    newnode = card->dump_buf;

    /* Add each subsequent node at tail */
    for (i = 1; i < num_dump_buffers; i++)
    {
        newnode = new_coredump_node(ospriv, prev);
        if (newnode == NULL)
        {
            goto fail;
        }
        prev = newnode;
    }

    /* Link the first and last nodes to make the list circular.
     * With num_dump_buffers == 1, newnode is still the root, so the
     * single node ends up linked to itself. */
    card->dump_buf->prev = newnode;
    newnode->next = card->dump_buf;

    /* Set initial r/w access pointers */
    card->dump_next_write = card->dump_buf;
    card->dump_cur_read = NULL;

    unifi_trace(ospriv, UDBG2, "Core dump configured (%d dumps max)\n", i);

done:
#endif
    return CSR_RESULT_SUCCESS;

#ifndef UNIFI_DISABLE_COREDUMP
fail:
    /* Unwind what we allocated so far */
    unifi_error(ospriv, "Out of memory allocating core dump node %d\n", i);
    unifi_coredump_free(card);
    return CSR_WIFI_HIP_RESULT_NO_MEMORY;
#endif
} /* unifi_coredump_init() */
780
781
782/*
783 * ---------------------------------------------------------------------------
784 * unifi_coredump_free
785 *
786 * Free all memory dynamically allocated for core dump
787 *
788 * Arguments:
789 * card Pointer to card struct
790 *
791 * Returns:
792 * None
793 *
794 * Notes:
795 * ---------------------------------------------------------------------------
796 */
797void unifi_coredump_free(card_t *card)
798{
799 void *ospriv = card->ospriv;
800 coredump_buffer *node, *del_node;
801 s16 i = 0;
802 s16 j;
803
804 unifi_trace(ospriv, UDBG2, "Core dump de-configured\n");
805
806 if (card->dump_buf == NULL)
807 {
808 return;
809 }
810
811 node = card->dump_buf;
812 do
813 {
814 /* Free payload zones */
815 for (j = 0; j < HIP_CDUMP_NUM_ZONES; j++)
816 {
817 kfree(node->zone[j]);
818 node->zone[j] = NULL;
819 }
820
821 /* Detach */
822 del_node = node;
823 node = node->next;
824
825 /* Free header */
826 kfree(del_node);
827 i++;
828 } while ((node != NULL) && (node != card->dump_buf));
829
830 unifi_trace(ospriv, UDBG3, "Freed %d coredump buffers\n", i);
831
832 card->dump_buf = NULL;
833 card->dump_next_write = NULL;
834 card->dump_cur_read = NULL;
835} /* unifi_coredump_free() */
836
837
diff --git a/drivers/staging/csr/csr_wifi_hip_packing.c b/drivers/staging/csr/csr_wifi_hip_packing.c
deleted file mode 100644
index 0768aefc6d1f..000000000000
--- a/drivers/staging/csr/csr_wifi_hip_packing.c
+++ /dev/null
@@ -1,4804 +0,0 @@
1/*****************************************************************************
2
3 (c) Cambridge Silicon Radio Limited 2011
4 All rights reserved and confidential information of CSR
5
6 Refer to LICENSE.txt included with this source for details
7 on the license terms.
8
9*****************************************************************************/
10
11/* Note: this is an auto-generated file. */
12
13#include "csr_wifi_hip_signals.h"
14#include "csr_wifi_hip_unifi.h"
15#include "csr_wifi_hip_conversions.h"
16
17
18/*
19 * ---------------------------------------------------------------------------
20 * get_packed_struct_size
21 *
22 * Examine a buffer containing a UniFi signal in wire-format.
23 * The first two bytes contain the signal ID, decode the signal ID and
24 * return the size, in bytes, of the signal, not including any bulk
25 * data.
26 *
27 * WARNING: This function is auto-generated, DO NOT EDIT!
28 *
29 * Arguments:
30 * buf Pointer to buffer to decode.
31 *
32 * Returns:
33 * 0 if the signal ID is not recognised (i.e. zero length),
34 * otherwise the number of bytes occupied by the signal in the buffer.
35 * This is useful for stepping past the signal to the object in the buffer.
36 * ---------------------------------------------------------------------------
37 */
38s32 get_packed_struct_size(const u8 *buf)
39{
40 s32 size = 0;
41 u16 sig_id;
42
43 sig_id = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(buf);
44
45 size += SIZEOF_UINT16;
46 size += SIZEOF_UINT16;
47 size += SIZEOF_UINT16;
48 switch (sig_id)
49 {
50#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
51 case CSR_MLME_SET_PACKET_FILTER_CONFIRM_ID:
52 size += SIZEOF_UINT16;
53 size += SIZEOF_UINT16;
54 size += SIZEOF_UINT16;
55 size += SIZEOF_UINT16;
56 size += SIZEOF_UINT16;
57 size += SIZEOF_UINT16;
58 break;
59#endif
60#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
61 case CSR_MLME_SETKEYS_CONFIRM_ID:
62 size += SIZEOF_UINT16;
63 size += SIZEOF_UINT16;
64 size += SIZEOF_UINT16;
65 size += SIZEOF_UINT16;
66 size += SIZEOF_UINT16;
67 size += SIZEOF_UINT16;
68 break;
69#endif
70#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
71 case CSR_MLME_CONFIG_QUEUE_CONFIRM_ID:
72 size += SIZEOF_UINT16;
73 size += SIZEOF_UINT16;
74 size += SIZEOF_UINT16;
75 size += SIZEOF_UINT16;
76 size += SIZEOF_UINT16;
77 break;
78#endif
79#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
80 case CSR_MLME_ADD_AUTONOMOUS_SCAN_CONFIRM_ID:
81 size += SIZEOF_UINT16;
82 size += SIZEOF_UINT16;
83 size += SIZEOF_UINT16;
84 size += SIZEOF_UINT16;
85 size += SIZEOF_UINT16;
86 size += SIZEOF_UINT16;
87 size += SIZEOF_UINT16;
88 break;
89#endif
90#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
91 case CSR_MLME_ADD_BLACKOUT_CONFIRM_ID:
92 size += SIZEOF_UINT16;
93 size += SIZEOF_UINT16;
94 size += SIZEOF_UINT16;
95 size += SIZEOF_UINT16;
96 size += SIZEOF_UINT16;
97 size += SIZEOF_UINT16;
98 size += SIZEOF_UINT16;
99 break;
100#endif
101#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
102 case CSR_MLME_DEL_BLACKOUT_REQUEST_ID:
103 size += SIZEOF_UINT16;
104 size += SIZEOF_UINT16;
105 size += SIZEOF_UINT16;
106 size += SIZEOF_UINT16;
107 size += SIZEOF_UINT16;
108 size += SIZEOF_UINT16;
109 break;
110#endif
111#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
112 case CSR_MLME_GET_KEY_SEQUENCE_CONFIRM_ID:
113 size += SIZEOF_UINT16;
114 size += SIZEOF_UINT16;
115 size += SIZEOF_UINT16;
116 size += SIZEOF_UINT16;
117 size += SIZEOF_UINT16;
118 size += SIZEOF_UINT16;
119 size += SIZEOF_UINT16;
120 size += SIZEOF_UINT16;
121 size += SIZEOF_UINT16;
122 size += SIZEOF_UINT16;
123 size += SIZEOF_UINT16;
124 size += SIZEOF_UINT16;
125 size += SIZEOF_UINT16;
126 size += SIZEOF_UINT16;
127 break;
128#endif
129#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
130 case CSR_MLME_SM_START_CONFIRM_ID:
131 size += SIZEOF_UINT16;
132 size += SIZEOF_UINT16;
133 size += SIZEOF_UINT16;
134 size += SIZEOF_UINT16;
135 size += SIZEOF_UINT16;
136 size += SIZEOF_UINT16;
137 break;
138#endif
139#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
140 case CSR_MLME_STOP_AGGREGATION_CONFIRM_ID:
141 size += SIZEOF_UINT16;
142 size += SIZEOF_UINT16;
143 size += SIZEOF_UINT16;
144 size += SIZEOF_UINT16;
145 size += SIZEOF_UINT16;
146 size += 48 / 8;
147 size += SIZEOF_UINT16;
148 size += SIZEOF_UINT16;
149 size += SIZEOF_UINT16;
150 break;
151#endif
152#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
153 case CSR_MLME_DEL_TSPEC_REQUEST_ID:
154 size += SIZEOF_UINT16;
155 size += SIZEOF_UINT16;
156 size += SIZEOF_UINT16;
157 size += SIZEOF_UINT16;
158 size += SIZEOF_UINT16;
159 size += SIZEOF_UINT16;
160 size += SIZEOF_UINT16;
161 break;
162#endif
163 case CSR_DEBUG_WORD16_INDICATION_ID:
164 size += SIZEOF_UINT16;
165 size += SIZEOF_UINT16;
166 size += SIZEOF_UINT16;
167 size += SIZEOF_UINT16;
168 size += SIZEOF_UINT16;
169 size += SIZEOF_UINT16;
170 size += SIZEOF_UINT16;
171 size += SIZEOF_UINT16;
172 size += SIZEOF_UINT16;
173 size += SIZEOF_UINT16;
174 size += SIZEOF_UINT16;
175 size += SIZEOF_UINT16;
176 size += SIZEOF_UINT16;
177 size += SIZEOF_UINT16;
178 size += SIZEOF_UINT16;
179 size += SIZEOF_UINT16;
180 size += SIZEOF_UINT16;
181 size += SIZEOF_UINT16;
182 size += SIZEOF_UINT16;
183 size += SIZEOF_UINT16;
184 break;
185 case CSR_DEBUG_GENERIC_CONFIRM_ID:
186 size += SIZEOF_UINT16;
187 size += SIZEOF_UINT16;
188 size += SIZEOF_UINT16;
189 size += SIZEOF_UINT16;
190 size += SIZEOF_UINT16;
191 size += SIZEOF_UINT16;
192 size += SIZEOF_UINT16;
193 size += SIZEOF_UINT16;
194 size += SIZEOF_UINT16;
195 size += SIZEOF_UINT16;
196 size += SIZEOF_UINT16;
197 size += SIZEOF_UINT16;
198 break;
199 case CSR_MA_PACKET_INDICATION_ID:
200 size += SIZEOF_UINT16;
201 size += SIZEOF_UINT16;
202 size += SIZEOF_UINT16;
203 size += SIZEOF_UINT16;
204 size += SIZEOF_UINT16;
205 size += SIZEOF_UINT64;
206 size += SIZEOF_UINT16;
207 size += SIZEOF_UINT16;
208 size += SIZEOF_UINT16;
209 size += SIZEOF_UINT16;
210 size += SIZEOF_UINT16;
211 size += SIZEOF_UINT16;
212 break;
213 case CSR_MLME_SET_TIM_REQUEST_ID:
214 size += SIZEOF_UINT16;
215 size += SIZEOF_UINT16;
216 size += SIZEOF_UINT16;
217 size += SIZEOF_UINT16;
218 size += SIZEOF_UINT16;
219 size += SIZEOF_UINT16;
220 size += SIZEOF_UINT16;
221 break;
222#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
223 case CSR_MLME_CONNECTED_INDICATION_ID:
224 size += SIZEOF_UINT16;
225 size += SIZEOF_UINT16;
226 size += SIZEOF_UINT16;
227 size += SIZEOF_UINT16;
228 size += SIZEOF_UINT16;
229 size += SIZEOF_UINT16;
230 size += 48 / 8;
231 break;
232#endif
233#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
234 case CSR_MLME_DEL_RX_TRIGGER_REQUEST_ID:
235 size += SIZEOF_UINT16;
236 size += SIZEOF_UINT16;
237 size += SIZEOF_UINT16;
238 size += SIZEOF_UINT16;
239 size += SIZEOF_UINT16;
240 size += SIZEOF_UINT16;
241 break;
242#endif
243#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
244 case CSR_MLME_TRIGGERED_GET_INDICATION_ID:
245 size += SIZEOF_UINT16;
246 size += SIZEOF_UINT16;
247 size += SIZEOF_UINT16;
248 size += SIZEOF_UINT16;
249 size += SIZEOF_UINT16;
250 size += SIZEOF_UINT16;
251 size += SIZEOF_UINT16;
252 size += SIZEOF_UINT16;
253 break;
254#endif
255#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
256 case CSR_MLME_SCAN_REQUEST_ID:
257 size += SIZEOF_UINT16;
258 size += SIZEOF_UINT16;
259 size += SIZEOF_UINT16;
260 size += SIZEOF_UINT16;
261 size += SIZEOF_UINT16;
262 size += SIZEOF_UINT16;
263 size += SIZEOF_UINT16;
264 size += SIZEOF_UINT32;
265 size += SIZEOF_UINT16;
266 size += SIZEOF_UINT16;
267 break;
268#endif
269#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
270 case CSR_MLME_DELETEKEYS_CONFIRM_ID:
271 size += SIZEOF_UINT16;
272 size += SIZEOF_UINT16;
273 size += SIZEOF_UINT16;
274 size += SIZEOF_UINT16;
275 size += SIZEOF_UINT16;
276 size += SIZEOF_UINT16;
277 break;
278#endif
279#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
280 case CSR_MLME_GET_NEXT_REQUEST_ID:
281 size += SIZEOF_UINT16;
282 size += SIZEOF_UINT16;
283 size += SIZEOF_UINT16;
284 size += SIZEOF_UINT16;
285 break;
286#endif
287#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
288 case CSR_MLME_SET_CHANNEL_CONFIRM_ID:
289 size += SIZEOF_UINT16;
290 size += SIZEOF_UINT16;
291 size += SIZEOF_UINT16;
292 size += SIZEOF_UINT16;
293 size += SIZEOF_UINT16;
294 size += SIZEOF_UINT16;
295 break;
296#endif
297#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
298 case CSR_MLME_START_AGGREGATION_REQUEST_ID:
299 size += SIZEOF_UINT16;
300 size += SIZEOF_UINT16;
301 size += SIZEOF_UINT16;
302 size += SIZEOF_UINT16;
303 size += SIZEOF_UINT16;
304 size += 48 / 8;
305 size += SIZEOF_UINT16;
306 size += SIZEOF_UINT16;
307 size += SIZEOF_UINT16;
308 size += SIZEOF_UINT16;
309 size += SIZEOF_UINT16;
310 break;
311#endif
312#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
313 case CSR_MLME_HL_SYNC_REQUEST_ID:
314 size += SIZEOF_UINT16;
315 size += SIZEOF_UINT16;
316 size += SIZEOF_UINT16;
317 size += SIZEOF_UINT16;
318 size += 48 / 8;
319 break;
320#endif
321 case CSR_DEBUG_GENERIC_REQUEST_ID:
322 size += SIZEOF_UINT16;
323 size += SIZEOF_UINT16;
324 size += SIZEOF_UINT16;
325 size += SIZEOF_UINT16;
326 size += SIZEOF_UINT16;
327 size += SIZEOF_UINT16;
328 size += SIZEOF_UINT16;
329 size += SIZEOF_UINT16;
330 size += SIZEOF_UINT16;
331 size += SIZEOF_UINT16;
332 size += SIZEOF_UINT16;
333 size += SIZEOF_UINT16;
334 break;
335#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
336 case CSR_MLME_LEAVE_CONFIRM_ID:
337 size += SIZEOF_UINT16;
338 size += SIZEOF_UINT16;
339 size += SIZEOF_UINT16;
340 size += SIZEOF_UINT16;
341 size += SIZEOF_UINT16;
342 size += SIZEOF_UINT16;
343 break;
344#endif
345#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
346 case CSR_MLME_DEL_TRIGGERED_GET_REQUEST_ID:
347 size += SIZEOF_UINT16;
348 size += SIZEOF_UINT16;
349 size += SIZEOF_UINT16;
350 size += SIZEOF_UINT16;
351 size += SIZEOF_UINT16;
352 size += SIZEOF_UINT16;
353 break;
354#endif
355#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
356 case CSR_MLME_ADD_MULTICAST_ADDRESS_REQUEST_ID:
357 size += SIZEOF_UINT16;
358 size += SIZEOF_UINT16;
359 size += SIZEOF_UINT16;
360 size += SIZEOF_UINT16;
361 size += SIZEOF_UINT16;
362 size += SIZEOF_UINT16;
363 break;
364#endif
365#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
366 case CSR_MLME_RESET_REQUEST_ID:
367 size += SIZEOF_UINT16;
368 size += SIZEOF_UINT16;
369 size += SIZEOF_UINT16;
370 size += SIZEOF_UINT16;
371 size += 48 / 8;
372 size += SIZEOF_UINT16;
373 break;
374#endif
375#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
376 case CSR_MLME_SCAN_CANCEL_REQUEST_ID:
377 size += SIZEOF_UINT16;
378 size += SIZEOF_UINT16;
379 size += SIZEOF_UINT16;
380 size += SIZEOF_UINT16;
381 size += SIZEOF_UINT16;
382 break;
383#endif
384#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
385 case CSR_MLME_ADD_TRIGGERED_GET_CONFIRM_ID:
386 size += SIZEOF_UINT16;
387 size += SIZEOF_UINT16;
388 size += SIZEOF_UINT16;
389 size += SIZEOF_UINT16;
390 size += SIZEOF_UINT16;
391 size += SIZEOF_UINT16;
392 size += SIZEOF_UINT16;
393 break;
394#endif
395#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
396 case CSR_MLME_SET_PACKET_FILTER_REQUEST_ID:
397 size += SIZEOF_UINT16;
398 size += SIZEOF_UINT16;
399 size += SIZEOF_UINT16;
400 size += SIZEOF_UINT16;
401 size += SIZEOF_UINT16;
402 size += SIZEOF_UINT16;
403 size += SIZEOF_UINT32;
404 break;
405#endif
406#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
407 case CSR_MLME_DEL_RX_TRIGGER_CONFIRM_ID:
408 size += SIZEOF_UINT16;
409 size += SIZEOF_UINT16;
410 size += SIZEOF_UINT16;
411 size += SIZEOF_UINT16;
412 size += SIZEOF_UINT16;
413 size += SIZEOF_UINT16;
414 size += SIZEOF_UINT16;
415 break;
416#endif
417#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
418 case CSR_MLME_CONNECT_STATUS_REQUEST_ID:
419 size += SIZEOF_UINT16;
420 size += SIZEOF_UINT16;
421 size += SIZEOF_UINT16;
422 size += SIZEOF_UINT16;
423 size += SIZEOF_UINT16;
424 size += SIZEOF_UINT16;
425 size += 48 / 8;
426 size += SIZEOF_UINT16;
427 size += SIZEOF_UINT16;
428 break;
429#endif
430#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
431 case CSR_MLME_LEAVE_REQUEST_ID:
432 size += SIZEOF_UINT16;
433 size += SIZEOF_UINT16;
434 size += SIZEOF_UINT16;
435 size += SIZEOF_UINT16;
436 size += SIZEOF_UINT16;
437 break;
438#endif
439#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
440 case CSR_MLME_CONFIG_QUEUE_REQUEST_ID:
441 size += SIZEOF_UINT16;
442 size += SIZEOF_UINT16;
443 size += SIZEOF_UINT16;
444 size += SIZEOF_UINT16;
445 size += SIZEOF_UINT16;
446 size += SIZEOF_UINT16;
447 size += SIZEOF_UINT16;
448 size += SIZEOF_UINT16;
449 size += SIZEOF_UINT16;
450 break;
451#endif
452#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
453 case CSR_MLME_DEL_TSPEC_CONFIRM_ID:
454 size += SIZEOF_UINT16;
455 size += SIZEOF_UINT16;
456 size += SIZEOF_UINT16;
457 size += SIZEOF_UINT16;
458 size += SIZEOF_UINT16;
459 size += SIZEOF_UINT16;
460 size += SIZEOF_UINT16;
461 break;
462#endif
463 case CSR_MLME_SET_TIM_CONFIRM_ID:
464 size += SIZEOF_UINT16;
465 size += SIZEOF_UINT16;
466 size += SIZEOF_UINT16;
467 size += SIZEOF_UINT16;
468 size += SIZEOF_UINT16;
469 size += SIZEOF_UINT16;
470 break;
471#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
472 case CSR_MLME_MEASURE_INDICATION_ID:
473 size += SIZEOF_UINT16;
474 size += SIZEOF_UINT16;
475 size += SIZEOF_UINT16;
476 size += SIZEOF_UINT16;
477 size += SIZEOF_UINT16;
478 break;
479#endif
480#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
481 case CSR_MLME_DEL_BLACKOUT_CONFIRM_ID:
482 size += SIZEOF_UINT16;
483 size += SIZEOF_UINT16;
484 size += SIZEOF_UINT16;
485 size += SIZEOF_UINT16;
486 size += SIZEOF_UINT16;
487 size += SIZEOF_UINT16;
488 size += SIZEOF_UINT16;
489 break;
490#endif
491#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
492 case CSR_MLME_DEL_TRIGGERED_GET_CONFIRM_ID:
493 size += SIZEOF_UINT16;
494 size += SIZEOF_UINT16;
495 size += SIZEOF_UINT16;
496 size += SIZEOF_UINT16;
497 size += SIZEOF_UINT16;
498 size += SIZEOF_UINT16;
499 size += SIZEOF_UINT16;
500 break;
501#endif
502 case CSR_DEBUG_GENERIC_INDICATION_ID:
503 size += SIZEOF_UINT16;
504 size += SIZEOF_UINT16;
505 size += SIZEOF_UINT16;
506 size += SIZEOF_UINT16;
507 size += SIZEOF_UINT16;
508 size += SIZEOF_UINT16;
509 size += SIZEOF_UINT16;
510 size += SIZEOF_UINT16;
511 size += SIZEOF_UINT16;
512 size += SIZEOF_UINT16;
513 size += SIZEOF_UINT16;
514 size += SIZEOF_UINT16;
515 break;
516 case CSR_MA_PACKET_CANCEL_REQUEST_ID:
517 size += SIZEOF_UINT16;
518 size += SIZEOF_UINT16;
519 size += SIZEOF_UINT16;
520 size += SIZEOF_UINT16;
521 size += SIZEOF_UINT16;
522 size += SIZEOF_UINT32;
523 break;
524#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
525 case CSR_MLME_MODIFY_BSS_PARAMETER_CONFIRM_ID:
526 size += SIZEOF_UINT16;
527 size += SIZEOF_UINT16;
528 size += SIZEOF_UINT16;
529 size += SIZEOF_UINT16;
530 size += SIZEOF_UINT16;
531 size += SIZEOF_UINT16;
532 break;
533#endif
534#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
535 case CSR_MLME_PAUSE_AUTONOMOUS_SCAN_CONFIRM_ID:
536 size += SIZEOF_UINT16;
537 size += SIZEOF_UINT16;
538 size += SIZEOF_UINT16;
539 size += SIZEOF_UINT16;
540 size += SIZEOF_UINT16;
541 size += SIZEOF_UINT16;
542 size += SIZEOF_UINT16;
543 break;
544#endif
545 case CSR_MA_PACKET_REQUEST_ID:
546 size += SIZEOF_UINT16;
547 size += SIZEOF_UINT16;
548 size += SIZEOF_UINT16;
549 size += SIZEOF_UINT16;
550 size += SIZEOF_UINT16;
551 size += SIZEOF_UINT16;
552 size += SIZEOF_UINT32;
553 size += SIZEOF_UINT16;
554 size += 48 / 8;
555 size += SIZEOF_UINT16;
556 break;
557#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
558 case CSR_MLME_MODIFY_BSS_PARAMETER_REQUEST_ID:
559 size += SIZEOF_UINT16;
560 size += SIZEOF_UINT16;
561 size += SIZEOF_UINT16;
562 size += SIZEOF_UINT16;
563 size += SIZEOF_UINT16;
564 size += SIZEOF_UINT16;
565 size += SIZEOF_UINT16;
566 size += SIZEOF_UINT16;
567 size += 48 / 8;
568 size += SIZEOF_UINT16;
569 break;
570#endif
571#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
572 case CSR_MLME_ADD_RX_TRIGGER_REQUEST_ID:
573 size += SIZEOF_UINT16;
574 size += SIZEOF_UINT16;
575 size += SIZEOF_UINT16;
576 size += SIZEOF_UINT16;
577 size += SIZEOF_UINT16;
578 size += SIZEOF_UINT16;
579 size += SIZEOF_UINT16;
580 break;
581#endif
582 case CSR_MA_VIF_AVAILABILITY_INDICATION_ID:
583 size += SIZEOF_UINT16;
584 size += SIZEOF_UINT16;
585 size += SIZEOF_UINT16;
586 size += SIZEOF_UINT16;
587 size += SIZEOF_UINT16;
588 size += SIZEOF_UINT16;
589 break;
590#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
591 case CSR_MLME_HL_SYNC_CANCEL_REQUEST_ID:
592 size += SIZEOF_UINT16;
593 size += SIZEOF_UINT16;
594 size += SIZEOF_UINT16;
595 size += SIZEOF_UINT16;
596 size += 48 / 8;
597 break;
598#endif
599#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
600 case CSR_MLME_DEL_AUTONOMOUS_SCAN_REQUEST_ID:
601 size += SIZEOF_UINT16;
602 size += SIZEOF_UINT16;
603 size += SIZEOF_UINT16;
604 size += SIZEOF_UINT16;
605 size += SIZEOF_UINT16;
606 size += SIZEOF_UINT16;
607 break;
608#endif
609#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
610 case CSR_MLME_BLACKOUT_ENDED_INDICATION_ID:
611 size += SIZEOF_UINT16;
612 size += SIZEOF_UINT16;
613 size += SIZEOF_UINT16;
614 size += SIZEOF_UINT16;
615 size += SIZEOF_UINT16;
616 size += SIZEOF_UINT16;
617 break;
618#endif
619#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
620 case CSR_MLME_AUTONOMOUS_SCAN_DONE_INDICATION_ID:
621 size += SIZEOF_UINT16;
622 size += SIZEOF_UINT16;
623 size += SIZEOF_UINT16;
624 size += SIZEOF_UINT16;
625 size += SIZEOF_UINT16;
626 size += SIZEOF_UINT16;
627 size += SIZEOF_UINT16;
628 break;
629#endif
630#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
631 case CSR_MLME_GET_KEY_SEQUENCE_REQUEST_ID:
632 size += SIZEOF_UINT16;
633 size += SIZEOF_UINT16;
634 size += SIZEOF_UINT16;
635 size += SIZEOF_UINT16;
636 size += SIZEOF_UINT16;
637 size += SIZEOF_UINT16;
638 size += SIZEOF_UINT16;
639 size += 48 / 8;
640 break;
641#endif
642#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
643 case CSR_MLME_SET_CHANNEL_REQUEST_ID:
644 size += SIZEOF_UINT16;
645 size += SIZEOF_UINT16;
646 size += SIZEOF_UINT16;
647 size += SIZEOF_UINT16;
648 size += SIZEOF_UINT16;
649 size += SIZEOF_UINT16;
650 size += SIZEOF_UINT16;
651 size += 48 / 8;
652 size += SIZEOF_UINT16;
653 size += SIZEOF_UINT16;
654 break;
655#endif
656#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
657 case CSR_MLME_MEASURE_CONFIRM_ID:
658 size += SIZEOF_UINT16;
659 size += SIZEOF_UINT16;
660 size += SIZEOF_UINT16;
661 size += SIZEOF_UINT16;
662 size += SIZEOF_UINT16;
663 size += SIZEOF_UINT16;
664 break;
665#endif
666#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
667 case CSR_MLME_ADD_TRIGGERED_GET_REQUEST_ID:
668 size += SIZEOF_UINT16;
669 size += SIZEOF_UINT16;
670 size += SIZEOF_UINT16;
671 size += SIZEOF_UINT16;
672 size += SIZEOF_UINT16;
673 size += SIZEOF_UINT16;
674 break;
675#endif
676#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
677 case CSR_MLME_AUTONOMOUS_SCAN_LOSS_INDICATION_ID:
678 size += SIZEOF_UINT16;
679 size += SIZEOF_UINT16;
680 size += SIZEOF_UINT16;
681 size += SIZEOF_UINT16;
682 size += SIZEOF_UINT16;
683 size += 48 / 8;
684 break;
685#endif
686 case CSR_MA_VIF_AVAILABILITY_RESPONSE_ID:
687 size += SIZEOF_UINT16;
688 size += SIZEOF_UINT16;
689 size += SIZEOF_UINT16;
690 size += SIZEOF_UINT16;
691 size += SIZEOF_UINT16;
692 size += SIZEOF_UINT16;
693 break;
694#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
695 case CSR_MLME_ADD_TEMPLATE_REQUEST_ID:
696 size += SIZEOF_UINT16;
697 size += SIZEOF_UINT16;
698 size += SIZEOF_UINT16;
699 size += SIZEOF_UINT16;
700 size += SIZEOF_UINT16;
701 size += SIZEOF_UINT16;
702 size += SIZEOF_UINT16;
703 break;
704#endif
705#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
706 case CSR_MLME_POWERMGT_CONFIRM_ID:
707 size += SIZEOF_UINT16;
708 size += SIZEOF_UINT16;
709 size += SIZEOF_UINT16;
710 size += SIZEOF_UINT16;
711 size += SIZEOF_UINT16;
712 size += SIZEOF_UINT16;
713 break;
714#endif
715#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
716 case CSR_MLME_ADD_PERIODIC_CONFIRM_ID:
717 size += SIZEOF_UINT16;
718 size += SIZEOF_UINT16;
719 size += SIZEOF_UINT16;
720 size += SIZEOF_UINT16;
721 size += SIZEOF_UINT16;
722 size += SIZEOF_UINT16;
723 size += SIZEOF_UINT16;
724 break;
725#endif
726#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
727 case CSR_MLME_GET_CONFIRM_ID:
728 size += SIZEOF_UINT16;
729 size += SIZEOF_UINT16;
730 size += SIZEOF_UINT16;
731 size += SIZEOF_UINT16;
732 size += SIZEOF_UINT16;
733 size += SIZEOF_UINT16;
734 break;
735#endif
736#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
737 case CSR_MLME_GET_NEXT_CONFIRM_ID:
738 size += SIZEOF_UINT16;
739 size += SIZEOF_UINT16;
740 size += SIZEOF_UINT16;
741 size += SIZEOF_UINT16;
742 size += SIZEOF_UINT16;
743 size += SIZEOF_UINT16;
744 break;
745#endif
746#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
747 case CSR_MLME_STOP_AGGREGATION_REQUEST_ID:
748 size += SIZEOF_UINT16;
749 size += SIZEOF_UINT16;
750 size += SIZEOF_UINT16;
751 size += SIZEOF_UINT16;
752 size += SIZEOF_UINT16;
753 size += 48 / 8;
754 size += SIZEOF_UINT16;
755 size += SIZEOF_UINT16;
756 break;
757#endif
758#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
759 case CSR_MLME_ADD_RX_TRIGGER_CONFIRM_ID:
760 size += SIZEOF_UINT16;
761 size += SIZEOF_UINT16;
762 size += SIZEOF_UINT16;
763 size += SIZEOF_UINT16;
764 size += SIZEOF_UINT16;
765 size += SIZEOF_UINT16;
766 size += SIZEOF_UINT16;
767 break;
768#endif
769#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
770 case CSR_MLME_ADD_BLACKOUT_REQUEST_ID:
771 size += SIZEOF_UINT16;
772 size += SIZEOF_UINT16;
773 size += SIZEOF_UINT16;
774 size += SIZEOF_UINT16;
775 size += SIZEOF_UINT16;
776 size += SIZEOF_UINT16;
777 size += SIZEOF_UINT16;
778 size += SIZEOF_UINT16;
779 size += SIZEOF_UINT32;
780 size += SIZEOF_UINT32;
781 size += SIZEOF_UINT32;
782 size += 48 / 8;
783 size += SIZEOF_UINT16;
784 break;
785#endif
786#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
787 case CSR_MLME_DELETEKEYS_REQUEST_ID:
788 size += SIZEOF_UINT16;
789 size += SIZEOF_UINT16;
790 size += SIZEOF_UINT16;
791 size += SIZEOF_UINT16;
792 size += SIZEOF_UINT16;
793 size += SIZEOF_UINT16;
794 size += SIZEOF_UINT16;
795 size += 48 / 8;
796 break;
797#endif
798#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
799 case CSR_MLME_RESET_CONFIRM_ID:
800 size += SIZEOF_UINT16;
801 size += SIZEOF_UINT16;
802 size += SIZEOF_UINT16;
803 size += SIZEOF_UINT16;
804 size += SIZEOF_UINT16;
805 break;
806#endif
807#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
808 case CSR_MLME_HL_SYNC_CONFIRM_ID:
809 size += SIZEOF_UINT16;
810 size += SIZEOF_UINT16;
811 size += SIZEOF_UINT16;
812 size += SIZEOF_UINT16;
813 size += 48 / 8;
814 size += SIZEOF_UINT16;
815 break;
816#endif
817#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
818 case CSR_MLME_ADD_AUTONOMOUS_SCAN_REQUEST_ID:
819 size += SIZEOF_UINT16;
820 size += SIZEOF_UINT16;
821 size += SIZEOF_UINT16;
822 size += SIZEOF_UINT16;
823 size += SIZEOF_UINT16;
824 size += SIZEOF_UINT16;
825 size += SIZEOF_UINT16;
826 size += SIZEOF_UINT16;
827 size += SIZEOF_UINT16;
828 size += SIZEOF_UINT32;
829 size += SIZEOF_UINT16;
830 size += SIZEOF_UINT16;
831 break;
832#endif
833#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
834 case CSR_MLME_SET_REQUEST_ID:
835 size += SIZEOF_UINT16;
836 size += SIZEOF_UINT16;
837 size += SIZEOF_UINT16;
838 size += SIZEOF_UINT16;
839 break;
840#endif
841#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
842 case CSR_MLME_SM_START_REQUEST_ID:
843 size += SIZEOF_UINT16;
844 size += SIZEOF_UINT16;
845 size += SIZEOF_UINT16;
846 size += SIZEOF_UINT16;
847 size += SIZEOF_UINT16;
848 size += SIZEOF_UINT16;
849 size += SIZEOF_UINT16;
850 size += 48 / 8;
851 size += 48 / 8;
852 size += SIZEOF_UINT16;
853 size += SIZEOF_UINT16;
854 size += SIZEOF_UINT16;
855 break;
856#endif
857#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
858 case CSR_MLME_CONNECT_STATUS_CONFIRM_ID:
859 size += SIZEOF_UINT16;
860 size += SIZEOF_UINT16;
861 size += SIZEOF_UINT16;
862 size += SIZEOF_UINT16;
863 size += SIZEOF_UINT16;
864 size += SIZEOF_UINT16;
865 break;
866#endif
867#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
868 case CSR_MLME_DEL_AUTONOMOUS_SCAN_CONFIRM_ID:
869 size += SIZEOF_UINT16;
870 size += SIZEOF_UINT16;
871 size += SIZEOF_UINT16;
872 size += SIZEOF_UINT16;
873 size += SIZEOF_UINT16;
874 size += SIZEOF_UINT16;
875 size += SIZEOF_UINT16;
876 break;
877#endif
878#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
879 case CSR_MLME_DEL_PERIODIC_REQUEST_ID:
880 size += SIZEOF_UINT16;
881 size += SIZEOF_UINT16;
882 size += SIZEOF_UINT16;
883 size += SIZEOF_UINT16;
884 size += SIZEOF_UINT16;
885 size += SIZEOF_UINT16;
886 break;
887#endif
888#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
889 case CSR_MLME_SETKEYS_REQUEST_ID:
890 size += SIZEOF_UINT16;
891 size += SIZEOF_UINT16;
892 size += SIZEOF_UINT16;
893 size += SIZEOF_UINT16;
894 size += SIZEOF_UINT16;
895 size += SIZEOF_UINT16;
896 size += SIZEOF_UINT16;
897 size += SIZEOF_UINT16;
898 size += 48 / 8;
899 size += SIZEOF_UINT16;
900 size += SIZEOF_UINT16;
901 size += SIZEOF_UINT16;
902 size += SIZEOF_UINT16;
903 size += SIZEOF_UINT16;
904 size += SIZEOF_UINT16;
905 size += SIZEOF_UINT16;
906 size += SIZEOF_UINT16;
907 size += 32 / 8;
908 break;
909#endif
910#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
911 case CSR_MLME_PAUSE_AUTONOMOUS_SCAN_REQUEST_ID:
912 size += SIZEOF_UINT16;
913 size += SIZEOF_UINT16;
914 size += SIZEOF_UINT16;
915 size += SIZEOF_UINT16;
916 size += SIZEOF_UINT16;
917 size += SIZEOF_UINT16;
918 size += SIZEOF_UINT16;
919 break;
920#endif
921#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
922 case CSR_MLME_GET_REQUEST_ID:
923 size += SIZEOF_UINT16;
924 size += SIZEOF_UINT16;
925 size += SIZEOF_UINT16;
926 size += SIZEOF_UINT16;
927 break;
928#endif
929#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
930 case CSR_MLME_POWERMGT_REQUEST_ID:
931 size += SIZEOF_UINT16;
932 size += SIZEOF_UINT16;
933 size += SIZEOF_UINT16;
934 size += SIZEOF_UINT16;
935 size += SIZEOF_UINT16;
936 size += SIZEOF_UINT16;
937 size += SIZEOF_UINT16;
938 size += SIZEOF_UINT16;
939 size += SIZEOF_UINT16;
940 break;
941#endif
942 case CSR_MA_PACKET_ERROR_INDICATION_ID:
943 size += SIZEOF_UINT16;
944 size += SIZEOF_UINT16;
945 size += SIZEOF_UINT16;
946 size += SIZEOF_UINT16;
947 size += SIZEOF_UINT16;
948 size += 48 / 8;
949 size += SIZEOF_UINT16;
950 size += SIZEOF_UINT16;
951 break;
952#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
953 case CSR_MLME_ADD_PERIODIC_REQUEST_ID:
954 size += SIZEOF_UINT16;
955 size += SIZEOF_UINT16;
956 size += SIZEOF_UINT16;
957 size += SIZEOF_UINT16;
958 size += SIZEOF_UINT16;
959 size += SIZEOF_UINT16;
960 size += SIZEOF_UINT32;
961 size += SIZEOF_UINT16;
962 size += SIZEOF_UINT16;
963 size += SIZEOF_UINT16;
964 break;
965#endif
966#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
967 case CSR_MLME_ADD_TSPEC_REQUEST_ID:
968 size += SIZEOF_UINT16;
969 size += SIZEOF_UINT16;
970 size += SIZEOF_UINT16;
971 size += SIZEOF_UINT16;
972 size += SIZEOF_UINT16;
973 size += SIZEOF_UINT16;
974 size += SIZEOF_UINT16;
975 size += SIZEOF_UINT16;
976 size += SIZEOF_UINT16;
977 size += SIZEOF_UINT32;
978 size += SIZEOF_UINT32;
979 size += SIZEOF_UINT16;
980 break;
981#endif
982#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
983 case CSR_MLME_ADD_MULTICAST_ADDRESS_CONFIRM_ID:
984 size += SIZEOF_UINT16;
985 size += SIZEOF_UINT16;
986 size += SIZEOF_UINT16;
987 size += SIZEOF_UINT16;
988 size += SIZEOF_UINT16;
989 size += SIZEOF_UINT16;
990 break;
991#endif
992#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
993 case CSR_MLME_ADD_TSPEC_CONFIRM_ID:
994 size += SIZEOF_UINT16;
995 size += SIZEOF_UINT16;
996 size += SIZEOF_UINT16;
997 size += SIZEOF_UINT16;
998 size += SIZEOF_UINT16;
999 size += SIZEOF_UINT16;
1000 size += SIZEOF_UINT16;
1001 break;
1002#endif
1003#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
1004 case CSR_MLME_HL_SYNC_CANCEL_CONFIRM_ID:
1005 size += SIZEOF_UINT16;
1006 size += SIZEOF_UINT16;
1007 size += SIZEOF_UINT16;
1008 size += SIZEOF_UINT16;
1009 size += SIZEOF_UINT16;
1010 break;
1011#endif
1012#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
1013 case CSR_MLME_SCAN_CONFIRM_ID:
1014 size += SIZEOF_UINT16;
1015 size += SIZEOF_UINT16;
1016 size += SIZEOF_UINT16;
1017 size += SIZEOF_UINT16;
1018 size += SIZEOF_UINT16;
1019 size += SIZEOF_UINT16;
1020 break;
1021#endif
1022 case CSR_DEBUG_STRING_INDICATION_ID:
1023 size += SIZEOF_UINT16;
1024 size += SIZEOF_UINT16;
1025 size += SIZEOF_UINT16;
1026 size += SIZEOF_UINT16;
1027 break;
1028#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
1029 case CSR_MLME_ADD_TEMPLATE_CONFIRM_ID:
1030 size += SIZEOF_UINT16;
1031 size += SIZEOF_UINT16;
1032 size += SIZEOF_UINT16;
1033 size += SIZEOF_UINT16;
1034 size += SIZEOF_UINT16;
1035 size += SIZEOF_UINT16;
1036 size += SIZEOF_UINT16;
1037 break;
1038#endif
1039#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
1040 case CSR_MLME_BLOCKACK_ERROR_INDICATION_ID:
1041 size += SIZEOF_UINT16;
1042 size += SIZEOF_UINT16;
1043 size += SIZEOF_UINT16;
1044 size += SIZEOF_UINT16;
1045 size += SIZEOF_UINT16;
1046 size += SIZEOF_UINT16;
1047 size += 48 / 8;
1048 break;
1049#endif
1050#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
1051 case CSR_MLME_SET_CONFIRM_ID:
1052 size += SIZEOF_UINT16;
1053 size += SIZEOF_UINT16;
1054 size += SIZEOF_UINT16;
1055 size += SIZEOF_UINT16;
1056 size += SIZEOF_UINT16;
1057 size += SIZEOF_UINT16;
1058 break;
1059#endif
1060#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
1061 case CSR_MLME_MEASURE_REQUEST_ID:
1062 size += SIZEOF_UINT16;
1063 size += SIZEOF_UINT16;
1064 size += SIZEOF_UINT16;
1065 size += SIZEOF_UINT16;
1066 size += SIZEOF_UINT16;
1067 break;
1068#endif
1069#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
1070 case CSR_MLME_START_AGGREGATION_CONFIRM_ID:
1071 size += SIZEOF_UINT16;
1072 size += SIZEOF_UINT16;
1073 size += SIZEOF_UINT16;
1074 size += SIZEOF_UINT16;
1075 size += SIZEOF_UINT16;
1076 size += 48 / 8;
1077 size += SIZEOF_UINT16;
1078 size += SIZEOF_UINT16;
1079 size += SIZEOF_UINT16;
1080 size += SIZEOF_UINT16;
1081 break;
1082#endif
1083#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
1084 case CSR_MLME_STOP_MEASURE_CONFIRM_ID:
1085 size += SIZEOF_UINT16;
1086 size += SIZEOF_UINT16;
1087 size += SIZEOF_UINT16;
1088 size += SIZEOF_UINT16;
1089 size += SIZEOF_UINT16;
1090 size += SIZEOF_UINT16;
1091 break;
1092#endif
1093 case CSR_MA_PACKET_CONFIRM_ID:
1094 size += SIZEOF_UINT16;
1095 size += SIZEOF_UINT16;
1096 size += SIZEOF_UINT16;
1097 size += SIZEOF_UINT16;
1098 size += SIZEOF_UINT16;
1099 size += SIZEOF_UINT16;
1100 size += SIZEOF_UINT16;
1101 size += SIZEOF_UINT16;
1102 size += SIZEOF_UINT32;
1103 break;
1104#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
1105 case CSR_MLME_DEL_PERIODIC_CONFIRM_ID:
1106 size += SIZEOF_UINT16;
1107 size += SIZEOF_UINT16;
1108 size += SIZEOF_UINT16;
1109 size += SIZEOF_UINT16;
1110 size += SIZEOF_UINT16;
1111 size += SIZEOF_UINT16;
1112 size += SIZEOF_UINT16;
1113 break;
1114#endif
1115#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
1116 case CSR_MLME_STOP_MEASURE_REQUEST_ID:
1117 size += SIZEOF_UINT16;
1118 size += SIZEOF_UINT16;
1119 size += SIZEOF_UINT16;
1120 size += SIZEOF_UINT16;
1121 size += SIZEOF_UINT16;
1122 break;
1123#endif
1124 default:
1125 size = 0;
1126 }
1127 return size;
1128} /* get_packed_struct_size() */
1129
1130
1131/*
1132 * ---------------------------------------------------------------------------
1133 * read_unpack_signal
1134 *
1135 * Unpack a wire-format signal into a host-native structure.
1136 * This function handles any necessary conversions for endianness and
1137 * places no restrictions on packing or alignment for the structure
1138 * definition.
1139 *
1140 * WARNING: This function is auto-generated, DO NOT EDIT!
1141 *
1142 * Arguments:
1143 * ptr Signal buffer to unpack.
1144 * sig Pointer to destination structure to populate.
1145 *
1146 * Returns:
1147 * CSR_RESULT_SUCCESS on success,
 1148 * CSR_WIFI_HIP_RESULT_INVALID_VALUE if the signal ID was not recognised.
1149 * ---------------------------------------------------------------------------
1150 */
1151CsrResult read_unpack_signal(const u8 *ptr, CSR_SIGNAL *sig)
1152{
1153 s32 index = 0;
1154
1155 sig->SignalPrimitiveHeader.SignalId = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
1156 index += SIZEOF_UINT16;
1157
1158 sig->SignalPrimitiveHeader.ReceiverProcessId = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
1159 index += SIZEOF_UINT16;
1160
1161 sig->SignalPrimitiveHeader.SenderProcessId = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
1162 index += SIZEOF_UINT16;
1163
1164 switch (sig->SignalPrimitiveHeader.SignalId)
1165 {
1166#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
1167 case CSR_MLME_SET_PACKET_FILTER_CONFIRM_ID:
1168 sig->u.MlmeSetPacketFilterConfirm.Dummydataref1.SlotNumber = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
1169 index += SIZEOF_UINT16;
1170 sig->u.MlmeSetPacketFilterConfirm.Dummydataref1.DataLength = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
1171 index += SIZEOF_UINT16;
1172 sig->u.MlmeSetPacketFilterConfirm.Dummydataref2.SlotNumber = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
1173 index += SIZEOF_UINT16;
1174 sig->u.MlmeSetPacketFilterConfirm.Dummydataref2.DataLength = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
1175 index += SIZEOF_UINT16;
1176 sig->u.MlmeSetPacketFilterConfirm.VirtualInterfaceIdentifier = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
1177 index += SIZEOF_UINT16;
1178 sig->u.MlmeSetPacketFilterConfirm.ResultCode = (CSR_RESULT_CODE) CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
1179 index += SIZEOF_UINT16;
1180 break;
1181#endif
1182#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
1183 case CSR_MLME_SETKEYS_CONFIRM_ID:
1184 sig->u.MlmeSetkeysConfirm.Dummydataref1.SlotNumber = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
1185 index += SIZEOF_UINT16;
1186 sig->u.MlmeSetkeysConfirm.Dummydataref1.DataLength = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
1187 index += SIZEOF_UINT16;
1188 sig->u.MlmeSetkeysConfirm.Dummydataref2.SlotNumber = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
1189 index += SIZEOF_UINT16;
1190 sig->u.MlmeSetkeysConfirm.Dummydataref2.DataLength = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
1191 index += SIZEOF_UINT16;
1192 sig->u.MlmeSetkeysConfirm.VirtualInterfaceIdentifier = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
1193 index += SIZEOF_UINT16;
1194 sig->u.MlmeSetkeysConfirm.ResultCode = (CSR_RESULT_CODE) CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
1195 index += SIZEOF_UINT16;
1196 break;
1197#endif
1198#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
1199 case CSR_MLME_CONFIG_QUEUE_CONFIRM_ID:
1200 sig->u.MlmeConfigQueueConfirm.Dummydataref1.SlotNumber = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
1201 index += SIZEOF_UINT16;
1202 sig->u.MlmeConfigQueueConfirm.Dummydataref1.DataLength = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
1203 index += SIZEOF_UINT16;
1204 sig->u.MlmeConfigQueueConfirm.Dummydataref2.SlotNumber = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
1205 index += SIZEOF_UINT16;
1206 sig->u.MlmeConfigQueueConfirm.Dummydataref2.DataLength = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
1207 index += SIZEOF_UINT16;
1208 sig->u.MlmeConfigQueueConfirm.ResultCode = (CSR_RESULT_CODE) CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
1209 index += SIZEOF_UINT16;
1210 break;
1211#endif
1212#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
1213 case CSR_MLME_ADD_AUTONOMOUS_SCAN_CONFIRM_ID:
1214 sig->u.MlmeAddAutonomousScanConfirm.Dummydataref1.SlotNumber = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
1215 index += SIZEOF_UINT16;
1216 sig->u.MlmeAddAutonomousScanConfirm.Dummydataref1.DataLength = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
1217 index += SIZEOF_UINT16;
1218 sig->u.MlmeAddAutonomousScanConfirm.Dummydataref2.SlotNumber = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
1219 index += SIZEOF_UINT16;
1220 sig->u.MlmeAddAutonomousScanConfirm.Dummydataref2.DataLength = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
1221 index += SIZEOF_UINT16;
1222 sig->u.MlmeAddAutonomousScanConfirm.VirtualInterfaceIdentifier = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
1223 index += SIZEOF_UINT16;
1224 sig->u.MlmeAddAutonomousScanConfirm.ResultCode = (CSR_RESULT_CODE) CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
1225 index += SIZEOF_UINT16;
1226 sig->u.MlmeAddAutonomousScanConfirm.AutonomousScanId = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
1227 index += SIZEOF_UINT16;
1228 break;
1229#endif
1230#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
1231 case CSR_MLME_ADD_BLACKOUT_CONFIRM_ID:
1232 sig->u.MlmeAddBlackoutConfirm.Dummydataref1.SlotNumber = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
1233 index += SIZEOF_UINT16;
1234 sig->u.MlmeAddBlackoutConfirm.Dummydataref1.DataLength = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
1235 index += SIZEOF_UINT16;
1236 sig->u.MlmeAddBlackoutConfirm.Dummydataref2.SlotNumber = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
1237 index += SIZEOF_UINT16;
1238 sig->u.MlmeAddBlackoutConfirm.Dummydataref2.DataLength = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
1239 index += SIZEOF_UINT16;
1240 sig->u.MlmeAddBlackoutConfirm.VirtualInterfaceIdentifier = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
1241 index += SIZEOF_UINT16;
1242 sig->u.MlmeAddBlackoutConfirm.BlackoutId = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
1243 index += SIZEOF_UINT16;
1244 sig->u.MlmeAddBlackoutConfirm.ResultCode = (CSR_RESULT_CODE) CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
1245 index += SIZEOF_UINT16;
1246 break;
1247#endif
1248#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
1249 case CSR_MLME_DEL_BLACKOUT_REQUEST_ID:
1250 sig->u.MlmeDelBlackoutRequest.Dummydataref1.SlotNumber = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
1251 index += SIZEOF_UINT16;
1252 sig->u.MlmeDelBlackoutRequest.Dummydataref1.DataLength = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
1253 index += SIZEOF_UINT16;
1254 sig->u.MlmeDelBlackoutRequest.Dummydataref2.SlotNumber = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
1255 index += SIZEOF_UINT16;
1256 sig->u.MlmeDelBlackoutRequest.Dummydataref2.DataLength = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
1257 index += SIZEOF_UINT16;
1258 sig->u.MlmeDelBlackoutRequest.VirtualInterfaceIdentifier = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
1259 index += SIZEOF_UINT16;
1260 sig->u.MlmeDelBlackoutRequest.BlackoutId = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
1261 index += SIZEOF_UINT16;
1262 break;
1263#endif
1264#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
1265 case CSR_MLME_GET_KEY_SEQUENCE_CONFIRM_ID:
1266 sig->u.MlmeGetKeySequenceConfirm.Dummydataref1.SlotNumber = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
1267 index += SIZEOF_UINT16;
1268 sig->u.MlmeGetKeySequenceConfirm.Dummydataref1.DataLength = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
1269 index += SIZEOF_UINT16;
1270 sig->u.MlmeGetKeySequenceConfirm.Dummydataref2.SlotNumber = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
1271 index += SIZEOF_UINT16;
1272 sig->u.MlmeGetKeySequenceConfirm.Dummydataref2.DataLength = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
1273 index += SIZEOF_UINT16;
1274 sig->u.MlmeGetKeySequenceConfirm.VirtualInterfaceIdentifier = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
1275 index += SIZEOF_UINT16;
1276 sig->u.MlmeGetKeySequenceConfirm.ResultCode = (CSR_RESULT_CODE) CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
1277 index += SIZEOF_UINT16;
1278 sig->u.MlmeGetKeySequenceConfirm.SequenceNumber[0] = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
1279 index += SIZEOF_UINT16;
1280 sig->u.MlmeGetKeySequenceConfirm.SequenceNumber[1] = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
1281 index += SIZEOF_UINT16;
1282 sig->u.MlmeGetKeySequenceConfirm.SequenceNumber[2] = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
1283 index += SIZEOF_UINT16;
1284 sig->u.MlmeGetKeySequenceConfirm.SequenceNumber[3] = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
1285 index += SIZEOF_UINT16;
1286 sig->u.MlmeGetKeySequenceConfirm.SequenceNumber[4] = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
1287 index += SIZEOF_UINT16;
1288 sig->u.MlmeGetKeySequenceConfirm.SequenceNumber[5] = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
1289 index += SIZEOF_UINT16;
1290 sig->u.MlmeGetKeySequenceConfirm.SequenceNumber[6] = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
1291 index += SIZEOF_UINT16;
1292 sig->u.MlmeGetKeySequenceConfirm.SequenceNumber[7] = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
1293 index += SIZEOF_UINT16;
1294 break;
1295#endif
1296#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
1297 case CSR_MLME_SM_START_CONFIRM_ID:
1298 sig->u.MlmeSmStartConfirm.Dummydataref1.SlotNumber = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
1299 index += SIZEOF_UINT16;
1300 sig->u.MlmeSmStartConfirm.Dummydataref1.DataLength = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
1301 index += SIZEOF_UINT16;
1302 sig->u.MlmeSmStartConfirm.Dummydataref2.SlotNumber = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
1303 index += SIZEOF_UINT16;
1304 sig->u.MlmeSmStartConfirm.Dummydataref2.DataLength = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
1305 index += SIZEOF_UINT16;
1306 sig->u.MlmeSmStartConfirm.VirtualInterfaceIdentifier = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
1307 index += SIZEOF_UINT16;
1308 sig->u.MlmeSmStartConfirm.ResultCode = (CSR_RESULT_CODE) CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
1309 index += SIZEOF_UINT16;
1310 break;
1311#endif
1312#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
1313 case CSR_MLME_STOP_AGGREGATION_CONFIRM_ID:
1314 sig->u.MlmeStopAggregationConfirm.Dummydataref1.SlotNumber = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
1315 index += SIZEOF_UINT16;
1316 sig->u.MlmeStopAggregationConfirm.Dummydataref1.DataLength = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
1317 index += SIZEOF_UINT16;
1318 sig->u.MlmeStopAggregationConfirm.Dummydataref2.SlotNumber = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
1319 index += SIZEOF_UINT16;
1320 sig->u.MlmeStopAggregationConfirm.Dummydataref2.DataLength = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
1321 index += SIZEOF_UINT16;
1322 sig->u.MlmeStopAggregationConfirm.VirtualInterfaceIdentifier = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
1323 index += SIZEOF_UINT16;
1324 memcpy(sig->u.MlmeStopAggregationConfirm.PeerQstaAddress.x, &ptr[index], 48 / 8);
1325 index += 48 / 8;
1326 sig->u.MlmeStopAggregationConfirm.UserPriority = (CSR_PRIORITY) CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
1327 index += SIZEOF_UINT16;
1328 sig->u.MlmeStopAggregationConfirm.Direction = (CSR_DIRECTION) CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
1329 index += SIZEOF_UINT16;
1330 sig->u.MlmeStopAggregationConfirm.ResultCode = (CSR_RESULT_CODE) CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
1331 index += SIZEOF_UINT16;
1332 break;
1333#endif
1334#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
1335 case CSR_MLME_DEL_TSPEC_REQUEST_ID:
1336 sig->u.MlmeDelTspecRequest.Dummydataref1.SlotNumber = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
1337 index += SIZEOF_UINT16;
1338 sig->u.MlmeDelTspecRequest.Dummydataref1.DataLength = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
1339 index += SIZEOF_UINT16;
1340 sig->u.MlmeDelTspecRequest.Dummydataref2.SlotNumber = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
1341 index += SIZEOF_UINT16;
1342 sig->u.MlmeDelTspecRequest.Dummydataref2.DataLength = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
1343 index += SIZEOF_UINT16;
1344 sig->u.MlmeDelTspecRequest.VirtualInterfaceIdentifier = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
1345 index += SIZEOF_UINT16;
1346 sig->u.MlmeDelTspecRequest.UserPriority = (CSR_PRIORITY) CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
1347 index += SIZEOF_UINT16;
1348 sig->u.MlmeDelTspecRequest.Direction = (CSR_DIRECTION) CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
1349 index += SIZEOF_UINT16;
1350 break;
1351#endif
1352 case CSR_DEBUG_WORD16_INDICATION_ID:
1353 sig->u.DebugWord16Indication.Dummydataref1.SlotNumber = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
1354 index += SIZEOF_UINT16;
1355 sig->u.DebugWord16Indication.Dummydataref1.DataLength = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
1356 index += SIZEOF_UINT16;
1357 sig->u.DebugWord16Indication.Dummydataref2.SlotNumber = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
1358 index += SIZEOF_UINT16;
1359 sig->u.DebugWord16Indication.Dummydataref2.DataLength = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
1360 index += SIZEOF_UINT16;
1361 sig->u.DebugWord16Indication.DebugWords[0] = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
1362 index += SIZEOF_UINT16;
1363 sig->u.DebugWord16Indication.DebugWords[1] = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
1364 index += SIZEOF_UINT16;
1365 sig->u.DebugWord16Indication.DebugWords[2] = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
1366 index += SIZEOF_UINT16;
1367 sig->u.DebugWord16Indication.DebugWords[3] = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
1368 index += SIZEOF_UINT16;
1369 sig->u.DebugWord16Indication.DebugWords[4] = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
1370 index += SIZEOF_UINT16;
1371 sig->u.DebugWord16Indication.DebugWords[5] = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
1372 index += SIZEOF_UINT16;
1373 sig->u.DebugWord16Indication.DebugWords[6] = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
1374 index += SIZEOF_UINT16;
1375 sig->u.DebugWord16Indication.DebugWords[7] = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
1376 index += SIZEOF_UINT16;
1377 sig->u.DebugWord16Indication.DebugWords[8] = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
1378 index += SIZEOF_UINT16;
1379 sig->u.DebugWord16Indication.DebugWords[9] = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
1380 index += SIZEOF_UINT16;
1381 sig->u.DebugWord16Indication.DebugWords[10] = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
1382 index += SIZEOF_UINT16;
1383 sig->u.DebugWord16Indication.DebugWords[11] = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
1384 index += SIZEOF_UINT16;
1385 sig->u.DebugWord16Indication.DebugWords[12] = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
1386 index += SIZEOF_UINT16;
1387 sig->u.DebugWord16Indication.DebugWords[13] = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
1388 index += SIZEOF_UINT16;
1389 sig->u.DebugWord16Indication.DebugWords[14] = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
1390 index += SIZEOF_UINT16;
1391 sig->u.DebugWord16Indication.DebugWords[15] = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
1392 index += SIZEOF_UINT16;
1393 break;
1394 case CSR_DEBUG_GENERIC_CONFIRM_ID:
1395 sig->u.DebugGenericConfirm.DebugVariable.SlotNumber = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
1396 index += SIZEOF_UINT16;
1397 sig->u.DebugGenericConfirm.DebugVariable.DataLength = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
1398 index += SIZEOF_UINT16;
1399 sig->u.DebugGenericConfirm.Dummydataref2.SlotNumber = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
1400 index += SIZEOF_UINT16;
1401 sig->u.DebugGenericConfirm.Dummydataref2.DataLength = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
1402 index += SIZEOF_UINT16;
1403 sig->u.DebugGenericConfirm.DebugWords[0] = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
1404 index += SIZEOF_UINT16;
1405 sig->u.DebugGenericConfirm.DebugWords[1] = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
1406 index += SIZEOF_UINT16;
1407 sig->u.DebugGenericConfirm.DebugWords[2] = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
1408 index += SIZEOF_UINT16;
1409 sig->u.DebugGenericConfirm.DebugWords[3] = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
1410 index += SIZEOF_UINT16;
1411 sig->u.DebugGenericConfirm.DebugWords[4] = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
1412 index += SIZEOF_UINT16;
1413 sig->u.DebugGenericConfirm.DebugWords[5] = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
1414 index += SIZEOF_UINT16;
1415 sig->u.DebugGenericConfirm.DebugWords[6] = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
1416 index += SIZEOF_UINT16;
1417 sig->u.DebugGenericConfirm.DebugWords[7] = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
1418 index += SIZEOF_UINT16;
1419 break;
1420 case CSR_MA_PACKET_INDICATION_ID:
1421 sig->u.MaPacketIndication.Data.SlotNumber = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
1422 index += SIZEOF_UINT16;
1423 sig->u.MaPacketIndication.Data.DataLength = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
1424 index += SIZEOF_UINT16;
1425 sig->u.MaPacketIndication.Dummydataref2.SlotNumber = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
1426 index += SIZEOF_UINT16;
1427 sig->u.MaPacketIndication.Dummydataref2.DataLength = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
1428 index += SIZEOF_UINT16;
1429 sig->u.MaPacketIndication.VirtualInterfaceIdentifier = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
1430 index += SIZEOF_UINT16;
1431 memcpy(sig->u.MaPacketIndication.LocalTime.x, &ptr[index], 64 / 8);
1432 index += 64 / 8;
1433 sig->u.MaPacketIndication.Ifindex = (CSR_IFINTERFACE) CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
1434 index += SIZEOF_UINT16;
1435 sig->u.MaPacketIndication.Channel = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
1436 index += SIZEOF_UINT16;
1437 sig->u.MaPacketIndication.ReceptionStatus = (CSR_RECEPTION_STATUS) CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
1438 index += SIZEOF_UINT16;
1439 sig->u.MaPacketIndication.Rssi = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
1440 index += SIZEOF_UINT16;
1441 sig->u.MaPacketIndication.Snr = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
1442 index += SIZEOF_UINT16;
1443 sig->u.MaPacketIndication.ReceivedRate = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
1444 index += SIZEOF_UINT16;
1445 break;
1446 case CSR_MLME_SET_TIM_REQUEST_ID:
1447 sig->u.MlmeSetTimRequest.Dummydataref1.SlotNumber = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
1448 index += SIZEOF_UINT16;
1449 sig->u.MlmeSetTimRequest.Dummydataref1.DataLength = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
1450 index += SIZEOF_UINT16;
1451 sig->u.MlmeSetTimRequest.Dummydataref2.SlotNumber = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
1452 index += SIZEOF_UINT16;
1453 sig->u.MlmeSetTimRequest.Dummydataref2.DataLength = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
1454 index += SIZEOF_UINT16;
1455 sig->u.MlmeSetTimRequest.VirtualInterfaceIdentifier = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
1456 index += SIZEOF_UINT16;
1457 sig->u.MlmeSetTimRequest.AssociationId = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
1458 index += SIZEOF_UINT16;
1459 sig->u.MlmeSetTimRequest.TimValue = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
1460 index += SIZEOF_UINT16;
1461 break;
1462#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
1463 case CSR_MLME_CONNECTED_INDICATION_ID:
1464 sig->u.MlmeConnectedIndication.Dummydataref1.SlotNumber = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
1465 index += SIZEOF_UINT16;
1466 sig->u.MlmeConnectedIndication.Dummydataref1.DataLength = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
1467 index += SIZEOF_UINT16;
1468 sig->u.MlmeConnectedIndication.Dummydataref2.SlotNumber = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
1469 index += SIZEOF_UINT16;
1470 sig->u.MlmeConnectedIndication.Dummydataref2.DataLength = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
1471 index += SIZEOF_UINT16;
1472 sig->u.MlmeConnectedIndication.VirtualInterfaceIdentifier = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
1473 index += SIZEOF_UINT16;
1474 sig->u.MlmeConnectedIndication.ConnectionStatus = (CSR_CONNECTION_STATUS) CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
1475 index += SIZEOF_UINT16;
1476 memcpy(sig->u.MlmeConnectedIndication.PeerMacAddress.x, &ptr[index], 48 / 8);
1477 index += 48 / 8;
1478 break;
1479#endif
1480#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
1481 case CSR_MLME_DEL_RX_TRIGGER_REQUEST_ID:
1482 sig->u.MlmeDelRxTriggerRequest.Dummydataref1.SlotNumber = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
1483 index += SIZEOF_UINT16;
1484 sig->u.MlmeDelRxTriggerRequest.Dummydataref1.DataLength = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
1485 index += SIZEOF_UINT16;
1486 sig->u.MlmeDelRxTriggerRequest.Dummydataref2.SlotNumber = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
1487 index += SIZEOF_UINT16;
1488 sig->u.MlmeDelRxTriggerRequest.Dummydataref2.DataLength = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
1489 index += SIZEOF_UINT16;
1490 sig->u.MlmeDelRxTriggerRequest.VirtualInterfaceIdentifier = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
1491 index += SIZEOF_UINT16;
1492 sig->u.MlmeDelRxTriggerRequest.TriggerId = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
1493 index += SIZEOF_UINT16;
1494 break;
1495#endif
1496#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
1497 case CSR_MLME_TRIGGERED_GET_INDICATION_ID:
1498 sig->u.MlmeTriggeredGetIndication.MibAttributeValue.SlotNumber = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
1499 index += SIZEOF_UINT16;
1500 sig->u.MlmeTriggeredGetIndication.MibAttributeValue.DataLength = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
1501 index += SIZEOF_UINT16;
1502 sig->u.MlmeTriggeredGetIndication.Dummydataref2.SlotNumber = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
1503 index += SIZEOF_UINT16;
1504 sig->u.MlmeTriggeredGetIndication.Dummydataref2.DataLength = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
1505 index += SIZEOF_UINT16;
1506 sig->u.MlmeTriggeredGetIndication.VirtualInterfaceIdentifier = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
1507 index += SIZEOF_UINT16;
1508 sig->u.MlmeTriggeredGetIndication.Status = (CSR_MIB_STATUS) CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
1509 index += SIZEOF_UINT16;
1510 sig->u.MlmeTriggeredGetIndication.ErrorIndex = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
1511 index += SIZEOF_UINT16;
1512 sig->u.MlmeTriggeredGetIndication.TriggeredId = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
1513 index += SIZEOF_UINT16;
1514 break;
1515#endif
1516#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
/*
 * NOTE(review): the cases below are part of a large, machine-generated
 * switch that unpacks a wire-format HIP signal (little-endian byte
 * stream at 'ptr') into the host-order union 'sig->u'.  The pattern is
 * uniform: every case first reads two data references (SlotNumber +
 * DataLength, u16 each), then the signal-specific fields; 'index' is
 * advanced by exactly the number of bytes consumed by each read.
 * MAC addresses are copied as raw 48/8 = 6 bytes.  Field order must
 * match the firmware wire format exactly — do not reorder lines.
 * Cases wrapped in CSR_WIFI_HIP_FULL_SIGNAL_SET are compiled out in
 * the reduced ("lean") signal-set build.
 */
/* MLME-SCAN.request: channel list + IE data refs, then scan parameters. */
 1517 case CSR_MLME_SCAN_REQUEST_ID:
 1518 sig->u.MlmeScanRequest.ChannelList.SlotNumber = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
 1519 index += SIZEOF_UINT16;
 1520 sig->u.MlmeScanRequest.ChannelList.DataLength = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
 1521 index += SIZEOF_UINT16;
 1522 sig->u.MlmeScanRequest.InformationElements.SlotNumber = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
 1523 index += SIZEOF_UINT16;
 1524 sig->u.MlmeScanRequest.InformationElements.DataLength = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
 1525 index += SIZEOF_UINT16;
 1526 sig->u.MlmeScanRequest.VirtualInterfaceIdentifier = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
 1527 index += SIZEOF_UINT16;
 1528 sig->u.MlmeScanRequest.Ifindex = (CSR_IFINTERFACE) CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
 1529 index += SIZEOF_UINT16;
 1530 sig->u.MlmeScanRequest.ScanType = (CSR_SCAN_TYPE) CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
 1531 index += SIZEOF_UINT16;
 1532 sig->u.MlmeScanRequest.ProbeDelay = CSR_GET_UINT32_FROM_LITTLE_ENDIAN(ptr + index);
 1533 index += SIZEOF_UINT32;
 1534 sig->u.MlmeScanRequest.MinChannelTime = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
 1535 index += SIZEOF_UINT16;
 1536 sig->u.MlmeScanRequest.MaxChannelTime = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
 1537 index += SIZEOF_UINT16;
 1538 break;
 1539#endif
 1540#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
/* MLME-DELETEKEYS.confirm: two dummy data refs, VIF id, result code. */
 1541 case CSR_MLME_DELETEKEYS_CONFIRM_ID:
 1542 sig->u.MlmeDeletekeysConfirm.Dummydataref1.SlotNumber = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
 1543 index += SIZEOF_UINT16;
 1544 sig->u.MlmeDeletekeysConfirm.Dummydataref1.DataLength = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
 1545 index += SIZEOF_UINT16;
 1546 sig->u.MlmeDeletekeysConfirm.Dummydataref2.SlotNumber = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
 1547 index += SIZEOF_UINT16;
 1548 sig->u.MlmeDeletekeysConfirm.Dummydataref2.DataLength = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
 1549 index += SIZEOF_UINT16;
 1550 sig->u.MlmeDeletekeysConfirm.VirtualInterfaceIdentifier = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
 1551 index += SIZEOF_UINT16;
 1552 sig->u.MlmeDeletekeysConfirm.ResultCode = (CSR_RESULT_CODE) CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
 1553 index += SIZEOF_UINT16;
 1554 break;
 1555#endif
 1556#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
/* MLME-GET-NEXT.request: MIB attribute data ref only (no extra fields). */
 1557 case CSR_MLME_GET_NEXT_REQUEST_ID:
 1558 sig->u.MlmeGetNextRequest.MibAttribute.SlotNumber = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
 1559 index += SIZEOF_UINT16;
 1560 sig->u.MlmeGetNextRequest.MibAttribute.DataLength = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
 1561 index += SIZEOF_UINT16;
 1562 sig->u.MlmeGetNextRequest.Dummydataref2.SlotNumber = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
 1563 index += SIZEOF_UINT16;
 1564 sig->u.MlmeGetNextRequest.Dummydataref2.DataLength = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
 1565 index += SIZEOF_UINT16;
 1566 break;
 1567#endif
 1568#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
/* MLME-SET-CHANNEL.confirm: dummy refs, VIF id, result code. */
 1569 case CSR_MLME_SET_CHANNEL_CONFIRM_ID:
 1570 sig->u.MlmeSetChannelConfirm.Dummydataref1.SlotNumber = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
 1571 index += SIZEOF_UINT16;
 1572 sig->u.MlmeSetChannelConfirm.Dummydataref1.DataLength = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
 1573 index += SIZEOF_UINT16;
 1574 sig->u.MlmeSetChannelConfirm.Dummydataref2.SlotNumber = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
 1575 index += SIZEOF_UINT16;
 1576 sig->u.MlmeSetChannelConfirm.Dummydataref2.DataLength = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
 1577 index += SIZEOF_UINT16;
 1578 sig->u.MlmeSetChannelConfirm.VirtualInterfaceIdentifier = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
 1579 index += SIZEOF_UINT16;
 1580 sig->u.MlmeSetChannelConfirm.ResultCode = (CSR_RESULT_CODE) CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
 1581 index += SIZEOF_UINT16;
 1582 break;
 1583#endif
 1584#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
/* MLME-START-AGGREGATION.request: peer MAC (6 raw bytes) + block-ack setup. */
 1585 case CSR_MLME_START_AGGREGATION_REQUEST_ID:
 1586 sig->u.MlmeStartAggregationRequest.Dummydataref1.SlotNumber = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
 1587 index += SIZEOF_UINT16;
 1588 sig->u.MlmeStartAggregationRequest.Dummydataref1.DataLength = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
 1589 index += SIZEOF_UINT16;
 1590 sig->u.MlmeStartAggregationRequest.Dummydataref2.SlotNumber = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
 1591 index += SIZEOF_UINT16;
 1592 sig->u.MlmeStartAggregationRequest.Dummydataref2.DataLength = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
 1593 index += SIZEOF_UINT16;
 1594 sig->u.MlmeStartAggregationRequest.VirtualInterfaceIdentifier = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
 1595 index += SIZEOF_UINT16;
 1596 memcpy(sig->u.MlmeStartAggregationRequest.PeerQstaAddress.x, &ptr[index], 48 / 8);
 1597 index += 48 / 8;
 1598 sig->u.MlmeStartAggregationRequest.UserPriority = (CSR_PRIORITY) CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
 1599 index += SIZEOF_UINT16;
 1600 sig->u.MlmeStartAggregationRequest.Direction = (CSR_DIRECTION) CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
 1601 index += SIZEOF_UINT16;
 1602 sig->u.MlmeStartAggregationRequest.StartingSequenceNumber = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
 1603 index += SIZEOF_UINT16;
 1604 sig->u.MlmeStartAggregationRequest.BufferSize = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
 1605 index += SIZEOF_UINT16;
 1606 sig->u.MlmeStartAggregationRequest.BlockAckTimeout = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
 1607 index += SIZEOF_UINT16;
 1608 break;
 1609#endif
 1610#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
/* MLME-HL-SYNC.request: dummy refs then a 6-byte group MAC address. */
 1611 case CSR_MLME_HL_SYNC_REQUEST_ID:
 1612 sig->u.MlmeHlSyncRequest.Dummydataref1.SlotNumber = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
 1613 index += SIZEOF_UINT16;
 1614 sig->u.MlmeHlSyncRequest.Dummydataref1.DataLength = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
 1615 index += SIZEOF_UINT16;
 1616 sig->u.MlmeHlSyncRequest.Dummydataref2.SlotNumber = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
 1617 index += SIZEOF_UINT16;
 1618 sig->u.MlmeHlSyncRequest.Dummydataref2.DataLength = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
 1619 index += SIZEOF_UINT16;
 1620 memcpy(sig->u.MlmeHlSyncRequest.GroupAddress.x, &ptr[index], 48 / 8);
 1621 index += 48 / 8;
 1622 break;
 1623#endif
/* DEBUG-GENERIC.request: one debug data ref + eight u16 debug words.
 * Not guarded by CSR_WIFI_HIP_FULL_SIGNAL_SET — always compiled in. */
 1624 case CSR_DEBUG_GENERIC_REQUEST_ID:
 1625 sig->u.DebugGenericRequest.DebugVariable.SlotNumber = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
 1626 index += SIZEOF_UINT16;
 1627 sig->u.DebugGenericRequest.DebugVariable.DataLength = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
 1628 index += SIZEOF_UINT16;
 1629 sig->u.DebugGenericRequest.Dummydataref2.SlotNumber = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
 1630 index += SIZEOF_UINT16;
 1631 sig->u.DebugGenericRequest.Dummydataref2.DataLength = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
 1632 index += SIZEOF_UINT16;
 1633 sig->u.DebugGenericRequest.DebugWords[0] = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
 1634 index += SIZEOF_UINT16;
 1635 sig->u.DebugGenericRequest.DebugWords[1] = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
 1636 index += SIZEOF_UINT16;
 1637 sig->u.DebugGenericRequest.DebugWords[2] = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
 1638 index += SIZEOF_UINT16;
 1639 sig->u.DebugGenericRequest.DebugWords[3] = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
 1640 index += SIZEOF_UINT16;
 1641 sig->u.DebugGenericRequest.DebugWords[4] = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
 1642 index += SIZEOF_UINT16;
 1643 sig->u.DebugGenericRequest.DebugWords[5] = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
 1644 index += SIZEOF_UINT16;
 1645 sig->u.DebugGenericRequest.DebugWords[6] = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
 1646 index += SIZEOF_UINT16;
 1647 sig->u.DebugGenericRequest.DebugWords[7] = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
 1648 index += SIZEOF_UINT16;
 1649 break;
 1650#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
/* MLME-LEAVE.confirm: dummy refs, VIF id, result code. */
 1651 case CSR_MLME_LEAVE_CONFIRM_ID:
 1652 sig->u.MlmeLeaveConfirm.Dummydataref1.SlotNumber = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
 1653 index += SIZEOF_UINT16;
 1654 sig->u.MlmeLeaveConfirm.Dummydataref1.DataLength = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
 1655 index += SIZEOF_UINT16;
 1656 sig->u.MlmeLeaveConfirm.Dummydataref2.SlotNumber = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
 1657 index += SIZEOF_UINT16;
 1658 sig->u.MlmeLeaveConfirm.Dummydataref2.DataLength = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
 1659 index += SIZEOF_UINT16;
 1660 sig->u.MlmeLeaveConfirm.VirtualInterfaceIdentifier = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
 1661 index += SIZEOF_UINT16;
 1662 sig->u.MlmeLeaveConfirm.ResultCode = (CSR_RESULT_CODE) CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
 1663 index += SIZEOF_UINT16;
 1664 break;
 1665#endif
 1666#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
/* MLME-DEL-TRIGGERED-GET.request: dummy refs, VIF id, triggered-get id. */
 1667 case CSR_MLME_DEL_TRIGGERED_GET_REQUEST_ID:
 1668 sig->u.MlmeDelTriggeredGetRequest.Dummydataref1.SlotNumber = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
 1669 index += SIZEOF_UINT16;
 1670 sig->u.MlmeDelTriggeredGetRequest.Dummydataref1.DataLength = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
 1671 index += SIZEOF_UINT16;
 1672 sig->u.MlmeDelTriggeredGetRequest.Dummydataref2.SlotNumber = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
 1673 index += SIZEOF_UINT16;
 1674 sig->u.MlmeDelTriggeredGetRequest.Dummydataref2.DataLength = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
 1675 index += SIZEOF_UINT16;
 1676 sig->u.MlmeDelTriggeredGetRequest.VirtualInterfaceIdentifier = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
 1677 index += SIZEOF_UINT16;
 1678 sig->u.MlmeDelTriggeredGetRequest.TriggeredId = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
 1679 index += SIZEOF_UINT16;
 1680 break;
 1681#endif
 1682#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
/* MLME-ADD-MULTICAST-ADDRESS.request: address-list data ref + group count. */
 1683 case CSR_MLME_ADD_MULTICAST_ADDRESS_REQUEST_ID:
 1684 sig->u.MlmeAddMulticastAddressRequest.Data.SlotNumber = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
 1685 index += SIZEOF_UINT16;
 1686 sig->u.MlmeAddMulticastAddressRequest.Data.DataLength = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
 1687 index += SIZEOF_UINT16;
 1688 sig->u.MlmeAddMulticastAddressRequest.Dummydataref2.SlotNumber = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
 1689 index += SIZEOF_UINT16;
 1690 sig->u.MlmeAddMulticastAddressRequest.Dummydataref2.DataLength = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
 1691 index += SIZEOF_UINT16;
 1692 sig->u.MlmeAddMulticastAddressRequest.VirtualInterfaceIdentifier = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
 1693 index += SIZEOF_UINT16;
 1694 sig->u.MlmeAddMulticastAddressRequest.NumberOfMulticastGroupAddresses = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
 1695 index += SIZEOF_UINT16;
 1696 break;
 1697#endif
 1698#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
/* MLME-RESET.request: STA MAC (6 raw bytes) + set-default-MIB flag. */
 1699 case CSR_MLME_RESET_REQUEST_ID:
 1700 sig->u.MlmeResetRequest.Dummydataref1.SlotNumber = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
 1701 index += SIZEOF_UINT16;
 1702 sig->u.MlmeResetRequest.Dummydataref1.DataLength = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
 1703 index += SIZEOF_UINT16;
 1704 sig->u.MlmeResetRequest.Dummydataref2.SlotNumber = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
 1705 index += SIZEOF_UINT16;
 1706 sig->u.MlmeResetRequest.Dummydataref2.DataLength = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
 1707 index += SIZEOF_UINT16;
 1708 memcpy(sig->u.MlmeResetRequest.StaAddress.x, &ptr[index], 48 / 8);
 1709 index += 48 / 8;
 1710 sig->u.MlmeResetRequest.SetDefaultMib = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
 1711 index += SIZEOF_UINT16;
 1712 break;
 1713#endif
 1714#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
/* MLME-SCAN-CANCEL.request: dummy refs + VIF id only. */
 1715 case CSR_MLME_SCAN_CANCEL_REQUEST_ID:
 1716 sig->u.MlmeScanCancelRequest.Dummydataref1.SlotNumber = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
 1717 index += SIZEOF_UINT16;
 1718 sig->u.MlmeScanCancelRequest.Dummydataref1.DataLength = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
 1719 index += SIZEOF_UINT16;
 1720 sig->u.MlmeScanCancelRequest.Dummydataref2.SlotNumber = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
 1721 index += SIZEOF_UINT16;
 1722 sig->u.MlmeScanCancelRequest.Dummydataref2.DataLength = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
 1723 index += SIZEOF_UINT16;
 1724 sig->u.MlmeScanCancelRequest.VirtualInterfaceIdentifier = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
 1725 index += SIZEOF_UINT16;
 1726 break;
 1727#endif
 1728#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
/* MLME-ADD-TRIGGERED-GET.confirm: result code precedes the triggered id. */
 1729 case CSR_MLME_ADD_TRIGGERED_GET_CONFIRM_ID:
 1730 sig->u.MlmeAddTriggeredGetConfirm.Dummydataref1.SlotNumber = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
 1731 index += SIZEOF_UINT16;
 1732 sig->u.MlmeAddTriggeredGetConfirm.Dummydataref1.DataLength = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
 1733 index += SIZEOF_UINT16;
 1734 sig->u.MlmeAddTriggeredGetConfirm.Dummydataref2.SlotNumber = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
 1735 index += SIZEOF_UINT16;
 1736 sig->u.MlmeAddTriggeredGetConfirm.Dummydataref2.DataLength = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
 1737 index += SIZEOF_UINT16;
 1738 sig->u.MlmeAddTriggeredGetConfirm.VirtualInterfaceIdentifier = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
 1739 index += SIZEOF_UINT16;
 1740 sig->u.MlmeAddTriggeredGetConfirm.ResultCode = (CSR_RESULT_CODE) CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
 1741 index += SIZEOF_UINT16;
 1742 sig->u.MlmeAddTriggeredGetConfirm.TriggeredId = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
 1743 index += SIZEOF_UINT16;
 1744 break;
 1745#endif
 1746#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
/* MLME-SET-PACKET-FILTER.request: filter mode (u16) + ARP address (u32). */
 1747 case CSR_MLME_SET_PACKET_FILTER_REQUEST_ID:
 1748 sig->u.MlmeSetPacketFilterRequest.InformationElements.SlotNumber = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
 1749 index += SIZEOF_UINT16;
 1750 sig->u.MlmeSetPacketFilterRequest.InformationElements.DataLength = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
 1751 index += SIZEOF_UINT16;
 1752 sig->u.MlmeSetPacketFilterRequest.Dummydataref2.SlotNumber = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
 1753 index += SIZEOF_UINT16;
 1754 sig->u.MlmeSetPacketFilterRequest.Dummydataref2.DataLength = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
 1755 index += SIZEOF_UINT16;
 1756 sig->u.MlmeSetPacketFilterRequest.VirtualInterfaceIdentifier = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
 1757 index += SIZEOF_UINT16;
 1758 sig->u.MlmeSetPacketFilterRequest.PacketFilterMode = (CSR_PACKET_FILTER_MODE) CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
 1759 index += SIZEOF_UINT16;
 1760 sig->u.MlmeSetPacketFilterRequest.ArpFilterAddress = CSR_GET_UINT32_FROM_LITTLE_ENDIAN(ptr + index);
 1761 index += SIZEOF_UINT32;
 1762 break;
 1763#endif
 1764#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
/* MLME-DEL-RX-TRIGGER.confirm: trigger id precedes the result code. */
 1765 case CSR_MLME_DEL_RX_TRIGGER_CONFIRM_ID:
 1766 sig->u.MlmeDelRxTriggerConfirm.Dummydataref1.SlotNumber = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
 1767 index += SIZEOF_UINT16;
 1768 sig->u.MlmeDelRxTriggerConfirm.Dummydataref1.DataLength = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
 1769 index += SIZEOF_UINT16;
 1770 sig->u.MlmeDelRxTriggerConfirm.Dummydataref2.SlotNumber = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
 1771 index += SIZEOF_UINT16;
 1772 sig->u.MlmeDelRxTriggerConfirm.Dummydataref2.DataLength = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
 1773 index += SIZEOF_UINT16;
 1774 sig->u.MlmeDelRxTriggerConfirm.VirtualInterfaceIdentifier = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
 1775 index += SIZEOF_UINT16;
 1776 sig->u.MlmeDelRxTriggerConfirm.TriggerId = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
 1777 index += SIZEOF_UINT16;
 1778 sig->u.MlmeDelRxTriggerConfirm.ResultCode = (CSR_RESULT_CODE) CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
 1779 index += SIZEOF_UINT16;
 1780 break;
 1781#endif
 1782#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
/* MLME-CONNECT-STATUS.request: status, STA MAC, association id + caps. */
 1783 case CSR_MLME_CONNECT_STATUS_REQUEST_ID:
 1784 sig->u.MlmeConnectStatusRequest.InformationElements.SlotNumber = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
 1785 index += SIZEOF_UINT16;
 1786 sig->u.MlmeConnectStatusRequest.InformationElements.DataLength = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
 1787 index += SIZEOF_UINT16;
 1788 sig->u.MlmeConnectStatusRequest.Dummydataref2.SlotNumber = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
 1789 index += SIZEOF_UINT16;
 1790 sig->u.MlmeConnectStatusRequest.Dummydataref2.DataLength = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
 1791 index += SIZEOF_UINT16;
 1792 sig->u.MlmeConnectStatusRequest.VirtualInterfaceIdentifier = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
 1793 index += SIZEOF_UINT16;
 1794 sig->u.MlmeConnectStatusRequest.ConnectionStatus = (CSR_CONNECTION_STATUS) CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
 1795 index += SIZEOF_UINT16;
 1796 memcpy(sig->u.MlmeConnectStatusRequest.StaAddress.x, &ptr[index], 48 / 8);
 1797 index += 48 / 8;
 1798 sig->u.MlmeConnectStatusRequest.AssociationId = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
 1799 index += SIZEOF_UINT16;
 1800 sig->u.MlmeConnectStatusRequest.AssociationCapabilityInformation = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
 1801 index += SIZEOF_UINT16;
 1802 break;
 1803#endif
 1804#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
/* MLME-LEAVE.request: dummy refs + VIF id only. */
 1805 case CSR_MLME_LEAVE_REQUEST_ID:
 1806 sig->u.MlmeLeaveRequest.Dummydataref1.SlotNumber = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
 1807 index += SIZEOF_UINT16;
 1808 sig->u.MlmeLeaveRequest.Dummydataref1.DataLength = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
 1809 index += SIZEOF_UINT16;
 1810 sig->u.MlmeLeaveRequest.Dummydataref2.SlotNumber = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
 1811 index += SIZEOF_UINT16;
 1812 sig->u.MlmeLeaveRequest.Dummydataref2.DataLength = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
 1813 index += SIZEOF_UINT16;
 1814 sig->u.MlmeLeaveRequest.VirtualInterfaceIdentifier = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
 1815 index += SIZEOF_UINT16;
 1816 break;
 1817#endif
 1818#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
/* MLME-CONFIG-QUEUE.request: EDCA params (AIFS, CWmin/max, TXOP) for one queue.
 * Note: no VirtualInterfaceIdentifier field in this signal. */
 1819 case CSR_MLME_CONFIG_QUEUE_REQUEST_ID:
 1820 sig->u.MlmeConfigQueueRequest.Dummydataref1.SlotNumber = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
 1821 index += SIZEOF_UINT16;
 1822 sig->u.MlmeConfigQueueRequest.Dummydataref1.DataLength = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
 1823 index += SIZEOF_UINT16;
 1824 sig->u.MlmeConfigQueueRequest.Dummydataref2.SlotNumber = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
 1825 index += SIZEOF_UINT16;
 1826 sig->u.MlmeConfigQueueRequest.Dummydataref2.DataLength = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
 1827 index += SIZEOF_UINT16;
 1828 sig->u.MlmeConfigQueueRequest.QueueIndex = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
 1829 index += SIZEOF_UINT16;
 1830 sig->u.MlmeConfigQueueRequest.Aifs = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
 1831 index += SIZEOF_UINT16;
 1832 sig->u.MlmeConfigQueueRequest.Cwmin = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
 1833 index += SIZEOF_UINT16;
 1834 sig->u.MlmeConfigQueueRequest.Cwmax = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
 1835 index += SIZEOF_UINT16;
 1836 sig->u.MlmeConfigQueueRequest.TxopLimit = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
 1837 index += SIZEOF_UINT16;
 1838 break;
 1839#endif
 1840#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
/* MLME-DEL-TSPEC.confirm: user priority precedes the result code. */
 1841 case CSR_MLME_DEL_TSPEC_CONFIRM_ID:
 1842 sig->u.MlmeDelTspecConfirm.Dummydataref1.SlotNumber = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
 1843 index += SIZEOF_UINT16;
 1844 sig->u.MlmeDelTspecConfirm.Dummydataref1.DataLength = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
 1845 index += SIZEOF_UINT16;
 1846 sig->u.MlmeDelTspecConfirm.Dummydataref2.SlotNumber = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
 1847 index += SIZEOF_UINT16;
 1848 sig->u.MlmeDelTspecConfirm.Dummydataref2.DataLength = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
 1849 index += SIZEOF_UINT16;
 1850 sig->u.MlmeDelTspecConfirm.VirtualInterfaceIdentifier = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
 1851 index += SIZEOF_UINT16;
 1852 sig->u.MlmeDelTspecConfirm.UserPriority = (CSR_PRIORITY) CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
 1853 index += SIZEOF_UINT16;
 1854 sig->u.MlmeDelTspecConfirm.ResultCode = (CSR_RESULT_CODE) CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
 1855 index += SIZEOF_UINT16;
 1856 break;
 1857#endif
/* MLME-SET-TIM.confirm: always compiled in (no full-signal-set guard). */
 1858 case CSR_MLME_SET_TIM_CONFIRM_ID:
 1859 sig->u.MlmeSetTimConfirm.Dummydataref1.SlotNumber = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
 1860 index += SIZEOF_UINT16;
 1861 sig->u.MlmeSetTimConfirm.Dummydataref1.DataLength = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
 1862 index += SIZEOF_UINT16;
 1863 sig->u.MlmeSetTimConfirm.Dummydataref2.SlotNumber = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
 1864 index += SIZEOF_UINT16;
 1865 sig->u.MlmeSetTimConfirm.Dummydataref2.DataLength = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
 1866 index += SIZEOF_UINT16;
 1867 sig->u.MlmeSetTimConfirm.VirtualInterfaceIdentifier = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
 1868 index += SIZEOF_UINT16;
 1869 sig->u.MlmeSetTimConfirm.ResultCode = (CSR_RESULT_CODE) CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
 1870 index += SIZEOF_UINT16;
 1871 break;
 1872#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
/* MLME-MEASURE.indication: measurement report data ref + dialog token. */
 1873 case CSR_MLME_MEASURE_INDICATION_ID:
 1874 sig->u.MlmeMeasureIndication.MeasurementReportSet.SlotNumber = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
 1875 index += SIZEOF_UINT16;
 1876 sig->u.MlmeMeasureIndication.MeasurementReportSet.DataLength = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
 1877 index += SIZEOF_UINT16;
 1878 sig->u.MlmeMeasureIndication.Dummydataref2.SlotNumber = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
 1879 index += SIZEOF_UINT16;
 1880 sig->u.MlmeMeasureIndication.Dummydataref2.DataLength = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
 1881 index += SIZEOF_UINT16;
 1882 sig->u.MlmeMeasureIndication.DialogToken = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
 1883 index += SIZEOF_UINT16;
 1884 break;
 1885#endif
 1886#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
/* MLME-DEL-BLACKOUT.confirm: blackout id precedes the result code. */
 1887 case CSR_MLME_DEL_BLACKOUT_CONFIRM_ID:
 1888 sig->u.MlmeDelBlackoutConfirm.Dummydataref1.SlotNumber = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
 1889 index += SIZEOF_UINT16;
 1890 sig->u.MlmeDelBlackoutConfirm.Dummydataref1.DataLength = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
 1891 index += SIZEOF_UINT16;
 1892 sig->u.MlmeDelBlackoutConfirm.Dummydataref2.SlotNumber = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
 1893 index += SIZEOF_UINT16;
 1894 sig->u.MlmeDelBlackoutConfirm.Dummydataref2.DataLength = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
 1895 index += SIZEOF_UINT16;
 1896 sig->u.MlmeDelBlackoutConfirm.VirtualInterfaceIdentifier = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
 1897 index += SIZEOF_UINT16;
 1898 sig->u.MlmeDelBlackoutConfirm.BlackoutId = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
 1899 index += SIZEOF_UINT16;
 1900 sig->u.MlmeDelBlackoutConfirm.ResultCode = (CSR_RESULT_CODE) CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
 1901 index += SIZEOF_UINT16;
 1902 break;
 1903#endif
 1904#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
/* MLME-DEL-TRIGGERED-GET.confirm: result code precedes the triggered id. */
 1905 case CSR_MLME_DEL_TRIGGERED_GET_CONFIRM_ID:
 1906 sig->u.MlmeDelTriggeredGetConfirm.Dummydataref1.SlotNumber = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
 1907 index += SIZEOF_UINT16;
 1908 sig->u.MlmeDelTriggeredGetConfirm.Dummydataref1.DataLength = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
 1909 index += SIZEOF_UINT16;
 1910 sig->u.MlmeDelTriggeredGetConfirm.Dummydataref2.SlotNumber = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
 1911 index += SIZEOF_UINT16;
 1912 sig->u.MlmeDelTriggeredGetConfirm.Dummydataref2.DataLength = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
 1913 index += SIZEOF_UINT16;
 1914 sig->u.MlmeDelTriggeredGetConfirm.VirtualInterfaceIdentifier = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
 1915 index += SIZEOF_UINT16;
 1916 sig->u.MlmeDelTriggeredGetConfirm.ResultCode = (CSR_RESULT_CODE) CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
 1917 index += SIZEOF_UINT16;
 1918 sig->u.MlmeDelTriggeredGetConfirm.TriggeredId = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
 1919 index += SIZEOF_UINT16;
 1920 break;
 1921#endif
/* DEBUG-GENERIC.indication: mirrors DEBUG-GENERIC.request layout
 * (one data ref + eight u16 debug words); always compiled in. */
 1922 case CSR_DEBUG_GENERIC_INDICATION_ID:
 1923 sig->u.DebugGenericIndication.DebugVariable.SlotNumber = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
 1924 index += SIZEOF_UINT16;
 1925 sig->u.DebugGenericIndication.DebugVariable.DataLength = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
 1926 index += SIZEOF_UINT16;
 1927 sig->u.DebugGenericIndication.Dummydataref2.SlotNumber = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
 1928 index += SIZEOF_UINT16;
 1929 sig->u.DebugGenericIndication.Dummydataref2.DataLength = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
 1930 index += SIZEOF_UINT16;
 1931 sig->u.DebugGenericIndication.DebugWords[0] = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
 1932 index += SIZEOF_UINT16;
 1933 sig->u.DebugGenericIndication.DebugWords[1] = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
 1934 index += SIZEOF_UINT16;
 1935 sig->u.DebugGenericIndication.DebugWords[2] = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
 1936 index += SIZEOF_UINT16;
 1937 sig->u.DebugGenericIndication.DebugWords[3] = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
 1938 index += SIZEOF_UINT16;
 1939 sig->u.DebugGenericIndication.DebugWords[4] = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
 1940 index += SIZEOF_UINT16;
 1941 sig->u.DebugGenericIndication.DebugWords[5] = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
 1942 index += SIZEOF_UINT16;
 1943 sig->u.DebugGenericIndication.DebugWords[6] = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
 1944 index += SIZEOF_UINT16;
 1945 sig->u.DebugGenericIndication.DebugWords[7] = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
 1946 index += SIZEOF_UINT16;
 1947 break;
/* MA-PACKET-CANCEL.request: identifies the frame to cancel by u32 host tag. */
 1948 case CSR_MA_PACKET_CANCEL_REQUEST_ID:
 1949 sig->u.MaPacketCancelRequest.Dummydataref1.SlotNumber = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
 1950 index += SIZEOF_UINT16;
 1951 sig->u.MaPacketCancelRequest.Dummydataref1.DataLength = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
 1952 index += SIZEOF_UINT16;
 1953 sig->u.MaPacketCancelRequest.Dummydataref2.SlotNumber = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
 1954 index += SIZEOF_UINT16;
 1955 sig->u.MaPacketCancelRequest.Dummydataref2.DataLength = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
 1956 index += SIZEOF_UINT16;
 1957 sig->u.MaPacketCancelRequest.VirtualInterfaceIdentifier = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
 1958 index += SIZEOF_UINT16;
 1959 sig->u.MaPacketCancelRequest.HostTag = CSR_GET_UINT32_FROM_LITTLE_ENDIAN(ptr + index);
 1960 index += SIZEOF_UINT32;
 1961 break;
 1962#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
/* MLME-MODIFY-BSS-PARAMETER.confirm: dummy refs, VIF id, result code. */
 1963 case CSR_MLME_MODIFY_BSS_PARAMETER_CONFIRM_ID:
 1964 sig->u.MlmeModifyBssParameterConfirm.Dummydataref1.SlotNumber = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
 1965 index += SIZEOF_UINT16;
 1966 sig->u.MlmeModifyBssParameterConfirm.Dummydataref1.DataLength = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
 1967 index += SIZEOF_UINT16;
 1968 sig->u.MlmeModifyBssParameterConfirm.Dummydataref2.SlotNumber = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
 1969 index += SIZEOF_UINT16;
 1970 sig->u.MlmeModifyBssParameterConfirm.Dummydataref2.DataLength = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
 1971 index += SIZEOF_UINT16;
 1972 sig->u.MlmeModifyBssParameterConfirm.VirtualInterfaceIdentifier = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
 1973 index += SIZEOF_UINT16;
 1974 sig->u.MlmeModifyBssParameterConfirm.ResultCode = (CSR_RESULT_CODE) CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
 1975 index += SIZEOF_UINT16;
 1976 break;
 1977#endif
 1978#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
/* MLME-PAUSE-AUTONOMOUS-SCAN.confirm: result code then autonomous scan id. */
 1979 case CSR_MLME_PAUSE_AUTONOMOUS_SCAN_CONFIRM_ID:
 1980 sig->u.MlmePauseAutonomousScanConfirm.Dummydataref1.SlotNumber = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
 1981 index += SIZEOF_UINT16;
 1982 sig->u.MlmePauseAutonomousScanConfirm.Dummydataref1.DataLength = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
 1983 index += SIZEOF_UINT16;
 1984 sig->u.MlmePauseAutonomousScanConfirm.Dummydataref2.SlotNumber = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
 1985 index += SIZEOF_UINT16;
 1986 sig->u.MlmePauseAutonomousScanConfirm.Dummydataref2.DataLength = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
 1987 index += SIZEOF_UINT16;
 1988 sig->u.MlmePauseAutonomousScanConfirm.VirtualInterfaceIdentifier = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
 1989 index += SIZEOF_UINT16;
 1990 sig->u.MlmePauseAutonomousScanConfirm.ResultCode = (CSR_RESULT_CODE) CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
 1991 index += SIZEOF_UINT16;
 1992 sig->u.MlmePauseAutonomousScanConfirm.AutonomousScanId = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
 1993 index += SIZEOF_UINT16;
 1994 break;
 1995#endif
/* MA-PACKET.request: the data-plane TX signal — payload data ref, rate,
 * u32 host tag, priority, receiver MAC, transmission control flags.
 * Always compiled in. */
 1996 case CSR_MA_PACKET_REQUEST_ID:
 1997 sig->u.MaPacketRequest.Data.SlotNumber = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
 1998 index += SIZEOF_UINT16;
 1999 sig->u.MaPacketRequest.Data.DataLength = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
 2000 index += SIZEOF_UINT16;
 2001 sig->u.MaPacketRequest.Dummydataref2.SlotNumber = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
 2002 index += SIZEOF_UINT16;
 2003 sig->u.MaPacketRequest.Dummydataref2.DataLength = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
 2004 index += SIZEOF_UINT16;
 2005 sig->u.MaPacketRequest.VirtualInterfaceIdentifier = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
 2006 index += SIZEOF_UINT16;
 2007 sig->u.MaPacketRequest.TransmitRate = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
 2008 index += SIZEOF_UINT16;
 2009 sig->u.MaPacketRequest.HostTag = CSR_GET_UINT32_FROM_LITTLE_ENDIAN(ptr + index);
 2010 index += SIZEOF_UINT32;
 2011 sig->u.MaPacketRequest.Priority = (CSR_PRIORITY) CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
 2012 index += SIZEOF_UINT16;
 2013 memcpy(sig->u.MaPacketRequest.Ra.x, &ptr[index], 48 / 8);
 2014 index += 48 / 8;
 2015 sig->u.MaPacketRequest.TransmissionControl = (CSR_TRANSMISSION_CONTROL) CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
 2016 index += SIZEOF_UINT16;
 2017 break;
 2018#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
/* MLME-MODIFY-BSS-PARAMETER.request: beacon/DTIM periods, capabilities,
 * BSSID (6 raw bytes), RTS threshold. */
 2019 case CSR_MLME_MODIFY_BSS_PARAMETER_REQUEST_ID:
 2020 sig->u.MlmeModifyBssParameterRequest.Data.SlotNumber = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
 2021 index += SIZEOF_UINT16;
 2022 sig->u.MlmeModifyBssParameterRequest.Data.DataLength = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
 2023 index += SIZEOF_UINT16;
 2024 sig->u.MlmeModifyBssParameterRequest.Dummydataref2.SlotNumber = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
 2025 index += SIZEOF_UINT16;
 2026 sig->u.MlmeModifyBssParameterRequest.Dummydataref2.DataLength = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
 2027 index += SIZEOF_UINT16;
 2028 sig->u.MlmeModifyBssParameterRequest.VirtualInterfaceIdentifier = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
 2029 index += SIZEOF_UINT16;
 2030 sig->u.MlmeModifyBssParameterRequest.BeaconPeriod = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
 2031 index += SIZEOF_UINT16;
 2032 sig->u.MlmeModifyBssParameterRequest.DtimPeriod = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
 2033 index += SIZEOF_UINT16;
 2034 sig->u.MlmeModifyBssParameterRequest.CapabilityInformation = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
 2035 index += SIZEOF_UINT16;
 2036 memcpy(sig->u.MlmeModifyBssParameterRequest.Bssid.x, &ptr[index], 48 / 8);
 2037 index += 48 / 8;
 2038 sig->u.MlmeModifyBssParameterRequest.RtsThreshold = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
 2039 index += SIZEOF_UINT16;
 2040 break;
 2041#endif
 2042#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
/* MLME-ADD-RX-TRIGGER.request: IE data ref, trigger id, priority. */
 2043 case CSR_MLME_ADD_RX_TRIGGER_REQUEST_ID:
 2044 sig->u.MlmeAddRxTriggerRequest.InformationElements.SlotNumber = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
 2045 index += SIZEOF_UINT16;
 2046 sig->u.MlmeAddRxTriggerRequest.InformationElements.DataLength = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
 2047 index += SIZEOF_UINT16;
 2048 sig->u.MlmeAddRxTriggerRequest.Dummydataref2.SlotNumber = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
 2049 index += SIZEOF_UINT16;
 2050 sig->u.MlmeAddRxTriggerRequest.Dummydataref2.DataLength = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
 2051 index += SIZEOF_UINT16;
 2052 sig->u.MlmeAddRxTriggerRequest.VirtualInterfaceIdentifier = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
 2053 index += SIZEOF_UINT16;
 2054 sig->u.MlmeAddRxTriggerRequest.TriggerId = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
 2055 index += SIZEOF_UINT16;
 2056 sig->u.MlmeAddRxTriggerRequest.Priority = (CSR_PRIORITY) CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
 2057 index += SIZEOF_UINT16;
 2058 break;
 2059#endif
/* MA-VIF-AVAILABILITY.indication: VIF id + multicast flag; always compiled in. */
 2060 case CSR_MA_VIF_AVAILABILITY_INDICATION_ID:
 2061 sig->u.MaVifAvailabilityIndication.Dummydataref1.SlotNumber = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
 2062 index += SIZEOF_UINT16;
 2063 sig->u.MaVifAvailabilityIndication.Dummydataref1.DataLength = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
 2064 index += SIZEOF_UINT16;
 2065 sig->u.MaVifAvailabilityIndication.Dummydataref2.SlotNumber = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
 2066 index += SIZEOF_UINT16;
 2067 sig->u.MaVifAvailabilityIndication.Dummydataref2.DataLength = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
 2068 index += SIZEOF_UINT16;
 2069 sig->u.MaVifAvailabilityIndication.VirtualInterfaceIdentifier = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
 2070 index += SIZEOF_UINT16;
 2071 sig->u.MaVifAvailabilityIndication.Multicast = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
 2072 index += SIZEOF_UINT16;
 2073 break;
 2074#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
/* MLME-HL-SYNC-CANCEL.request: dummy refs then a 6-byte group MAC address. */
 2075 case CSR_MLME_HL_SYNC_CANCEL_REQUEST_ID:
 2076 sig->u.MlmeHlSyncCancelRequest.Dummydataref1.SlotNumber = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
 2077 index += SIZEOF_UINT16;
 2078 sig->u.MlmeHlSyncCancelRequest.Dummydataref1.DataLength = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
 2079 index += SIZEOF_UINT16;
 2080 sig->u.MlmeHlSyncCancelRequest.Dummydataref2.SlotNumber = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
 2081 index += SIZEOF_UINT16;
 2082 sig->u.MlmeHlSyncCancelRequest.Dummydataref2.DataLength = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
 2083 index += SIZEOF_UINT16;
 2084 memcpy(sig->u.MlmeHlSyncCancelRequest.GroupAddress.x, &ptr[index], 48 / 8);
 2085 index += 48 / 8;
 2086 break;
 2087#endif
2088#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
2089 case CSR_MLME_DEL_AUTONOMOUS_SCAN_REQUEST_ID:
2090 sig->u.MlmeDelAutonomousScanRequest.Dummydataref1.SlotNumber = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
2091 index += SIZEOF_UINT16;
2092 sig->u.MlmeDelAutonomousScanRequest.Dummydataref1.DataLength = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
2093 index += SIZEOF_UINT16;
2094 sig->u.MlmeDelAutonomousScanRequest.Dummydataref2.SlotNumber = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
2095 index += SIZEOF_UINT16;
2096 sig->u.MlmeDelAutonomousScanRequest.Dummydataref2.DataLength = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
2097 index += SIZEOF_UINT16;
2098 sig->u.MlmeDelAutonomousScanRequest.VirtualInterfaceIdentifier = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
2099 index += SIZEOF_UINT16;
2100 sig->u.MlmeDelAutonomousScanRequest.AutonomousScanId = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
2101 index += SIZEOF_UINT16;
2102 break;
2103#endif
2104#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
2105 case CSR_MLME_BLACKOUT_ENDED_INDICATION_ID:
2106 sig->u.MlmeBlackoutEndedIndication.Dummydataref1.SlotNumber = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
2107 index += SIZEOF_UINT16;
2108 sig->u.MlmeBlackoutEndedIndication.Dummydataref1.DataLength = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
2109 index += SIZEOF_UINT16;
2110 sig->u.MlmeBlackoutEndedIndication.Dummydataref2.SlotNumber = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
2111 index += SIZEOF_UINT16;
2112 sig->u.MlmeBlackoutEndedIndication.Dummydataref2.DataLength = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
2113 index += SIZEOF_UINT16;
2114 sig->u.MlmeBlackoutEndedIndication.VirtualInterfaceIdentifier = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
2115 index += SIZEOF_UINT16;
2116 sig->u.MlmeBlackoutEndedIndication.BlackoutId = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
2117 index += SIZEOF_UINT16;
2118 break;
2119#endif
2120#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
2121 case CSR_MLME_AUTONOMOUS_SCAN_DONE_INDICATION_ID:
2122 sig->u.MlmeAutonomousScanDoneIndication.Dummydataref1.SlotNumber = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
2123 index += SIZEOF_UINT16;
2124 sig->u.MlmeAutonomousScanDoneIndication.Dummydataref1.DataLength = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
2125 index += SIZEOF_UINT16;
2126 sig->u.MlmeAutonomousScanDoneIndication.Dummydataref2.SlotNumber = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
2127 index += SIZEOF_UINT16;
2128 sig->u.MlmeAutonomousScanDoneIndication.Dummydataref2.DataLength = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
2129 index += SIZEOF_UINT16;
2130 sig->u.MlmeAutonomousScanDoneIndication.VirtualInterfaceIdentifier = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
2131 index += SIZEOF_UINT16;
2132 sig->u.MlmeAutonomousScanDoneIndication.ResultCode = (CSR_RESULT_CODE) CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
2133 index += SIZEOF_UINT16;
2134 sig->u.MlmeAutonomousScanDoneIndication.AutonomousScanId = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
2135 index += SIZEOF_UINT16;
2136 break;
2137#endif
2138#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
2139 case CSR_MLME_GET_KEY_SEQUENCE_REQUEST_ID:
2140 sig->u.MlmeGetKeySequenceRequest.Dummydataref1.SlotNumber = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
2141 index += SIZEOF_UINT16;
2142 sig->u.MlmeGetKeySequenceRequest.Dummydataref1.DataLength = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
2143 index += SIZEOF_UINT16;
2144 sig->u.MlmeGetKeySequenceRequest.Dummydataref2.SlotNumber = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
2145 index += SIZEOF_UINT16;
2146 sig->u.MlmeGetKeySequenceRequest.Dummydataref2.DataLength = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
2147 index += SIZEOF_UINT16;
2148 sig->u.MlmeGetKeySequenceRequest.VirtualInterfaceIdentifier = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
2149 index += SIZEOF_UINT16;
2150 sig->u.MlmeGetKeySequenceRequest.KeyId = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
2151 index += SIZEOF_UINT16;
2152 sig->u.MlmeGetKeySequenceRequest.KeyType = (CSR_KEY_TYPE) CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
2153 index += SIZEOF_UINT16;
2154 memcpy(sig->u.MlmeGetKeySequenceRequest.Address.x, &ptr[index], 48 / 8);
2155 index += 48 / 8;
2156 break;
2157#endif
2158#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
2159 case CSR_MLME_SET_CHANNEL_REQUEST_ID:
2160 sig->u.MlmeSetChannelRequest.Dummydataref1.SlotNumber = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
2161 index += SIZEOF_UINT16;
2162 sig->u.MlmeSetChannelRequest.Dummydataref1.DataLength = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
2163 index += SIZEOF_UINT16;
2164 sig->u.MlmeSetChannelRequest.Dummydataref2.SlotNumber = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
2165 index += SIZEOF_UINT16;
2166 sig->u.MlmeSetChannelRequest.Dummydataref2.DataLength = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
2167 index += SIZEOF_UINT16;
2168 sig->u.MlmeSetChannelRequest.VirtualInterfaceIdentifier = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
2169 index += SIZEOF_UINT16;
2170 sig->u.MlmeSetChannelRequest.Ifindex = (CSR_IFINTERFACE) CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
2171 index += SIZEOF_UINT16;
2172 sig->u.MlmeSetChannelRequest.Channel = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
2173 index += SIZEOF_UINT16;
2174 memcpy(sig->u.MlmeSetChannelRequest.Address.x, &ptr[index], 48 / 8);
2175 index += 48 / 8;
2176 sig->u.MlmeSetChannelRequest.AvailabilityDuration = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
2177 index += SIZEOF_UINT16;
2178 sig->u.MlmeSetChannelRequest.AvailabilityInterval = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
2179 index += SIZEOF_UINT16;
2180 break;
2181#endif
2182#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
2183 case CSR_MLME_MEASURE_CONFIRM_ID:
2184 sig->u.MlmeMeasureConfirm.Dummydataref1.SlotNumber = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
2185 index += SIZEOF_UINT16;
2186 sig->u.MlmeMeasureConfirm.Dummydataref1.DataLength = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
2187 index += SIZEOF_UINT16;
2188 sig->u.MlmeMeasureConfirm.Dummydataref2.SlotNumber = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
2189 index += SIZEOF_UINT16;
2190 sig->u.MlmeMeasureConfirm.Dummydataref2.DataLength = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
2191 index += SIZEOF_UINT16;
2192 sig->u.MlmeMeasureConfirm.ResultCode = (CSR_RESULT_CODE) CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
2193 index += SIZEOF_UINT16;
2194 sig->u.MlmeMeasureConfirm.DialogToken = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
2195 index += SIZEOF_UINT16;
2196 break;
2197#endif
2198#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
2199 case CSR_MLME_ADD_TRIGGERED_GET_REQUEST_ID:
2200 sig->u.MlmeAddTriggeredGetRequest.MibAttribute.SlotNumber = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
2201 index += SIZEOF_UINT16;
2202 sig->u.MlmeAddTriggeredGetRequest.MibAttribute.DataLength = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
2203 index += SIZEOF_UINT16;
2204 sig->u.MlmeAddTriggeredGetRequest.Dummydataref2.SlotNumber = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
2205 index += SIZEOF_UINT16;
2206 sig->u.MlmeAddTriggeredGetRequest.Dummydataref2.DataLength = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
2207 index += SIZEOF_UINT16;
2208 sig->u.MlmeAddTriggeredGetRequest.VirtualInterfaceIdentifier = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
2209 index += SIZEOF_UINT16;
2210 sig->u.MlmeAddTriggeredGetRequest.TriggeredId = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
2211 index += SIZEOF_UINT16;
2212 break;
2213#endif
2214#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
2215 case CSR_MLME_AUTONOMOUS_SCAN_LOSS_INDICATION_ID:
2216 sig->u.MlmeAutonomousScanLossIndication.Dummydataref1.SlotNumber = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
2217 index += SIZEOF_UINT16;
2218 sig->u.MlmeAutonomousScanLossIndication.Dummydataref1.DataLength = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
2219 index += SIZEOF_UINT16;
2220 sig->u.MlmeAutonomousScanLossIndication.Dummydataref2.SlotNumber = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
2221 index += SIZEOF_UINT16;
2222 sig->u.MlmeAutonomousScanLossIndication.Dummydataref2.DataLength = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
2223 index += SIZEOF_UINT16;
2224 sig->u.MlmeAutonomousScanLossIndication.VirtualInterfaceIdentifier = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
2225 index += SIZEOF_UINT16;
2226 memcpy(sig->u.MlmeAutonomousScanLossIndication.Bssid.x, &ptr[index], 48 / 8);
2227 index += 48 / 8;
2228 break;
2229#endif
2230 case CSR_MA_VIF_AVAILABILITY_RESPONSE_ID:
2231 sig->u.MaVifAvailabilityResponse.Dummydataref1.SlotNumber = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
2232 index += SIZEOF_UINT16;
2233 sig->u.MaVifAvailabilityResponse.Dummydataref1.DataLength = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
2234 index += SIZEOF_UINT16;
2235 sig->u.MaVifAvailabilityResponse.Dummydataref2.SlotNumber = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
2236 index += SIZEOF_UINT16;
2237 sig->u.MaVifAvailabilityResponse.Dummydataref2.DataLength = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
2238 index += SIZEOF_UINT16;
2239 sig->u.MaVifAvailabilityResponse.VirtualInterfaceIdentifier = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
2240 index += SIZEOF_UINT16;
2241 sig->u.MaVifAvailabilityResponse.ResultCode = (CSR_RESULT_CODE) CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
2242 index += SIZEOF_UINT16;
2243 break;
2244#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
2245 case CSR_MLME_ADD_TEMPLATE_REQUEST_ID:
2246 sig->u.MlmeAddTemplateRequest.Data1.SlotNumber = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
2247 index += SIZEOF_UINT16;
2248 sig->u.MlmeAddTemplateRequest.Data1.DataLength = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
2249 index += SIZEOF_UINT16;
2250 sig->u.MlmeAddTemplateRequest.Data2.SlotNumber = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
2251 index += SIZEOF_UINT16;
2252 sig->u.MlmeAddTemplateRequest.Data2.DataLength = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
2253 index += SIZEOF_UINT16;
2254 sig->u.MlmeAddTemplateRequest.VirtualInterfaceIdentifier = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
2255 index += SIZEOF_UINT16;
2256 sig->u.MlmeAddTemplateRequest.FrameType = (CSR_FRAME_TYPE) CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
2257 index += SIZEOF_UINT16;
2258 sig->u.MlmeAddTemplateRequest.MinTransmitRate = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
2259 index += SIZEOF_UINT16;
2260 break;
2261#endif
2262#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
2263 case CSR_MLME_POWERMGT_CONFIRM_ID:
2264 sig->u.MlmePowermgtConfirm.Dummydataref1.SlotNumber = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
2265 index += SIZEOF_UINT16;
2266 sig->u.MlmePowermgtConfirm.Dummydataref1.DataLength = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
2267 index += SIZEOF_UINT16;
2268 sig->u.MlmePowermgtConfirm.Dummydataref2.SlotNumber = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
2269 index += SIZEOF_UINT16;
2270 sig->u.MlmePowermgtConfirm.Dummydataref2.DataLength = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
2271 index += SIZEOF_UINT16;
2272 sig->u.MlmePowermgtConfirm.VirtualInterfaceIdentifier = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
2273 index += SIZEOF_UINT16;
2274 sig->u.MlmePowermgtConfirm.ResultCode = (CSR_RESULT_CODE) CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
2275 index += SIZEOF_UINT16;
2276 break;
2277#endif
2278#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
2279 case CSR_MLME_ADD_PERIODIC_CONFIRM_ID:
2280 sig->u.MlmeAddPeriodicConfirm.Dummydataref1.SlotNumber = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
2281 index += SIZEOF_UINT16;
2282 sig->u.MlmeAddPeriodicConfirm.Dummydataref1.DataLength = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
2283 index += SIZEOF_UINT16;
2284 sig->u.MlmeAddPeriodicConfirm.Dummydataref2.SlotNumber = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
2285 index += SIZEOF_UINT16;
2286 sig->u.MlmeAddPeriodicConfirm.Dummydataref2.DataLength = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
2287 index += SIZEOF_UINT16;
2288 sig->u.MlmeAddPeriodicConfirm.VirtualInterfaceIdentifier = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
2289 index += SIZEOF_UINT16;
2290 sig->u.MlmeAddPeriodicConfirm.PeriodicId = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
2291 index += SIZEOF_UINT16;
2292 sig->u.MlmeAddPeriodicConfirm.ResultCode = (CSR_RESULT_CODE) CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
2293 index += SIZEOF_UINT16;
2294 break;
2295#endif
2296#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
2297 case CSR_MLME_GET_CONFIRM_ID:
2298 sig->u.MlmeGetConfirm.MibAttributeValue.SlotNumber = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
2299 index += SIZEOF_UINT16;
2300 sig->u.MlmeGetConfirm.MibAttributeValue.DataLength = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
2301 index += SIZEOF_UINT16;
2302 sig->u.MlmeGetConfirm.Dummydataref2.SlotNumber = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
2303 index += SIZEOF_UINT16;
2304 sig->u.MlmeGetConfirm.Dummydataref2.DataLength = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
2305 index += SIZEOF_UINT16;
2306 sig->u.MlmeGetConfirm.Status = (CSR_MIB_STATUS) CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
2307 index += SIZEOF_UINT16;
2308 sig->u.MlmeGetConfirm.ErrorIndex = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
2309 index += SIZEOF_UINT16;
2310 break;
2311#endif
2312#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
2313 case CSR_MLME_GET_NEXT_CONFIRM_ID:
2314 sig->u.MlmeGetNextConfirm.MibAttributeValue.SlotNumber = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
2315 index += SIZEOF_UINT16;
2316 sig->u.MlmeGetNextConfirm.MibAttributeValue.DataLength = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
2317 index += SIZEOF_UINT16;
2318 sig->u.MlmeGetNextConfirm.Dummydataref2.SlotNumber = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
2319 index += SIZEOF_UINT16;
2320 sig->u.MlmeGetNextConfirm.Dummydataref2.DataLength = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
2321 index += SIZEOF_UINT16;
2322 sig->u.MlmeGetNextConfirm.Status = (CSR_MIB_STATUS) CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
2323 index += SIZEOF_UINT16;
2324 sig->u.MlmeGetNextConfirm.ErrorIndex = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
2325 index += SIZEOF_UINT16;
2326 break;
2327#endif
2328#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
2329 case CSR_MLME_STOP_AGGREGATION_REQUEST_ID:
2330 sig->u.MlmeStopAggregationRequest.Dummydataref1.SlotNumber = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
2331 index += SIZEOF_UINT16;
2332 sig->u.MlmeStopAggregationRequest.Dummydataref1.DataLength = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
2333 index += SIZEOF_UINT16;
2334 sig->u.MlmeStopAggregationRequest.Dummydataref2.SlotNumber = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
2335 index += SIZEOF_UINT16;
2336 sig->u.MlmeStopAggregationRequest.Dummydataref2.DataLength = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
2337 index += SIZEOF_UINT16;
2338 sig->u.MlmeStopAggregationRequest.VirtualInterfaceIdentifier = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
2339 index += SIZEOF_UINT16;
2340 memcpy(sig->u.MlmeStopAggregationRequest.PeerQstaAddress.x, &ptr[index], 48 / 8);
2341 index += 48 / 8;
2342 sig->u.MlmeStopAggregationRequest.UserPriority = (CSR_PRIORITY) CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
2343 index += SIZEOF_UINT16;
2344 sig->u.MlmeStopAggregationRequest.Direction = (CSR_DIRECTION) CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
2345 index += SIZEOF_UINT16;
2346 break;
2347#endif
2348#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
2349 case CSR_MLME_ADD_RX_TRIGGER_CONFIRM_ID:
2350 sig->u.MlmeAddRxTriggerConfirm.Dummydataref1.SlotNumber = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
2351 index += SIZEOF_UINT16;
2352 sig->u.MlmeAddRxTriggerConfirm.Dummydataref1.DataLength = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
2353 index += SIZEOF_UINT16;
2354 sig->u.MlmeAddRxTriggerConfirm.Dummydataref2.SlotNumber = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
2355 index += SIZEOF_UINT16;
2356 sig->u.MlmeAddRxTriggerConfirm.Dummydataref2.DataLength = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
2357 index += SIZEOF_UINT16;
2358 sig->u.MlmeAddRxTriggerConfirm.VirtualInterfaceIdentifier = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
2359 index += SIZEOF_UINT16;
2360 sig->u.MlmeAddRxTriggerConfirm.TriggerId = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
2361 index += SIZEOF_UINT16;
2362 sig->u.MlmeAddRxTriggerConfirm.ResultCode = (CSR_RESULT_CODE) CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
2363 index += SIZEOF_UINT16;
2364 break;
2365#endif
2366#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
2367 case CSR_MLME_ADD_BLACKOUT_REQUEST_ID:
2368 sig->u.MlmeAddBlackoutRequest.Dummydataref1.SlotNumber = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
2369 index += SIZEOF_UINT16;
2370 sig->u.MlmeAddBlackoutRequest.Dummydataref1.DataLength = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
2371 index += SIZEOF_UINT16;
2372 sig->u.MlmeAddBlackoutRequest.Dummydataref2.SlotNumber = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
2373 index += SIZEOF_UINT16;
2374 sig->u.MlmeAddBlackoutRequest.Dummydataref2.DataLength = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
2375 index += SIZEOF_UINT16;
2376 sig->u.MlmeAddBlackoutRequest.VirtualInterfaceIdentifier = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
2377 index += SIZEOF_UINT16;
2378 sig->u.MlmeAddBlackoutRequest.BlackoutId = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
2379 index += SIZEOF_UINT16;
2380 sig->u.MlmeAddBlackoutRequest.BlackoutType = (CSR_BLACKOUT_TYPE) CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
2381 index += SIZEOF_UINT16;
2382 sig->u.MlmeAddBlackoutRequest.BlackoutSource = (CSR_BLACKOUT_SOURCE) CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
2383 index += SIZEOF_UINT16;
2384 sig->u.MlmeAddBlackoutRequest.BlackoutStartReference = CSR_GET_UINT32_FROM_LITTLE_ENDIAN(ptr + index);
2385 index += SIZEOF_UINT32;
2386 sig->u.MlmeAddBlackoutRequest.BlackoutPeriod = CSR_GET_UINT32_FROM_LITTLE_ENDIAN(ptr + index);
2387 index += SIZEOF_UINT32;
2388 sig->u.MlmeAddBlackoutRequest.BlackoutDuration = CSR_GET_UINT32_FROM_LITTLE_ENDIAN(ptr + index);
2389 index += SIZEOF_UINT32;
2390 memcpy(sig->u.MlmeAddBlackoutRequest.PeerStaAddress.x, &ptr[index], 48 / 8);
2391 index += 48 / 8;
2392 sig->u.MlmeAddBlackoutRequest.BlackoutCount = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
2393 index += SIZEOF_UINT16;
2394 break;
2395#endif
2396#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
2397 case CSR_MLME_DELETEKEYS_REQUEST_ID:
2398 sig->u.MlmeDeletekeysRequest.Dummydataref1.SlotNumber = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
2399 index += SIZEOF_UINT16;
2400 sig->u.MlmeDeletekeysRequest.Dummydataref1.DataLength = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
2401 index += SIZEOF_UINT16;
2402 sig->u.MlmeDeletekeysRequest.Dummydataref2.SlotNumber = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
2403 index += SIZEOF_UINT16;
2404 sig->u.MlmeDeletekeysRequest.Dummydataref2.DataLength = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
2405 index += SIZEOF_UINT16;
2406 sig->u.MlmeDeletekeysRequest.VirtualInterfaceIdentifier = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
2407 index += SIZEOF_UINT16;
2408 sig->u.MlmeDeletekeysRequest.KeyId = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
2409 index += SIZEOF_UINT16;
2410 sig->u.MlmeDeletekeysRequest.KeyType = (CSR_KEY_TYPE) CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
2411 index += SIZEOF_UINT16;
2412 memcpy(sig->u.MlmeDeletekeysRequest.Address.x, &ptr[index], 48 / 8);
2413 index += 48 / 8;
2414 break;
2415#endif
2416#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
2417 case CSR_MLME_RESET_CONFIRM_ID:
2418 sig->u.MlmeResetConfirm.Dummydataref1.SlotNumber = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
2419 index += SIZEOF_UINT16;
2420 sig->u.MlmeResetConfirm.Dummydataref1.DataLength = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
2421 index += SIZEOF_UINT16;
2422 sig->u.MlmeResetConfirm.Dummydataref2.SlotNumber = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
2423 index += SIZEOF_UINT16;
2424 sig->u.MlmeResetConfirm.Dummydataref2.DataLength = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
2425 index += SIZEOF_UINT16;
2426 sig->u.MlmeResetConfirm.ResultCode = (CSR_RESULT_CODE) CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
2427 index += SIZEOF_UINT16;
2428 break;
2429#endif
2430#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
2431 case CSR_MLME_HL_SYNC_CONFIRM_ID:
2432 sig->u.MlmeHlSyncConfirm.Dummydataref1.SlotNumber = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
2433 index += SIZEOF_UINT16;
2434 sig->u.MlmeHlSyncConfirm.Dummydataref1.DataLength = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
2435 index += SIZEOF_UINT16;
2436 sig->u.MlmeHlSyncConfirm.Dummydataref2.SlotNumber = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
2437 index += SIZEOF_UINT16;
2438 sig->u.MlmeHlSyncConfirm.Dummydataref2.DataLength = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
2439 index += SIZEOF_UINT16;
2440 memcpy(sig->u.MlmeHlSyncConfirm.GroupAddress.x, &ptr[index], 48 / 8);
2441 index += 48 / 8;
2442 sig->u.MlmeHlSyncConfirm.ResultCode = (CSR_RESULT_CODE) CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
2443 index += SIZEOF_UINT16;
2444 break;
2445#endif
2446#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
2447 case CSR_MLME_ADD_AUTONOMOUS_SCAN_REQUEST_ID:
2448 sig->u.MlmeAddAutonomousScanRequest.ChannelList.SlotNumber = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
2449 index += SIZEOF_UINT16;
2450 sig->u.MlmeAddAutonomousScanRequest.ChannelList.DataLength = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
2451 index += SIZEOF_UINT16;
2452 sig->u.MlmeAddAutonomousScanRequest.InformationElements.SlotNumber = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
2453 index += SIZEOF_UINT16;
2454 sig->u.MlmeAddAutonomousScanRequest.InformationElements.DataLength = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
2455 index += SIZEOF_UINT16;
2456 sig->u.MlmeAddAutonomousScanRequest.VirtualInterfaceIdentifier = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
2457 index += SIZEOF_UINT16;
2458 sig->u.MlmeAddAutonomousScanRequest.AutonomousScanId = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
2459 index += SIZEOF_UINT16;
2460 sig->u.MlmeAddAutonomousScanRequest.Ifindex = (CSR_IFINTERFACE) CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
2461 index += SIZEOF_UINT16;
2462 sig->u.MlmeAddAutonomousScanRequest.ChannelStartingFactor = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
2463 index += SIZEOF_UINT16;
2464 sig->u.MlmeAddAutonomousScanRequest.ScanType = (CSR_SCAN_TYPE) CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
2465 index += SIZEOF_UINT16;
2466 sig->u.MlmeAddAutonomousScanRequest.ProbeDelay = CSR_GET_UINT32_FROM_LITTLE_ENDIAN(ptr + index);
2467 index += SIZEOF_UINT32;
2468 sig->u.MlmeAddAutonomousScanRequest.MinChannelTime = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
2469 index += SIZEOF_UINT16;
2470 sig->u.MlmeAddAutonomousScanRequest.MaxChannelTime = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
2471 index += SIZEOF_UINT16;
2472 break;
2473#endif
2474#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
2475 case CSR_MLME_SET_REQUEST_ID:
2476 sig->u.MlmeSetRequest.MibAttributeValue.SlotNumber = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
2477 index += SIZEOF_UINT16;
2478 sig->u.MlmeSetRequest.MibAttributeValue.DataLength = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
2479 index += SIZEOF_UINT16;
2480 sig->u.MlmeSetRequest.Dummydataref2.SlotNumber = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
2481 index += SIZEOF_UINT16;
2482 sig->u.MlmeSetRequest.Dummydataref2.DataLength = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
2483 index += SIZEOF_UINT16;
2484 break;
2485#endif
2486#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
2487 case CSR_MLME_SM_START_REQUEST_ID:
2488 sig->u.MlmeSmStartRequest.Beacon.SlotNumber = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
2489 index += SIZEOF_UINT16;
2490 sig->u.MlmeSmStartRequest.Beacon.DataLength = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
2491 index += SIZEOF_UINT16;
2492 sig->u.MlmeSmStartRequest.BssParameters.SlotNumber = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
2493 index += SIZEOF_UINT16;
2494 sig->u.MlmeSmStartRequest.BssParameters.DataLength = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
2495 index += SIZEOF_UINT16;
2496 sig->u.MlmeSmStartRequest.VirtualInterfaceIdentifier = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
2497 index += SIZEOF_UINT16;
2498 sig->u.MlmeSmStartRequest.Ifindex = (CSR_IFINTERFACE) CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
2499 index += SIZEOF_UINT16;
2500 sig->u.MlmeSmStartRequest.Channel = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
2501 index += SIZEOF_UINT16;
2502 memcpy(sig->u.MlmeSmStartRequest.InterfaceAddress.x, &ptr[index], 48 / 8);
2503 index += 48 / 8;
2504 memcpy(sig->u.MlmeSmStartRequest.Bssid.x, &ptr[index], 48 / 8);
2505 index += 48 / 8;
2506 sig->u.MlmeSmStartRequest.BeaconPeriod = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
2507 index += SIZEOF_UINT16;
2508 sig->u.MlmeSmStartRequest.DtimPeriod = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
2509 index += SIZEOF_UINT16;
2510 sig->u.MlmeSmStartRequest.CapabilityInformation = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
2511 index += SIZEOF_UINT16;
2512 break;
2513#endif
2514#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
2515 case CSR_MLME_CONNECT_STATUS_CONFIRM_ID:
2516 sig->u.MlmeConnectStatusConfirm.Dummydataref1.SlotNumber = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
2517 index += SIZEOF_UINT16;
2518 sig->u.MlmeConnectStatusConfirm.Dummydataref1.DataLength = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
2519 index += SIZEOF_UINT16;
2520 sig->u.MlmeConnectStatusConfirm.Dummydataref2.SlotNumber = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
2521 index += SIZEOF_UINT16;
2522 sig->u.MlmeConnectStatusConfirm.Dummydataref2.DataLength = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
2523 index += SIZEOF_UINT16;
2524 sig->u.MlmeConnectStatusConfirm.VirtualInterfaceIdentifier = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
2525 index += SIZEOF_UINT16;
2526 sig->u.MlmeConnectStatusConfirm.ResultCode = (CSR_RESULT_CODE) CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
2527 index += SIZEOF_UINT16;
2528 break;
2529#endif
2530#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
2531 case CSR_MLME_DEL_AUTONOMOUS_SCAN_CONFIRM_ID:
2532 sig->u.MlmeDelAutonomousScanConfirm.Dummydataref1.SlotNumber = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
2533 index += SIZEOF_UINT16;
2534 sig->u.MlmeDelAutonomousScanConfirm.Dummydataref1.DataLength = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
2535 index += SIZEOF_UINT16;
2536 sig->u.MlmeDelAutonomousScanConfirm.Dummydataref2.SlotNumber = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
2537 index += SIZEOF_UINT16;
2538 sig->u.MlmeDelAutonomousScanConfirm.Dummydataref2.DataLength = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
2539 index += SIZEOF_UINT16;
2540 sig->u.MlmeDelAutonomousScanConfirm.VirtualInterfaceIdentifier = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
2541 index += SIZEOF_UINT16;
2542 sig->u.MlmeDelAutonomousScanConfirm.ResultCode = (CSR_RESULT_CODE) CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
2543 index += SIZEOF_UINT16;
2544 sig->u.MlmeDelAutonomousScanConfirm.AutonomousScanId = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
2545 index += SIZEOF_UINT16;
2546 break;
2547#endif
2548#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
2549 case CSR_MLME_DEL_PERIODIC_REQUEST_ID:
2550 sig->u.MlmeDelPeriodicRequest.Dummydataref1.SlotNumber = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
2551 index += SIZEOF_UINT16;
2552 sig->u.MlmeDelPeriodicRequest.Dummydataref1.DataLength = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
2553 index += SIZEOF_UINT16;
2554 sig->u.MlmeDelPeriodicRequest.Dummydataref2.SlotNumber = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
2555 index += SIZEOF_UINT16;
2556 sig->u.MlmeDelPeriodicRequest.Dummydataref2.DataLength = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
2557 index += SIZEOF_UINT16;
2558 sig->u.MlmeDelPeriodicRequest.VirtualInterfaceIdentifier = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
2559 index += SIZEOF_UINT16;
2560 sig->u.MlmeDelPeriodicRequest.PeriodicId = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
2561 index += SIZEOF_UINT16;
2562 break;
2563#endif
2564#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
2565 case CSR_MLME_SETKEYS_REQUEST_ID:
2566 sig->u.MlmeSetkeysRequest.Key.SlotNumber = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
2567 index += SIZEOF_UINT16;
2568 sig->u.MlmeSetkeysRequest.Key.DataLength = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
2569 index += SIZEOF_UINT16;
2570 sig->u.MlmeSetkeysRequest.Dummydataref2.SlotNumber = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
2571 index += SIZEOF_UINT16;
2572 sig->u.MlmeSetkeysRequest.Dummydataref2.DataLength = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
2573 index += SIZEOF_UINT16;
2574 sig->u.MlmeSetkeysRequest.VirtualInterfaceIdentifier = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
2575 index += SIZEOF_UINT16;
2576 sig->u.MlmeSetkeysRequest.Length = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
2577 index += SIZEOF_UINT16;
2578 sig->u.MlmeSetkeysRequest.KeyId = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
2579 index += SIZEOF_UINT16;
2580 sig->u.MlmeSetkeysRequest.KeyType = (CSR_KEY_TYPE) CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
2581 index += SIZEOF_UINT16;
2582 memcpy(sig->u.MlmeSetkeysRequest.Address.x, &ptr[index], 48 / 8);
2583 index += 48 / 8;
2584 sig->u.MlmeSetkeysRequest.SequenceNumber[0] = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
2585 index += SIZEOF_UINT16;
2586 sig->u.MlmeSetkeysRequest.SequenceNumber[1] = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
2587 index += SIZEOF_UINT16;
2588 sig->u.MlmeSetkeysRequest.SequenceNumber[2] = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
2589 index += SIZEOF_UINT16;
2590 sig->u.MlmeSetkeysRequest.SequenceNumber[3] = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
2591 index += SIZEOF_UINT16;
2592 sig->u.MlmeSetkeysRequest.SequenceNumber[4] = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
2593 index += SIZEOF_UINT16;
2594 sig->u.MlmeSetkeysRequest.SequenceNumber[5] = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
2595 index += SIZEOF_UINT16;
2596 sig->u.MlmeSetkeysRequest.SequenceNumber[6] = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
2597 index += SIZEOF_UINT16;
2598 sig->u.MlmeSetkeysRequest.SequenceNumber[7] = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
2599 index += SIZEOF_UINT16;
2600 memcpy(&sig->u.MlmeSetkeysRequest.CipherSuiteSelector, &ptr[index], 32 / 8);
2601 index += 32 / 8;
2602 break;
2603#endif
2604#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
2605 case CSR_MLME_PAUSE_AUTONOMOUS_SCAN_REQUEST_ID:
2606 sig->u.MlmePauseAutonomousScanRequest.Dummydataref1.SlotNumber = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
2607 index += SIZEOF_UINT16;
2608 sig->u.MlmePauseAutonomousScanRequest.Dummydataref1.DataLength = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
2609 index += SIZEOF_UINT16;
2610 sig->u.MlmePauseAutonomousScanRequest.Dummydataref2.SlotNumber = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
2611 index += SIZEOF_UINT16;
2612 sig->u.MlmePauseAutonomousScanRequest.Dummydataref2.DataLength = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
2613 index += SIZEOF_UINT16;
2614 sig->u.MlmePauseAutonomousScanRequest.VirtualInterfaceIdentifier = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
2615 index += SIZEOF_UINT16;
2616 sig->u.MlmePauseAutonomousScanRequest.AutonomousScanId = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
2617 index += SIZEOF_UINT16;
2618 sig->u.MlmePauseAutonomousScanRequest.Pause = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
2619 index += SIZEOF_UINT16;
2620 break;
2621#endif
2622#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
2623 case CSR_MLME_GET_REQUEST_ID:
2624 sig->u.MlmeGetRequest.MibAttribute.SlotNumber = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
2625 index += SIZEOF_UINT16;
2626 sig->u.MlmeGetRequest.MibAttribute.DataLength = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
2627 index += SIZEOF_UINT16;
2628 sig->u.MlmeGetRequest.Dummydataref2.SlotNumber = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
2629 index += SIZEOF_UINT16;
2630 sig->u.MlmeGetRequest.Dummydataref2.DataLength = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
2631 index += SIZEOF_UINT16;
2632 break;
2633#endif
2634#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
2635 case CSR_MLME_POWERMGT_REQUEST_ID:
2636 sig->u.MlmePowermgtRequest.Dummydataref1.SlotNumber = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
2637 index += SIZEOF_UINT16;
2638 sig->u.MlmePowermgtRequest.Dummydataref1.DataLength = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
2639 index += SIZEOF_UINT16;
2640 sig->u.MlmePowermgtRequest.Dummydataref2.SlotNumber = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
2641 index += SIZEOF_UINT16;
2642 sig->u.MlmePowermgtRequest.Dummydataref2.DataLength = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
2643 index += SIZEOF_UINT16;
2644 sig->u.MlmePowermgtRequest.VirtualInterfaceIdentifier = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
2645 index += SIZEOF_UINT16;
2646 sig->u.MlmePowermgtRequest.PowerManagementMode = (CSR_POWER_MANAGEMENT_MODE) CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
2647 index += SIZEOF_UINT16;
2648 sig->u.MlmePowermgtRequest.ReceiveDtims = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
2649 index += SIZEOF_UINT16;
2650 sig->u.MlmePowermgtRequest.ListenInterval = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
2651 index += SIZEOF_UINT16;
2652 sig->u.MlmePowermgtRequest.TrafficWindow = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
2653 index += SIZEOF_UINT16;
2654 break;
2655#endif
2656 case CSR_MA_PACKET_ERROR_INDICATION_ID:
2657 sig->u.MaPacketErrorIndication.Dummydataref1.SlotNumber = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
2658 index += SIZEOF_UINT16;
2659 sig->u.MaPacketErrorIndication.Dummydataref1.DataLength = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
2660 index += SIZEOF_UINT16;
2661 sig->u.MaPacketErrorIndication.Dummydataref2.SlotNumber = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
2662 index += SIZEOF_UINT16;
2663 sig->u.MaPacketErrorIndication.Dummydataref2.DataLength = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
2664 index += SIZEOF_UINT16;
2665 sig->u.MaPacketErrorIndication.VirtualInterfaceIdentifier = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
2666 index += SIZEOF_UINT16;
2667 memcpy(sig->u.MaPacketErrorIndication.PeerQstaAddress.x, &ptr[index], 48 / 8);
2668 index += 48 / 8;
2669 sig->u.MaPacketErrorIndication.UserPriority = (CSR_PRIORITY) CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
2670 index += SIZEOF_UINT16;
2671 sig->u.MaPacketErrorIndication.SequenceNumber = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
2672 index += SIZEOF_UINT16;
2673 break;
2674#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
2675 case CSR_MLME_ADD_PERIODIC_REQUEST_ID:
2676 sig->u.MlmeAddPeriodicRequest.Dummydataref1.SlotNumber = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
2677 index += SIZEOF_UINT16;
2678 sig->u.MlmeAddPeriodicRequest.Dummydataref1.DataLength = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
2679 index += SIZEOF_UINT16;
2680 sig->u.MlmeAddPeriodicRequest.Dummydataref2.SlotNumber = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
2681 index += SIZEOF_UINT16;
2682 sig->u.MlmeAddPeriodicRequest.Dummydataref2.DataLength = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
2683 index += SIZEOF_UINT16;
2684 sig->u.MlmeAddPeriodicRequest.VirtualInterfaceIdentifier = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
2685 index += SIZEOF_UINT16;
2686 sig->u.MlmeAddPeriodicRequest.PeriodicId = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
2687 index += SIZEOF_UINT16;
2688 sig->u.MlmeAddPeriodicRequest.MaximumLatency = CSR_GET_UINT32_FROM_LITTLE_ENDIAN(ptr + index);
2689 index += SIZEOF_UINT32;
2690 sig->u.MlmeAddPeriodicRequest.PeriodicSchedulingMode = (CSR_PERIODIC_SCHEDULING_MODE) CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
2691 index += SIZEOF_UINT16;
2692 sig->u.MlmeAddPeriodicRequest.WakeHost = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
2693 index += SIZEOF_UINT16;
2694 sig->u.MlmeAddPeriodicRequest.UserPriority = (CSR_PRIORITY) CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
2695 index += SIZEOF_UINT16;
2696 break;
2697#endif
2698#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
2699 case CSR_MLME_ADD_TSPEC_REQUEST_ID:
2700 sig->u.MlmeAddTspecRequest.Dummydataref1.SlotNumber = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
2701 index += SIZEOF_UINT16;
2702 sig->u.MlmeAddTspecRequest.Dummydataref1.DataLength = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
2703 index += SIZEOF_UINT16;
2704 sig->u.MlmeAddTspecRequest.Dummydataref2.SlotNumber = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
2705 index += SIZEOF_UINT16;
2706 sig->u.MlmeAddTspecRequest.Dummydataref2.DataLength = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
2707 index += SIZEOF_UINT16;
2708 sig->u.MlmeAddTspecRequest.VirtualInterfaceIdentifier = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
2709 index += SIZEOF_UINT16;
2710 sig->u.MlmeAddTspecRequest.UserPriority = (CSR_PRIORITY) CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
2711 index += SIZEOF_UINT16;
2712 sig->u.MlmeAddTspecRequest.Direction = (CSR_DIRECTION) CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
2713 index += SIZEOF_UINT16;
2714 sig->u.MlmeAddTspecRequest.PsScheme = (CSR_PS_SCHEME) CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
2715 index += SIZEOF_UINT16;
2716 sig->u.MlmeAddTspecRequest.MediumTime = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
2717 index += SIZEOF_UINT16;
2718 sig->u.MlmeAddTspecRequest.ServiceStartTime = CSR_GET_UINT32_FROM_LITTLE_ENDIAN(ptr + index);
2719 index += SIZEOF_UINT32;
2720 sig->u.MlmeAddTspecRequest.ServiceInterval = CSR_GET_UINT32_FROM_LITTLE_ENDIAN(ptr + index);
2721 index += SIZEOF_UINT32;
2722 sig->u.MlmeAddTspecRequest.MinimumDataRate = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
2723 index += SIZEOF_UINT16;
2724 break;
2725#endif
2726#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
2727 case CSR_MLME_ADD_MULTICAST_ADDRESS_CONFIRM_ID:
2728 sig->u.MlmeAddMulticastAddressConfirm.Dummydataref1.SlotNumber = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
2729 index += SIZEOF_UINT16;
2730 sig->u.MlmeAddMulticastAddressConfirm.Dummydataref1.DataLength = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
2731 index += SIZEOF_UINT16;
2732 sig->u.MlmeAddMulticastAddressConfirm.Dummydataref2.SlotNumber = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
2733 index += SIZEOF_UINT16;
2734 sig->u.MlmeAddMulticastAddressConfirm.Dummydataref2.DataLength = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
2735 index += SIZEOF_UINT16;
2736 sig->u.MlmeAddMulticastAddressConfirm.VirtualInterfaceIdentifier = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
2737 index += SIZEOF_UINT16;
2738 sig->u.MlmeAddMulticastAddressConfirm.ResultCode = (CSR_RESULT_CODE) CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
2739 index += SIZEOF_UINT16;
2740 break;
2741#endif
2742#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
2743 case CSR_MLME_ADD_TSPEC_CONFIRM_ID:
2744 sig->u.MlmeAddTspecConfirm.Dummydataref1.SlotNumber = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
2745 index += SIZEOF_UINT16;
2746 sig->u.MlmeAddTspecConfirm.Dummydataref1.DataLength = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
2747 index += SIZEOF_UINT16;
2748 sig->u.MlmeAddTspecConfirm.Dummydataref2.SlotNumber = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
2749 index += SIZEOF_UINT16;
2750 sig->u.MlmeAddTspecConfirm.Dummydataref2.DataLength = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
2751 index += SIZEOF_UINT16;
2752 sig->u.MlmeAddTspecConfirm.VirtualInterfaceIdentifier = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
2753 index += SIZEOF_UINT16;
2754 sig->u.MlmeAddTspecConfirm.UserPriority = (CSR_PRIORITY) CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
2755 index += SIZEOF_UINT16;
2756 sig->u.MlmeAddTspecConfirm.ResultCode = (CSR_RESULT_CODE) CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
2757 index += SIZEOF_UINT16;
2758 break;
2759#endif
2760#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
2761 case CSR_MLME_HL_SYNC_CANCEL_CONFIRM_ID:
2762 sig->u.MlmeHlSyncCancelConfirm.Dummydataref1.SlotNumber = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
2763 index += SIZEOF_UINT16;
2764 sig->u.MlmeHlSyncCancelConfirm.Dummydataref1.DataLength = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
2765 index += SIZEOF_UINT16;
2766 sig->u.MlmeHlSyncCancelConfirm.Dummydataref2.SlotNumber = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
2767 index += SIZEOF_UINT16;
2768 sig->u.MlmeHlSyncCancelConfirm.Dummydataref2.DataLength = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
2769 index += SIZEOF_UINT16;
2770 sig->u.MlmeHlSyncCancelConfirm.ResultCode = (CSR_RESULT_CODE) CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
2771 index += SIZEOF_UINT16;
2772 break;
2773#endif
2774#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
2775 case CSR_MLME_SCAN_CONFIRM_ID:
2776 sig->u.MlmeScanConfirm.Dummydataref1.SlotNumber = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
2777 index += SIZEOF_UINT16;
2778 sig->u.MlmeScanConfirm.Dummydataref1.DataLength = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
2779 index += SIZEOF_UINT16;
2780 sig->u.MlmeScanConfirm.Dummydataref2.SlotNumber = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
2781 index += SIZEOF_UINT16;
2782 sig->u.MlmeScanConfirm.Dummydataref2.DataLength = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
2783 index += SIZEOF_UINT16;
2784 sig->u.MlmeScanConfirm.VirtualInterfaceIdentifier = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
2785 index += SIZEOF_UINT16;
2786 sig->u.MlmeScanConfirm.ResultCode = (CSR_RESULT_CODE) CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
2787 index += SIZEOF_UINT16;
2788 break;
2789#endif
2790 case CSR_DEBUG_STRING_INDICATION_ID:
2791 sig->u.DebugStringIndication.DebugMessage.SlotNumber = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
2792 index += SIZEOF_UINT16;
2793 sig->u.DebugStringIndication.DebugMessage.DataLength = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
2794 index += SIZEOF_UINT16;
2795 sig->u.DebugStringIndication.Dummydataref2.SlotNumber = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
2796 index += SIZEOF_UINT16;
2797 sig->u.DebugStringIndication.Dummydataref2.DataLength = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
2798 index += SIZEOF_UINT16;
2799 break;
2800#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
2801 case CSR_MLME_ADD_TEMPLATE_CONFIRM_ID:
2802 sig->u.MlmeAddTemplateConfirm.Dummydataref1.SlotNumber = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
2803 index += SIZEOF_UINT16;
2804 sig->u.MlmeAddTemplateConfirm.Dummydataref1.DataLength = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
2805 index += SIZEOF_UINT16;
2806 sig->u.MlmeAddTemplateConfirm.Dummydataref2.SlotNumber = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
2807 index += SIZEOF_UINT16;
2808 sig->u.MlmeAddTemplateConfirm.Dummydataref2.DataLength = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
2809 index += SIZEOF_UINT16;
2810 sig->u.MlmeAddTemplateConfirm.VirtualInterfaceIdentifier = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
2811 index += SIZEOF_UINT16;
2812 sig->u.MlmeAddTemplateConfirm.FrameType = (CSR_FRAME_TYPE) CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
2813 index += SIZEOF_UINT16;
2814 sig->u.MlmeAddTemplateConfirm.ResultCode = (CSR_RESULT_CODE) CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
2815 index += SIZEOF_UINT16;
2816 break;
2817#endif
2818#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
2819 case CSR_MLME_BLOCKACK_ERROR_INDICATION_ID:
2820 sig->u.MlmeBlockackErrorIndication.Dummydataref1.SlotNumber = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
2821 index += SIZEOF_UINT16;
2822 sig->u.MlmeBlockackErrorIndication.Dummydataref1.DataLength = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
2823 index += SIZEOF_UINT16;
2824 sig->u.MlmeBlockackErrorIndication.Dummydataref2.SlotNumber = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
2825 index += SIZEOF_UINT16;
2826 sig->u.MlmeBlockackErrorIndication.Dummydataref2.DataLength = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
2827 index += SIZEOF_UINT16;
2828 sig->u.MlmeBlockackErrorIndication.VirtualInterfaceIdentifier = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
2829 index += SIZEOF_UINT16;
2830 sig->u.MlmeBlockackErrorIndication.ResultCode = (CSR_REASON_CODE) CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
2831 index += SIZEOF_UINT16;
2832 memcpy(sig->u.MlmeBlockackErrorIndication.PeerQstaAddress.x, &ptr[index], 48 / 8);
2833 index += 48 / 8;
2834 break;
2835#endif
2836#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
2837 case CSR_MLME_SET_CONFIRM_ID:
2838 sig->u.MlmeSetConfirm.MibAttributeValue.SlotNumber = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
2839 index += SIZEOF_UINT16;
2840 sig->u.MlmeSetConfirm.MibAttributeValue.DataLength = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
2841 index += SIZEOF_UINT16;
2842 sig->u.MlmeSetConfirm.Dummydataref2.SlotNumber = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
2843 index += SIZEOF_UINT16;
2844 sig->u.MlmeSetConfirm.Dummydataref2.DataLength = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
2845 index += SIZEOF_UINT16;
2846 sig->u.MlmeSetConfirm.Status = (CSR_MIB_STATUS) CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
2847 index += SIZEOF_UINT16;
2848 sig->u.MlmeSetConfirm.ErrorIndex = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
2849 index += SIZEOF_UINT16;
2850 break;
2851#endif
2852#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
2853 case CSR_MLME_MEASURE_REQUEST_ID:
2854 sig->u.MlmeMeasureRequest.MeasurementRequestSet.SlotNumber = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
2855 index += SIZEOF_UINT16;
2856 sig->u.MlmeMeasureRequest.MeasurementRequestSet.DataLength = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
2857 index += SIZEOF_UINT16;
2858 sig->u.MlmeMeasureRequest.Dummydataref2.SlotNumber = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
2859 index += SIZEOF_UINT16;
2860 sig->u.MlmeMeasureRequest.Dummydataref2.DataLength = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
2861 index += SIZEOF_UINT16;
2862 sig->u.MlmeMeasureRequest.DialogToken = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
2863 index += SIZEOF_UINT16;
2864 break;
2865#endif
2866#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
2867 case CSR_MLME_START_AGGREGATION_CONFIRM_ID:
2868 sig->u.MlmeStartAggregationConfirm.Dummydataref1.SlotNumber = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
2869 index += SIZEOF_UINT16;
2870 sig->u.MlmeStartAggregationConfirm.Dummydataref1.DataLength = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
2871 index += SIZEOF_UINT16;
2872 sig->u.MlmeStartAggregationConfirm.Dummydataref2.SlotNumber = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
2873 index += SIZEOF_UINT16;
2874 sig->u.MlmeStartAggregationConfirm.Dummydataref2.DataLength = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
2875 index += SIZEOF_UINT16;
2876 sig->u.MlmeStartAggregationConfirm.VirtualInterfaceIdentifier = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
2877 index += SIZEOF_UINT16;
2878 memcpy(sig->u.MlmeStartAggregationConfirm.PeerQstaAddress.x, &ptr[index], 48 / 8);
2879 index += 48 / 8;
2880 sig->u.MlmeStartAggregationConfirm.UserPriority = (CSR_PRIORITY) CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
2881 index += SIZEOF_UINT16;
2882 sig->u.MlmeStartAggregationConfirm.Direction = (CSR_DIRECTION) CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
2883 index += SIZEOF_UINT16;
2884 sig->u.MlmeStartAggregationConfirm.ResultCode = (CSR_RESULT_CODE) CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
2885 index += SIZEOF_UINT16;
2886 sig->u.MlmeStartAggregationConfirm.SequenceNumber = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
2887 index += SIZEOF_UINT16;
2888 break;
2889#endif
2890#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
2891 case CSR_MLME_STOP_MEASURE_CONFIRM_ID:
2892 sig->u.MlmeStopMeasureConfirm.Dummydataref1.SlotNumber = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
2893 index += SIZEOF_UINT16;
2894 sig->u.MlmeStopMeasureConfirm.Dummydataref1.DataLength = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
2895 index += SIZEOF_UINT16;
2896 sig->u.MlmeStopMeasureConfirm.Dummydataref2.SlotNumber = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
2897 index += SIZEOF_UINT16;
2898 sig->u.MlmeStopMeasureConfirm.Dummydataref2.DataLength = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
2899 index += SIZEOF_UINT16;
2900 sig->u.MlmeStopMeasureConfirm.ResultCode = (CSR_RESULT_CODE) CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
2901 index += SIZEOF_UINT16;
2902 sig->u.MlmeStopMeasureConfirm.DialogToken = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
2903 index += SIZEOF_UINT16;
2904 break;
2905#endif
2906 case CSR_MA_PACKET_CONFIRM_ID:
2907 sig->u.MaPacketConfirm.Dummydataref1.SlotNumber = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
2908 index += SIZEOF_UINT16;
2909 sig->u.MaPacketConfirm.Dummydataref1.DataLength = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
2910 index += SIZEOF_UINT16;
2911 sig->u.MaPacketConfirm.Dummydataref2.SlotNumber = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
2912 index += SIZEOF_UINT16;
2913 sig->u.MaPacketConfirm.Dummydataref2.DataLength = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
2914 index += SIZEOF_UINT16;
2915 sig->u.MaPacketConfirm.VirtualInterfaceIdentifier = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
2916 index += SIZEOF_UINT16;
2917 sig->u.MaPacketConfirm.TransmissionStatus = (CSR_TRANSMISSION_STATUS) CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
2918 index += SIZEOF_UINT16;
2919 sig->u.MaPacketConfirm.RetryCount = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
2920 index += SIZEOF_UINT16;
2921 sig->u.MaPacketConfirm.Rate = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
2922 index += SIZEOF_UINT16;
2923 sig->u.MaPacketConfirm.HostTag = CSR_GET_UINT32_FROM_LITTLE_ENDIAN(ptr + index);
2924 index += SIZEOF_UINT32;
2925 break;
2926#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
2927 case CSR_MLME_DEL_PERIODIC_CONFIRM_ID:
2928 sig->u.MlmeDelPeriodicConfirm.Dummydataref1.SlotNumber = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
2929 index += SIZEOF_UINT16;
2930 sig->u.MlmeDelPeriodicConfirm.Dummydataref1.DataLength = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
2931 index += SIZEOF_UINT16;
2932 sig->u.MlmeDelPeriodicConfirm.Dummydataref2.SlotNumber = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
2933 index += SIZEOF_UINT16;
2934 sig->u.MlmeDelPeriodicConfirm.Dummydataref2.DataLength = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
2935 index += SIZEOF_UINT16;
2936 sig->u.MlmeDelPeriodicConfirm.VirtualInterfaceIdentifier = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
2937 index += SIZEOF_UINT16;
2938 sig->u.MlmeDelPeriodicConfirm.PeriodicId = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
2939 index += SIZEOF_UINT16;
2940 sig->u.MlmeDelPeriodicConfirm.ResultCode = (CSR_RESULT_CODE) CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
2941 index += SIZEOF_UINT16;
2942 break;
2943#endif
2944#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
2945 case CSR_MLME_STOP_MEASURE_REQUEST_ID:
2946 sig->u.MlmeStopMeasureRequest.Dummydataref1.SlotNumber = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
2947 index += SIZEOF_UINT16;
2948 sig->u.MlmeStopMeasureRequest.Dummydataref1.DataLength = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
2949 index += SIZEOF_UINT16;
2950 sig->u.MlmeStopMeasureRequest.Dummydataref2.SlotNumber = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
2951 index += SIZEOF_UINT16;
2952 sig->u.MlmeStopMeasureRequest.Dummydataref2.DataLength = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
2953 index += SIZEOF_UINT16;
2954 sig->u.MlmeStopMeasureRequest.DialogToken = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
2955 index += SIZEOF_UINT16;
2956 break;
2957#endif
2958
2959 default:
2960 return CSR_WIFI_HIP_RESULT_INVALID_VALUE;
2961 }
2962 return CSR_RESULT_SUCCESS;
2963} /* read_unpack_signal() */
2964
2965
2966/*
2967 * ---------------------------------------------------------------------------
2968 * write_pack
2969 *
2970 * Convert a signal structure, in host-native format, to the
2971 * little-endian wire format specified in the UniFi Host Interface
2972 * Protocol Specification.
2973 *
2974 * WARNING: This function is auto-generated, DO NOT EDIT!
2975 *
2976 * Arguments:
2977 * sig Pointer to signal structure to pack.
2978 * ptr Destination buffer to pack into.
2979 * sig_len Returns the length of the packed signal, i.e. the
2980 * number of bytes written to ptr.
2981 *
2982 * Returns:
2983 * CSR_RESULT_SUCCESS on success,
2984 *   CSR_WIFI_HIP_RESULT_INVALID_VALUE if the signal ID was not recognised.
2985 * ---------------------------------------------------------------------------
2986 */
2987CsrResult write_pack(const CSR_SIGNAL *sig, u8 *ptr, u16 *sig_len)
2988{
2989 s16 index = 0;
2990
2991 CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->SignalPrimitiveHeader.SignalId, ptr + index);
2992 index += SIZEOF_UINT16;
2993
2994 CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->SignalPrimitiveHeader.ReceiverProcessId, ptr + index);
2995 index += SIZEOF_UINT16;
2996
2997 CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->SignalPrimitiveHeader.SenderProcessId, ptr + index);
2998 index += SIZEOF_UINT16;
2999
3000 switch (sig->SignalPrimitiveHeader.SignalId)
3001 {
3002#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
3003 case CSR_MLME_SET_PACKET_FILTER_CONFIRM_ID:
3004 CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeSetPacketFilterConfirm.Dummydataref1.SlotNumber, ptr + index);
3005 index += SIZEOF_UINT16;
3006 CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeSetPacketFilterConfirm.Dummydataref1.DataLength, ptr + index);
3007 index += SIZEOF_UINT16;
3008 CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeSetPacketFilterConfirm.Dummydataref2.SlotNumber, ptr + index);
3009 index += SIZEOF_UINT16;
3010 CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeSetPacketFilterConfirm.Dummydataref2.DataLength, ptr + index);
3011 index += SIZEOF_UINT16;
3012 CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeSetPacketFilterConfirm.VirtualInterfaceIdentifier, ptr + index);
3013 index += SIZEOF_UINT16;
3014 CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeSetPacketFilterConfirm.ResultCode, ptr + index);
3015 index += SIZEOF_UINT16;
3016 break;
3017#endif
3018#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
3019 case CSR_MLME_SETKEYS_CONFIRM_ID:
3020 CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeSetkeysConfirm.Dummydataref1.SlotNumber, ptr + index);
3021 index += SIZEOF_UINT16;
3022 CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeSetkeysConfirm.Dummydataref1.DataLength, ptr + index);
3023 index += SIZEOF_UINT16;
3024 CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeSetkeysConfirm.Dummydataref2.SlotNumber, ptr + index);
3025 index += SIZEOF_UINT16;
3026 CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeSetkeysConfirm.Dummydataref2.DataLength, ptr + index);
3027 index += SIZEOF_UINT16;
3028 CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeSetkeysConfirm.VirtualInterfaceIdentifier, ptr + index);
3029 index += SIZEOF_UINT16;
3030 CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeSetkeysConfirm.ResultCode, ptr + index);
3031 index += SIZEOF_UINT16;
3032 break;
3033#endif
3034#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
3035 case CSR_MLME_CONFIG_QUEUE_CONFIRM_ID:
3036 CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeConfigQueueConfirm.Dummydataref1.SlotNumber, ptr + index);
3037 index += SIZEOF_UINT16;
3038 CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeConfigQueueConfirm.Dummydataref1.DataLength, ptr + index);
3039 index += SIZEOF_UINT16;
3040 CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeConfigQueueConfirm.Dummydataref2.SlotNumber, ptr + index);
3041 index += SIZEOF_UINT16;
3042 CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeConfigQueueConfirm.Dummydataref2.DataLength, ptr + index);
3043 index += SIZEOF_UINT16;
3044 CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeConfigQueueConfirm.ResultCode, ptr + index);
3045 index += SIZEOF_UINT16;
3046 break;
3047#endif
3048#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
3049 case CSR_MLME_ADD_AUTONOMOUS_SCAN_CONFIRM_ID:
3050 CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeAddAutonomousScanConfirm.Dummydataref1.SlotNumber, ptr + index);
3051 index += SIZEOF_UINT16;
3052 CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeAddAutonomousScanConfirm.Dummydataref1.DataLength, ptr + index);
3053 index += SIZEOF_UINT16;
3054 CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeAddAutonomousScanConfirm.Dummydataref2.SlotNumber, ptr + index);
3055 index += SIZEOF_UINT16;
3056 CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeAddAutonomousScanConfirm.Dummydataref2.DataLength, ptr + index);
3057 index += SIZEOF_UINT16;
3058 CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeAddAutonomousScanConfirm.VirtualInterfaceIdentifier, ptr + index);
3059 index += SIZEOF_UINT16;
3060 CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeAddAutonomousScanConfirm.ResultCode, ptr + index);
3061 index += SIZEOF_UINT16;
3062 CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeAddAutonomousScanConfirm.AutonomousScanId, ptr + index);
3063 index += SIZEOF_UINT16;
3064 break;
3065#endif
3066#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
3067 case CSR_MLME_ADD_BLACKOUT_CONFIRM_ID:
3068 CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeAddBlackoutConfirm.Dummydataref1.SlotNumber, ptr + index);
3069 index += SIZEOF_UINT16;
3070 CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeAddBlackoutConfirm.Dummydataref1.DataLength, ptr + index);
3071 index += SIZEOF_UINT16;
3072 CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeAddBlackoutConfirm.Dummydataref2.SlotNumber, ptr + index);
3073 index += SIZEOF_UINT16;
3074 CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeAddBlackoutConfirm.Dummydataref2.DataLength, ptr + index);
3075 index += SIZEOF_UINT16;
3076 CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeAddBlackoutConfirm.VirtualInterfaceIdentifier, ptr + index);
3077 index += SIZEOF_UINT16;
3078 CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeAddBlackoutConfirm.BlackoutId, ptr + index);
3079 index += SIZEOF_UINT16;
3080 CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeAddBlackoutConfirm.ResultCode, ptr + index);
3081 index += SIZEOF_UINT16;
3082 break;
3083#endif
3084#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
3085 case CSR_MLME_DEL_BLACKOUT_REQUEST_ID:
3086 CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeDelBlackoutRequest.Dummydataref1.SlotNumber, ptr + index);
3087 index += SIZEOF_UINT16;
3088 CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeDelBlackoutRequest.Dummydataref1.DataLength, ptr + index);
3089 index += SIZEOF_UINT16;
3090 CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeDelBlackoutRequest.Dummydataref2.SlotNumber, ptr + index);
3091 index += SIZEOF_UINT16;
3092 CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeDelBlackoutRequest.Dummydataref2.DataLength, ptr + index);
3093 index += SIZEOF_UINT16;
3094 CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeDelBlackoutRequest.VirtualInterfaceIdentifier, ptr + index);
3095 index += SIZEOF_UINT16;
3096 CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeDelBlackoutRequest.BlackoutId, ptr + index);
3097 index += SIZEOF_UINT16;
3098 break;
3099#endif
3100#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
3101 case CSR_MLME_GET_KEY_SEQUENCE_CONFIRM_ID:
3102 CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeGetKeySequenceConfirm.Dummydataref1.SlotNumber, ptr + index);
3103 index += SIZEOF_UINT16;
3104 CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeGetKeySequenceConfirm.Dummydataref1.DataLength, ptr + index);
3105 index += SIZEOF_UINT16;
3106 CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeGetKeySequenceConfirm.Dummydataref2.SlotNumber, ptr + index);
3107 index += SIZEOF_UINT16;
3108 CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeGetKeySequenceConfirm.Dummydataref2.DataLength, ptr + index);
3109 index += SIZEOF_UINT16;
3110 CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeGetKeySequenceConfirm.VirtualInterfaceIdentifier, ptr + index);
3111 index += SIZEOF_UINT16;
3112 CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeGetKeySequenceConfirm.ResultCode, ptr + index);
3113 index += SIZEOF_UINT16;
3114 CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeGetKeySequenceConfirm.SequenceNumber[0], ptr + index);
3115 index += SIZEOF_UINT16;
3116 CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeGetKeySequenceConfirm.SequenceNumber[1], ptr + index);
3117 index += SIZEOF_UINT16;
3118 CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeGetKeySequenceConfirm.SequenceNumber[2], ptr + index);
3119 index += SIZEOF_UINT16;
3120 CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeGetKeySequenceConfirm.SequenceNumber[3], ptr + index);
3121 index += SIZEOF_UINT16;
3122 CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeGetKeySequenceConfirm.SequenceNumber[4], ptr + index);
3123 index += SIZEOF_UINT16;
3124 CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeGetKeySequenceConfirm.SequenceNumber[5], ptr + index);
3125 index += SIZEOF_UINT16;
3126 CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeGetKeySequenceConfirm.SequenceNumber[6], ptr + index);
3127 index += SIZEOF_UINT16;
3128 CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeGetKeySequenceConfirm.SequenceNumber[7], ptr + index);
3129 index += SIZEOF_UINT16;
3130 break;
3131#endif
3132#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
3133 case CSR_MLME_SM_START_CONFIRM_ID:
3134 CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeSmStartConfirm.Dummydataref1.SlotNumber, ptr + index);
3135 index += SIZEOF_UINT16;
3136 CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeSmStartConfirm.Dummydataref1.DataLength, ptr + index);
3137 index += SIZEOF_UINT16;
3138 CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeSmStartConfirm.Dummydataref2.SlotNumber, ptr + index);
3139 index += SIZEOF_UINT16;
3140 CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeSmStartConfirm.Dummydataref2.DataLength, ptr + index);
3141 index += SIZEOF_UINT16;
3142 CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeSmStartConfirm.VirtualInterfaceIdentifier, ptr + index);
3143 index += SIZEOF_UINT16;
3144 CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeSmStartConfirm.ResultCode, ptr + index);
3145 index += SIZEOF_UINT16;
3146 break;
3147#endif
#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
        /*
         * MLME-STOP-AGGREGATION.confirm: two dummy data-reference headers
         * (SlotNumber, DataLength), VIF id, 6-byte (48-bit) peer QSTA
         * address, then UserPriority, Direction, ResultCode.  Each macro
         * writes one u16 little-endian at ptr + index; index tracks the
         * running output offset.  Field order is the wire format.
         */
        case CSR_MLME_STOP_AGGREGATION_CONFIRM_ID:
            CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeStopAggregationConfirm.Dummydataref1.SlotNumber, ptr + index);
            index += SIZEOF_UINT16;
            CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeStopAggregationConfirm.Dummydataref1.DataLength, ptr + index);
            index += SIZEOF_UINT16;
            CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeStopAggregationConfirm.Dummydataref2.SlotNumber, ptr + index);
            index += SIZEOF_UINT16;
            CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeStopAggregationConfirm.Dummydataref2.DataLength, ptr + index);
            index += SIZEOF_UINT16;
            CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeStopAggregationConfirm.VirtualInterfaceIdentifier, ptr + index);
            index += SIZEOF_UINT16;
            /* 48 / 8 = 6 raw address bytes, copied verbatim (no byte swap) */
            memcpy(ptr + index, sig->u.MlmeStopAggregationConfirm.PeerQstaAddress.x, 48 / 8);
            index += 48 / 8;
            CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeStopAggregationConfirm.UserPriority, ptr + index);
            index += SIZEOF_UINT16;
            CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeStopAggregationConfirm.Direction, ptr + index);
            index += SIZEOF_UINT16;
            CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeStopAggregationConfirm.ResultCode, ptr + index);
            index += SIZEOF_UINT16;
            break;
#endif
#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
        /*
         * MLME-DEL-TSPEC.request: dummy data refs, VIF id, UserPriority,
         * Direction — all u16 little-endian.
         */
        case CSR_MLME_DEL_TSPEC_REQUEST_ID:
            CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeDelTspecRequest.Dummydataref1.SlotNumber, ptr + index);
            index += SIZEOF_UINT16;
            CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeDelTspecRequest.Dummydataref1.DataLength, ptr + index);
            index += SIZEOF_UINT16;
            CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeDelTspecRequest.Dummydataref2.SlotNumber, ptr + index);
            index += SIZEOF_UINT16;
            CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeDelTspecRequest.Dummydataref2.DataLength, ptr + index);
            index += SIZEOF_UINT16;
            CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeDelTspecRequest.VirtualInterfaceIdentifier, ptr + index);
            index += SIZEOF_UINT16;
            CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeDelTspecRequest.UserPriority, ptr + index);
            index += SIZEOF_UINT16;
            CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeDelTspecRequest.Direction, ptr + index);
            index += SIZEOF_UINT16;
            break;
#endif
        /*
         * DEBUG-WORD16.indication: dummy data refs followed by the sixteen
         * DebugWords[0..15], each packed as u16 little-endian in array order.
         * The unrolled per-element form is how the generator emits it.
         */
        case CSR_DEBUG_WORD16_INDICATION_ID:
            CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.DebugWord16Indication.Dummydataref1.SlotNumber, ptr + index);
            index += SIZEOF_UINT16;
            CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.DebugWord16Indication.Dummydataref1.DataLength, ptr + index);
            index += SIZEOF_UINT16;
            CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.DebugWord16Indication.Dummydataref2.SlotNumber, ptr + index);
            index += SIZEOF_UINT16;
            CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.DebugWord16Indication.Dummydataref2.DataLength, ptr + index);
            index += SIZEOF_UINT16;
            CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.DebugWord16Indication.DebugWords[0], ptr + index);
            index += SIZEOF_UINT16;
            CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.DebugWord16Indication.DebugWords[1], ptr + index);
            index += SIZEOF_UINT16;
            CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.DebugWord16Indication.DebugWords[2], ptr + index);
            index += SIZEOF_UINT16;
            CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.DebugWord16Indication.DebugWords[3], ptr + index);
            index += SIZEOF_UINT16;
            CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.DebugWord16Indication.DebugWords[4], ptr + index);
            index += SIZEOF_UINT16;
            CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.DebugWord16Indication.DebugWords[5], ptr + index);
            index += SIZEOF_UINT16;
            CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.DebugWord16Indication.DebugWords[6], ptr + index);
            index += SIZEOF_UINT16;
            CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.DebugWord16Indication.DebugWords[7], ptr + index);
            index += SIZEOF_UINT16;
            CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.DebugWord16Indication.DebugWords[8], ptr + index);
            index += SIZEOF_UINT16;
            CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.DebugWord16Indication.DebugWords[9], ptr + index);
            index += SIZEOF_UINT16;
            CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.DebugWord16Indication.DebugWords[10], ptr + index);
            index += SIZEOF_UINT16;
            CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.DebugWord16Indication.DebugWords[11], ptr + index);
            index += SIZEOF_UINT16;
            CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.DebugWord16Indication.DebugWords[12], ptr + index);
            index += SIZEOF_UINT16;
            CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.DebugWord16Indication.DebugWords[13], ptr + index);
            index += SIZEOF_UINT16;
            CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.DebugWord16Indication.DebugWords[14], ptr + index);
            index += SIZEOF_UINT16;
            CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.DebugWord16Indication.DebugWords[15], ptr + index);
            index += SIZEOF_UINT16;
            break;
        /*
         * DEBUG-GENERIC.confirm: DebugVariable data-ref header, dummy
         * second data ref, then DebugWords[0..7] as u16 little-endian.
         */
        case CSR_DEBUG_GENERIC_CONFIRM_ID:
            CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.DebugGenericConfirm.DebugVariable.SlotNumber, ptr + index);
            index += SIZEOF_UINT16;
            CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.DebugGenericConfirm.DebugVariable.DataLength, ptr + index);
            index += SIZEOF_UINT16;
            CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.DebugGenericConfirm.Dummydataref2.SlotNumber, ptr + index);
            index += SIZEOF_UINT16;
            CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.DebugGenericConfirm.Dummydataref2.DataLength, ptr + index);
            index += SIZEOF_UINT16;
            CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.DebugGenericConfirm.DebugWords[0], ptr + index);
            index += SIZEOF_UINT16;
            CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.DebugGenericConfirm.DebugWords[1], ptr + index);
            index += SIZEOF_UINT16;
            CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.DebugGenericConfirm.DebugWords[2], ptr + index);
            index += SIZEOF_UINT16;
            CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.DebugGenericConfirm.DebugWords[3], ptr + index);
            index += SIZEOF_UINT16;
            CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.DebugGenericConfirm.DebugWords[4], ptr + index);
            index += SIZEOF_UINT16;
            CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.DebugGenericConfirm.DebugWords[5], ptr + index);
            index += SIZEOF_UINT16;
            CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.DebugGenericConfirm.DebugWords[6], ptr + index);
            index += SIZEOF_UINT16;
            CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.DebugGenericConfirm.DebugWords[7], ptr + index);
            index += SIZEOF_UINT16;
            break;
        /*
         * MA-PACKET.indication: Data data-ref header, dummy second ref,
         * VIF id, the 8-byte (64-bit) LocalTime copied verbatim, then
         * Ifindex, Channel, ReceptionStatus, Rssi, Snr, ReceivedRate
         * (each u16 little-endian).
         */
        case CSR_MA_PACKET_INDICATION_ID:
            CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MaPacketIndication.Data.SlotNumber, ptr + index);
            index += SIZEOF_UINT16;
            CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MaPacketIndication.Data.DataLength, ptr + index);
            index += SIZEOF_UINT16;
            CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MaPacketIndication.Dummydataref2.SlotNumber, ptr + index);
            index += SIZEOF_UINT16;
            CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MaPacketIndication.Dummydataref2.DataLength, ptr + index);
            index += SIZEOF_UINT16;
            CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MaPacketIndication.VirtualInterfaceIdentifier, ptr + index);
            index += SIZEOF_UINT16;
            /* 64 / 8 = 8 raw bytes, no endian conversion applied */
            memcpy(ptr + index, sig->u.MaPacketIndication.LocalTime.x, 64 / 8);
            index += 64 / 8;
            CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MaPacketIndication.Ifindex, ptr + index);
            index += SIZEOF_UINT16;
            CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MaPacketIndication.Channel, ptr + index);
            index += SIZEOF_UINT16;
            CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MaPacketIndication.ReceptionStatus, ptr + index);
            index += SIZEOF_UINT16;
            CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MaPacketIndication.Rssi, ptr + index);
            index += SIZEOF_UINT16;
            CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MaPacketIndication.Snr, ptr + index);
            index += SIZEOF_UINT16;
            CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MaPacketIndication.ReceivedRate, ptr + index);
            index += SIZEOF_UINT16;
            break;
        /*
         * MLME-SET-TIM.request: dummy data refs, VIF id, AssociationId,
         * TimValue (all u16 little-endian).
         */
        case CSR_MLME_SET_TIM_REQUEST_ID:
            CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeSetTimRequest.Dummydataref1.SlotNumber, ptr + index);
            index += SIZEOF_UINT16;
            CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeSetTimRequest.Dummydataref1.DataLength, ptr + index);
            index += SIZEOF_UINT16;
            CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeSetTimRequest.Dummydataref2.SlotNumber, ptr + index);
            index += SIZEOF_UINT16;
            CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeSetTimRequest.Dummydataref2.DataLength, ptr + index);
            index += SIZEOF_UINT16;
            CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeSetTimRequest.VirtualInterfaceIdentifier, ptr + index);
            index += SIZEOF_UINT16;
            CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeSetTimRequest.AssociationId, ptr + index);
            index += SIZEOF_UINT16;
            CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeSetTimRequest.TimValue, ptr + index);
            index += SIZEOF_UINT16;
            break;
#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
        /*
         * MLME-CONNECTED.indication: dummy data refs, VIF id,
         * ConnectionStatus, then the 6-byte peer MAC-style address copied
         * verbatim.
         */
        case CSR_MLME_CONNECTED_INDICATION_ID:
            CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeConnectedIndication.Dummydataref1.SlotNumber, ptr + index);
            index += SIZEOF_UINT16;
            CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeConnectedIndication.Dummydataref1.DataLength, ptr + index);
            index += SIZEOF_UINT16;
            CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeConnectedIndication.Dummydataref2.SlotNumber, ptr + index);
            index += SIZEOF_UINT16;
            CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeConnectedIndication.Dummydataref2.DataLength, ptr + index);
            index += SIZEOF_UINT16;
            CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeConnectedIndication.VirtualInterfaceIdentifier, ptr + index);
            index += SIZEOF_UINT16;
            CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeConnectedIndication.ConnectionStatus, ptr + index);
            index += SIZEOF_UINT16;
            memcpy(ptr + index, sig->u.MlmeConnectedIndication.PeerMacAddress.x, 48 / 8);
            index += 48 / 8;
            break;
#endif
#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
        /*
         * MLME-DEL-RX-TRIGGER.request: dummy data refs, VIF id, TriggerId
         * (u16 little-endian each).
         */
        case CSR_MLME_DEL_RX_TRIGGER_REQUEST_ID:
            CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeDelRxTriggerRequest.Dummydataref1.SlotNumber, ptr + index);
            index += SIZEOF_UINT16;
            CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeDelRxTriggerRequest.Dummydataref1.DataLength, ptr + index);
            index += SIZEOF_UINT16;
            CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeDelRxTriggerRequest.Dummydataref2.SlotNumber, ptr + index);
            index += SIZEOF_UINT16;
            CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeDelRxTriggerRequest.Dummydataref2.DataLength, ptr + index);
            index += SIZEOF_UINT16;
            CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeDelRxTriggerRequest.VirtualInterfaceIdentifier, ptr + index);
            index += SIZEOF_UINT16;
            CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeDelRxTriggerRequest.TriggerId, ptr + index);
            index += SIZEOF_UINT16;
            break;
#endif
#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
        /*
         * MLME-TRIGGERED-GET.indication: MibAttributeValue data-ref header,
         * dummy second ref, VIF id, Status, ErrorIndex, TriggeredId.
         */
        case CSR_MLME_TRIGGERED_GET_INDICATION_ID:
            CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeTriggeredGetIndication.MibAttributeValue.SlotNumber, ptr + index);
            index += SIZEOF_UINT16;
            CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeTriggeredGetIndication.MibAttributeValue.DataLength, ptr + index);
            index += SIZEOF_UINT16;
            CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeTriggeredGetIndication.Dummydataref2.SlotNumber, ptr + index);
            index += SIZEOF_UINT16;
            CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeTriggeredGetIndication.Dummydataref2.DataLength, ptr + index);
            index += SIZEOF_UINT16;
            CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeTriggeredGetIndication.VirtualInterfaceIdentifier, ptr + index);
            index += SIZEOF_UINT16;
            CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeTriggeredGetIndication.Status, ptr + index);
            index += SIZEOF_UINT16;
            CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeTriggeredGetIndication.ErrorIndex, ptr + index);
            index += SIZEOF_UINT16;
            CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeTriggeredGetIndication.TriggeredId, ptr + index);
            index += SIZEOF_UINT16;
            break;
#endif
#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
        /*
         * MLME-SCAN.request: ChannelList and InformationElements data-ref
         * headers, VIF id, Ifindex, ScanType, then the only 32-bit field in
         * this span (ProbeDelay, u32 little-endian), MinChannelTime and
         * MaxChannelTime.
         */
        case CSR_MLME_SCAN_REQUEST_ID:
            CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeScanRequest.ChannelList.SlotNumber, ptr + index);
            index += SIZEOF_UINT16;
            CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeScanRequest.ChannelList.DataLength, ptr + index);
            index += SIZEOF_UINT16;
            CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeScanRequest.InformationElements.SlotNumber, ptr + index);
            index += SIZEOF_UINT16;
            CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeScanRequest.InformationElements.DataLength, ptr + index);
            index += SIZEOF_UINT16;
            CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeScanRequest.VirtualInterfaceIdentifier, ptr + index);
            index += SIZEOF_UINT16;
            CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeScanRequest.Ifindex, ptr + index);
            index += SIZEOF_UINT16;
            CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeScanRequest.ScanType, ptr + index);
            index += SIZEOF_UINT16;
            CSR_COPY_UINT32_TO_LITTLE_ENDIAN(sig->u.MlmeScanRequest.ProbeDelay, ptr + index);
            index += SIZEOF_UINT32;
            CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeScanRequest.MinChannelTime, ptr + index);
            index += SIZEOF_UINT16;
            CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeScanRequest.MaxChannelTime, ptr + index);
            index += SIZEOF_UINT16;
            break;
#endif
#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
        /*
         * MLME-DELETEKEYS.confirm: dummy data refs, VIF id, ResultCode.
         */
        case CSR_MLME_DELETEKEYS_CONFIRM_ID:
            CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeDeletekeysConfirm.Dummydataref1.SlotNumber, ptr + index);
            index += SIZEOF_UINT16;
            CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeDeletekeysConfirm.Dummydataref1.DataLength, ptr + index);
            index += SIZEOF_UINT16;
            CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeDeletekeysConfirm.Dummydataref2.SlotNumber, ptr + index);
            index += SIZEOF_UINT16;
            CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeDeletekeysConfirm.Dummydataref2.DataLength, ptr + index);
            index += SIZEOF_UINT16;
            CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeDeletekeysConfirm.VirtualInterfaceIdentifier, ptr + index);
            index += SIZEOF_UINT16;
            CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeDeletekeysConfirm.ResultCode, ptr + index);
            index += SIZEOF_UINT16;
            break;
#endif
#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
        /*
         * MLME-GET-NEXT.request: only the MibAttribute data-ref header and
         * the dummy second ref — no further payload fields for this signal.
         */
        case CSR_MLME_GET_NEXT_REQUEST_ID:
            CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeGetNextRequest.MibAttribute.SlotNumber, ptr + index);
            index += SIZEOF_UINT16;
            CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeGetNextRequest.MibAttribute.DataLength, ptr + index);
            index += SIZEOF_UINT16;
            CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeGetNextRequest.Dummydataref2.SlotNumber, ptr + index);
            index += SIZEOF_UINT16;
            CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeGetNextRequest.Dummydataref2.DataLength, ptr + index);
            index += SIZEOF_UINT16;
            break;
#endif
#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
        /*
         * MLME-SET-CHANNEL.confirm: dummy data refs, VIF id, ResultCode.
         */
        case CSR_MLME_SET_CHANNEL_CONFIRM_ID:
            CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeSetChannelConfirm.Dummydataref1.SlotNumber, ptr + index);
            index += SIZEOF_UINT16;
            CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeSetChannelConfirm.Dummydataref1.DataLength, ptr + index);
            index += SIZEOF_UINT16;
            CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeSetChannelConfirm.Dummydataref2.SlotNumber, ptr + index);
            index += SIZEOF_UINT16;
            CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeSetChannelConfirm.Dummydataref2.DataLength, ptr + index);
            index += SIZEOF_UINT16;
            CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeSetChannelConfirm.VirtualInterfaceIdentifier, ptr + index);
            index += SIZEOF_UINT16;
            CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeSetChannelConfirm.ResultCode, ptr + index);
            index += SIZEOF_UINT16;
            break;
#endif
#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
        /*
         * MLME-START-AGGREGATION.request: dummy data refs, VIF id, 6-byte
         * peer QSTA address, then UserPriority, Direction,
         * StartingSequenceNumber, BufferSize, BlockAckTimeout.
         */
        case CSR_MLME_START_AGGREGATION_REQUEST_ID:
            CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeStartAggregationRequest.Dummydataref1.SlotNumber, ptr + index);
            index += SIZEOF_UINT16;
            CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeStartAggregationRequest.Dummydataref1.DataLength, ptr + index);
            index += SIZEOF_UINT16;
            CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeStartAggregationRequest.Dummydataref2.SlotNumber, ptr + index);
            index += SIZEOF_UINT16;
            CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeStartAggregationRequest.Dummydataref2.DataLength, ptr + index);
            index += SIZEOF_UINT16;
            CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeStartAggregationRequest.VirtualInterfaceIdentifier, ptr + index);
            index += SIZEOF_UINT16;
            memcpy(ptr + index, sig->u.MlmeStartAggregationRequest.PeerQstaAddress.x, 48 / 8);
            index += 48 / 8;
            CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeStartAggregationRequest.UserPriority, ptr + index);
            index += SIZEOF_UINT16;
            CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeStartAggregationRequest.Direction, ptr + index);
            index += SIZEOF_UINT16;
            CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeStartAggregationRequest.StartingSequenceNumber, ptr + index);
            index += SIZEOF_UINT16;
            CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeStartAggregationRequest.BufferSize, ptr + index);
            index += SIZEOF_UINT16;
            CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeStartAggregationRequest.BlockAckTimeout, ptr + index);
            index += SIZEOF_UINT16;
            break;
#endif
#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
        /*
         * MLME-HL-SYNC.request: dummy data refs then the 6-byte group
         * address copied verbatim.  No VIF field for this signal.
         */
        case CSR_MLME_HL_SYNC_REQUEST_ID:
            CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeHlSyncRequest.Dummydataref1.SlotNumber, ptr + index);
            index += SIZEOF_UINT16;
            CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeHlSyncRequest.Dummydataref1.DataLength, ptr + index);
            index += SIZEOF_UINT16;
            CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeHlSyncRequest.Dummydataref2.SlotNumber, ptr + index);
            index += SIZEOF_UINT16;
            CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeHlSyncRequest.Dummydataref2.DataLength, ptr + index);
            index += SIZEOF_UINT16;
            memcpy(ptr + index, sig->u.MlmeHlSyncRequest.GroupAddress.x, 48 / 8);
            index += 48 / 8;
            break;
#endif
        /*
         * DEBUG-GENERIC.request: DebugVariable data-ref header, dummy
         * second ref, then DebugWords[0..7] — mirrors the layout of
         * CSR_DEBUG_GENERIC_CONFIRM_ID.
         */
        case CSR_DEBUG_GENERIC_REQUEST_ID:
            CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.DebugGenericRequest.DebugVariable.SlotNumber, ptr + index);
            index += SIZEOF_UINT16;
            CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.DebugGenericRequest.DebugVariable.DataLength, ptr + index);
            index += SIZEOF_UINT16;
            CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.DebugGenericRequest.Dummydataref2.SlotNumber, ptr + index);
            index += SIZEOF_UINT16;
            CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.DebugGenericRequest.Dummydataref2.DataLength, ptr + index);
            index += SIZEOF_UINT16;
            CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.DebugGenericRequest.DebugWords[0], ptr + index);
            index += SIZEOF_UINT16;
            CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.DebugGenericRequest.DebugWords[1], ptr + index);
            index += SIZEOF_UINT16;
            CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.DebugGenericRequest.DebugWords[2], ptr + index);
            index += SIZEOF_UINT16;
            CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.DebugGenericRequest.DebugWords[3], ptr + index);
            index += SIZEOF_UINT16;
            CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.DebugGenericRequest.DebugWords[4], ptr + index);
            index += SIZEOF_UINT16;
            CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.DebugGenericRequest.DebugWords[5], ptr + index);
            index += SIZEOF_UINT16;
            CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.DebugGenericRequest.DebugWords[6], ptr + index);
            index += SIZEOF_UINT16;
            CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.DebugGenericRequest.DebugWords[7], ptr + index);
            index += SIZEOF_UINT16;
            break;
#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
        /*
         * MLME-LEAVE.confirm: dummy data refs, VIF id, ResultCode.
         */
        case CSR_MLME_LEAVE_CONFIRM_ID:
            CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeLeaveConfirm.Dummydataref1.SlotNumber, ptr + index);
            index += SIZEOF_UINT16;
            CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeLeaveConfirm.Dummydataref1.DataLength, ptr + index);
            index += SIZEOF_UINT16;
            CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeLeaveConfirm.Dummydataref2.SlotNumber, ptr + index);
            index += SIZEOF_UINT16;
            CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeLeaveConfirm.Dummydataref2.DataLength, ptr + index);
            index += SIZEOF_UINT16;
            CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeLeaveConfirm.VirtualInterfaceIdentifier, ptr + index);
            index += SIZEOF_UINT16;
            CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeLeaveConfirm.ResultCode, ptr + index);
            index += SIZEOF_UINT16;
            break;
#endif
#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
        /*
         * MLME-DEL-TRIGGERED-GET.request: dummy data refs, VIF id,
         * TriggeredId.
         */
        case CSR_MLME_DEL_TRIGGERED_GET_REQUEST_ID:
            CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeDelTriggeredGetRequest.Dummydataref1.SlotNumber, ptr + index);
            index += SIZEOF_UINT16;
            CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeDelTriggeredGetRequest.Dummydataref1.DataLength, ptr + index);
            index += SIZEOF_UINT16;
            CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeDelTriggeredGetRequest.Dummydataref2.SlotNumber, ptr + index);
            index += SIZEOF_UINT16;
            CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeDelTriggeredGetRequest.Dummydataref2.DataLength, ptr + index);
            index += SIZEOF_UINT16;
            CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeDelTriggeredGetRequest.VirtualInterfaceIdentifier, ptr + index);
            index += SIZEOF_UINT16;
            CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeDelTriggeredGetRequest.TriggeredId, ptr + index);
            index += SIZEOF_UINT16;
            break;
#endif
#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
        /*
         * MLME-ADD-MULTICAST-ADDRESS.request: Data data-ref header, dummy
         * second ref, VIF id, NumberOfMulticastGroupAddresses (the
         * addresses themselves travel in the bulk-data slot, not here).
         */
        case CSR_MLME_ADD_MULTICAST_ADDRESS_REQUEST_ID:
            CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeAddMulticastAddressRequest.Data.SlotNumber, ptr + index);
            index += SIZEOF_UINT16;
            CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeAddMulticastAddressRequest.Data.DataLength, ptr + index);
            index += SIZEOF_UINT16;
            CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeAddMulticastAddressRequest.Dummydataref2.SlotNumber, ptr + index);
            index += SIZEOF_UINT16;
            CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeAddMulticastAddressRequest.Dummydataref2.DataLength, ptr + index);
            index += SIZEOF_UINT16;
            CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeAddMulticastAddressRequest.VirtualInterfaceIdentifier, ptr + index);
            index += SIZEOF_UINT16;
            CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeAddMulticastAddressRequest.NumberOfMulticastGroupAddresses, ptr + index);
            index += SIZEOF_UINT16;
            break;
#endif
#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
        /*
         * MLME-RESET.request: dummy data refs, the 6-byte STA address copied
         * verbatim, then SetDefaultMib.  Note: no VIF field in this signal.
         */
        case CSR_MLME_RESET_REQUEST_ID:
            CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeResetRequest.Dummydataref1.SlotNumber, ptr + index);
            index += SIZEOF_UINT16;
            CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeResetRequest.Dummydataref1.DataLength, ptr + index);
            index += SIZEOF_UINT16;
            CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeResetRequest.Dummydataref2.SlotNumber, ptr + index);
            index += SIZEOF_UINT16;
            CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeResetRequest.Dummydataref2.DataLength, ptr + index);
            index += SIZEOF_UINT16;
            memcpy(ptr + index, sig->u.MlmeResetRequest.StaAddress.x, 48 / 8);
            index += 48 / 8;
            CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeResetRequest.SetDefaultMib, ptr + index);
            index += SIZEOF_UINT16;
            break;
#endif
#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
        /*
         * MLME-SCAN-CANCEL.request: dummy data refs and VIF id only.
         */
        case CSR_MLME_SCAN_CANCEL_REQUEST_ID:
            CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeScanCancelRequest.Dummydataref1.SlotNumber, ptr + index);
            index += SIZEOF_UINT16;
            CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeScanCancelRequest.Dummydataref1.DataLength, ptr + index);
            index += SIZEOF_UINT16;
            CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeScanCancelRequest.Dummydataref2.SlotNumber, ptr + index);
            index += SIZEOF_UINT16;
            CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeScanCancelRequest.Dummydataref2.DataLength, ptr + index);
            index += SIZEOF_UINT16;
            CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeScanCancelRequest.VirtualInterfaceIdentifier, ptr + index);
            index += SIZEOF_UINT16;
            break;
#endif
#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
        /*
         * MLME-ADD-TRIGGERED-GET.confirm: dummy data refs, VIF id,
         * ResultCode, TriggeredId.
         */
        case CSR_MLME_ADD_TRIGGERED_GET_CONFIRM_ID:
            CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeAddTriggeredGetConfirm.Dummydataref1.SlotNumber, ptr + index);
            index += SIZEOF_UINT16;
            CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeAddTriggeredGetConfirm.Dummydataref1.DataLength, ptr + index);
            index += SIZEOF_UINT16;
            CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeAddTriggeredGetConfirm.Dummydataref2.SlotNumber, ptr + index);
            index += SIZEOF_UINT16;
            CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeAddTriggeredGetConfirm.Dummydataref2.DataLength, ptr + index);
            index += SIZEOF_UINT16;
            CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeAddTriggeredGetConfirm.VirtualInterfaceIdentifier, ptr + index);
            index += SIZEOF_UINT16;
            CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeAddTriggeredGetConfirm.ResultCode, ptr + index);
            index += SIZEOF_UINT16;
            CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeAddTriggeredGetConfirm.TriggeredId, ptr + index);
            index += SIZEOF_UINT16;
            break;
#endif
#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
        /*
         * MLME-SET-PACKET-FILTER.request: InformationElements data-ref
         * header, dummy second ref, VIF id, PacketFilterMode, then the
         * 32-bit ArpFilterAddress (u32 little-endian).
         */
        case CSR_MLME_SET_PACKET_FILTER_REQUEST_ID:
            CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeSetPacketFilterRequest.InformationElements.SlotNumber, ptr + index);
            index += SIZEOF_UINT16;
            CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeSetPacketFilterRequest.InformationElements.DataLength, ptr + index);
            index += SIZEOF_UINT16;
            CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeSetPacketFilterRequest.Dummydataref2.SlotNumber, ptr + index);
            index += SIZEOF_UINT16;
            CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeSetPacketFilterRequest.Dummydataref2.DataLength, ptr + index);
            index += SIZEOF_UINT16;
            CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeSetPacketFilterRequest.VirtualInterfaceIdentifier, ptr + index);
            index += SIZEOF_UINT16;
            CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeSetPacketFilterRequest.PacketFilterMode, ptr + index);
            index += SIZEOF_UINT16;
            CSR_COPY_UINT32_TO_LITTLE_ENDIAN(sig->u.MlmeSetPacketFilterRequest.ArpFilterAddress, ptr + index);
            index += SIZEOF_UINT32;
            break;
#endif
#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
        /*
         * MLME-DEL-RX-TRIGGER.confirm: dummy data refs, VIF id, TriggerId,
         * ResultCode.
         */
        case CSR_MLME_DEL_RX_TRIGGER_CONFIRM_ID:
            CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeDelRxTriggerConfirm.Dummydataref1.SlotNumber, ptr + index);
            index += SIZEOF_UINT16;
            CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeDelRxTriggerConfirm.Dummydataref1.DataLength, ptr + index);
            index += SIZEOF_UINT16;
            CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeDelRxTriggerConfirm.Dummydataref2.SlotNumber, ptr + index);
            index += SIZEOF_UINT16;
            CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeDelRxTriggerConfirm.Dummydataref2.DataLength, ptr + index);
            index += SIZEOF_UINT16;
            CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeDelRxTriggerConfirm.VirtualInterfaceIdentifier, ptr + index);
            index += SIZEOF_UINT16;
            CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeDelRxTriggerConfirm.TriggerId, ptr + index);
            index += SIZEOF_UINT16;
            CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeDelRxTriggerConfirm.ResultCode, ptr + index);
            index += SIZEOF_UINT16;
            break;
#endif
#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
        /*
         * MLME-CONNECT-STATUS.request: InformationElements data-ref header,
         * dummy second ref, VIF id, ConnectionStatus, 6-byte STA address,
         * AssociationId, AssociationCapabilityInformation.
         */
        case CSR_MLME_CONNECT_STATUS_REQUEST_ID:
            CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeConnectStatusRequest.InformationElements.SlotNumber, ptr + index);
            index += SIZEOF_UINT16;
            CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeConnectStatusRequest.InformationElements.DataLength, ptr + index);
            index += SIZEOF_UINT16;
            CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeConnectStatusRequest.Dummydataref2.SlotNumber, ptr + index);
            index += SIZEOF_UINT16;
            CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeConnectStatusRequest.Dummydataref2.DataLength, ptr + index);
            index += SIZEOF_UINT16;
            CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeConnectStatusRequest.VirtualInterfaceIdentifier, ptr + index);
            index += SIZEOF_UINT16;
            CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeConnectStatusRequest.ConnectionStatus, ptr + index);
            index += SIZEOF_UINT16;
            memcpy(ptr + index, sig->u.MlmeConnectStatusRequest.StaAddress.x, 48 / 8);
            index += 48 / 8;
            CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeConnectStatusRequest.AssociationId, ptr + index);
            index += SIZEOF_UINT16;
            CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeConnectStatusRequest.AssociationCapabilityInformation, ptr + index);
            index += SIZEOF_UINT16;
            break;
#endif
#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
        /*
         * MLME-LEAVE.request: dummy data refs and VIF id only.
         */
        case CSR_MLME_LEAVE_REQUEST_ID:
            CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeLeaveRequest.Dummydataref1.SlotNumber, ptr + index);
            index += SIZEOF_UINT16;
            CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeLeaveRequest.Dummydataref1.DataLength, ptr + index);
            index += SIZEOF_UINT16;
            CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeLeaveRequest.Dummydataref2.SlotNumber, ptr + index);
            index += SIZEOF_UINT16;
            CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeLeaveRequest.Dummydataref2.DataLength, ptr + index);
            index += SIZEOF_UINT16;
            CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeLeaveRequest.VirtualInterfaceIdentifier, ptr + index);
            index += SIZEOF_UINT16;
            break;
#endif
#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
        /*
         * MLME-CONFIG-QUEUE.request: dummy data refs, then the EDCA queue
         * parameters QueueIndex, Aifs, Cwmin, Cwmax, TxopLimit.  Note: no
         * VIF field for this signal.
         */
        case CSR_MLME_CONFIG_QUEUE_REQUEST_ID:
            CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeConfigQueueRequest.Dummydataref1.SlotNumber, ptr + index);
            index += SIZEOF_UINT16;
            CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeConfigQueueRequest.Dummydataref1.DataLength, ptr + index);
            index += SIZEOF_UINT16;
            CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeConfigQueueRequest.Dummydataref2.SlotNumber, ptr + index);
            index += SIZEOF_UINT16;
            CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeConfigQueueRequest.Dummydataref2.DataLength, ptr + index);
            index += SIZEOF_UINT16;
            CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeConfigQueueRequest.QueueIndex, ptr + index);
            index += SIZEOF_UINT16;
            CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeConfigQueueRequest.Aifs, ptr + index);
            index += SIZEOF_UINT16;
            CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeConfigQueueRequest.Cwmin, ptr + index);
            index += SIZEOF_UINT16;
            CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeConfigQueueRequest.Cwmax, ptr + index);
            index += SIZEOF_UINT16;
            CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeConfigQueueRequest.TxopLimit, ptr + index);
            index += SIZEOF_UINT16;
            break;
#endif
#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
        /*
         * MLME-DEL-TSPEC.confirm: dummy data refs, VIF id, UserPriority,
         * ResultCode.
         */
        case CSR_MLME_DEL_TSPEC_CONFIRM_ID:
            CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeDelTspecConfirm.Dummydataref1.SlotNumber, ptr + index);
            index += SIZEOF_UINT16;
            CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeDelTspecConfirm.Dummydataref1.DataLength, ptr + index);
            index += SIZEOF_UINT16;
            CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeDelTspecConfirm.Dummydataref2.SlotNumber, ptr + index);
            index += SIZEOF_UINT16;
            CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeDelTspecConfirm.Dummydataref2.DataLength, ptr + index);
            index += SIZEOF_UINT16;
            CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeDelTspecConfirm.VirtualInterfaceIdentifier, ptr + index);
            index += SIZEOF_UINT16;
            CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeDelTspecConfirm.UserPriority, ptr + index);
            index += SIZEOF_UINT16;
            CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeDelTspecConfirm.ResultCode, ptr + index);
            index += SIZEOF_UINT16;
            break;
#endif
        /*
         * MLME-SET-TIM.confirm: dummy data refs, VIF id, ResultCode.
         * Compiled unconditionally (not under the FULL_SIGNAL_SET guard).
         */
        case CSR_MLME_SET_TIM_CONFIRM_ID:
            CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeSetTimConfirm.Dummydataref1.SlotNumber, ptr + index);
            index += SIZEOF_UINT16;
            CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeSetTimConfirm.Dummydataref1.DataLength, ptr + index);
            index += SIZEOF_UINT16;
            CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeSetTimConfirm.Dummydataref2.SlotNumber, ptr + index);
            index += SIZEOF_UINT16;
            CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeSetTimConfirm.Dummydataref2.DataLength, ptr + index);
            index += SIZEOF_UINT16;
            CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeSetTimConfirm.VirtualInterfaceIdentifier, ptr + index);
            index += SIZEOF_UINT16;
            CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeSetTimConfirm.ResultCode, ptr + index);
            index += SIZEOF_UINT16;
            break;
#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
        /*
         * MLME-MEASURE.indication: MeasurementReportSet data-ref header,
         * dummy second ref, DialogToken.
         */
        case CSR_MLME_MEASURE_INDICATION_ID:
            CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeMeasureIndication.MeasurementReportSet.SlotNumber, ptr + index);
            index += SIZEOF_UINT16;
            CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeMeasureIndication.MeasurementReportSet.DataLength, ptr + index);
            index += SIZEOF_UINT16;
            CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeMeasureIndication.Dummydataref2.SlotNumber, ptr + index);
            index += SIZEOF_UINT16;
            CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeMeasureIndication.Dummydataref2.DataLength, ptr + index);
            index += SIZEOF_UINT16;
            CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeMeasureIndication.DialogToken, ptr + index);
            index += SIZEOF_UINT16;
            break;
#endif
#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
        /*
         * MLME-DEL-BLACKOUT.confirm: dummy data refs, VIF id, BlackoutId,
         * ResultCode.
         */
        case CSR_MLME_DEL_BLACKOUT_CONFIRM_ID:
            CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeDelBlackoutConfirm.Dummydataref1.SlotNumber, ptr + index);
            index += SIZEOF_UINT16;
            CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeDelBlackoutConfirm.Dummydataref1.DataLength, ptr + index);
            index += SIZEOF_UINT16;
            CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeDelBlackoutConfirm.Dummydataref2.SlotNumber, ptr + index);
            index += SIZEOF_UINT16;
            CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeDelBlackoutConfirm.Dummydataref2.DataLength, ptr + index);
            index += SIZEOF_UINT16;
            CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeDelBlackoutConfirm.VirtualInterfaceIdentifier, ptr + index);
            index += SIZEOF_UINT16;
            CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeDelBlackoutConfirm.BlackoutId, ptr + index);
            index += SIZEOF_UINT16;
            CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeDelBlackoutConfirm.ResultCode, ptr + index);
            index += SIZEOF_UINT16;
            break;
#endif
3740#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
3741 case CSR_MLME_DEL_TRIGGERED_GET_CONFIRM_ID:
3742 CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeDelTriggeredGetConfirm.Dummydataref1.SlotNumber, ptr + index);
3743 index += SIZEOF_UINT16;
3744 CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeDelTriggeredGetConfirm.Dummydataref1.DataLength, ptr + index);
3745 index += SIZEOF_UINT16;
3746 CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeDelTriggeredGetConfirm.Dummydataref2.SlotNumber, ptr + index);
3747 index += SIZEOF_UINT16;
3748 CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeDelTriggeredGetConfirm.Dummydataref2.DataLength, ptr + index);
3749 index += SIZEOF_UINT16;
3750 CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeDelTriggeredGetConfirm.VirtualInterfaceIdentifier, ptr + index);
3751 index += SIZEOF_UINT16;
3752 CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeDelTriggeredGetConfirm.ResultCode, ptr + index);
3753 index += SIZEOF_UINT16;
3754 CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeDelTriggeredGetConfirm.TriggeredId, ptr + index);
3755 index += SIZEOF_UINT16;
3756 break;
3757#endif
3758 case CSR_DEBUG_GENERIC_INDICATION_ID:
3759 CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.DebugGenericIndication.DebugVariable.SlotNumber, ptr + index);
3760 index += SIZEOF_UINT16;
3761 CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.DebugGenericIndication.DebugVariable.DataLength, ptr + index);
3762 index += SIZEOF_UINT16;
3763 CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.DebugGenericIndication.Dummydataref2.SlotNumber, ptr + index);
3764 index += SIZEOF_UINT16;
3765 CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.DebugGenericIndication.Dummydataref2.DataLength, ptr + index);
3766 index += SIZEOF_UINT16;
3767 CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.DebugGenericIndication.DebugWords[0], ptr + index);
3768 index += SIZEOF_UINT16;
3769 CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.DebugGenericIndication.DebugWords[1], ptr + index);
3770 index += SIZEOF_UINT16;
3771 CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.DebugGenericIndication.DebugWords[2], ptr + index);
3772 index += SIZEOF_UINT16;
3773 CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.DebugGenericIndication.DebugWords[3], ptr + index);
3774 index += SIZEOF_UINT16;
3775 CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.DebugGenericIndication.DebugWords[4], ptr + index);
3776 index += SIZEOF_UINT16;
3777 CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.DebugGenericIndication.DebugWords[5], ptr + index);
3778 index += SIZEOF_UINT16;
3779 CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.DebugGenericIndication.DebugWords[6], ptr + index);
3780 index += SIZEOF_UINT16;
3781 CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.DebugGenericIndication.DebugWords[7], ptr + index);
3782 index += SIZEOF_UINT16;
3783 break;
3784 case CSR_MA_PACKET_CANCEL_REQUEST_ID:
3785 CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MaPacketCancelRequest.Dummydataref1.SlotNumber, ptr + index);
3786 index += SIZEOF_UINT16;
3787 CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MaPacketCancelRequest.Dummydataref1.DataLength, ptr + index);
3788 index += SIZEOF_UINT16;
3789 CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MaPacketCancelRequest.Dummydataref2.SlotNumber, ptr + index);
3790 index += SIZEOF_UINT16;
3791 CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MaPacketCancelRequest.Dummydataref2.DataLength, ptr + index);
3792 index += SIZEOF_UINT16;
3793 CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MaPacketCancelRequest.VirtualInterfaceIdentifier, ptr + index);
3794 index += SIZEOF_UINT16;
3795 CSR_COPY_UINT32_TO_LITTLE_ENDIAN(sig->u.MaPacketCancelRequest.HostTag, ptr + index);
3796 index += SIZEOF_UINT32;
3797 break;
3798#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
3799 case CSR_MLME_MODIFY_BSS_PARAMETER_CONFIRM_ID:
3800 CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeModifyBssParameterConfirm.Dummydataref1.SlotNumber, ptr + index);
3801 index += SIZEOF_UINT16;
3802 CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeModifyBssParameterConfirm.Dummydataref1.DataLength, ptr + index);
3803 index += SIZEOF_UINT16;
3804 CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeModifyBssParameterConfirm.Dummydataref2.SlotNumber, ptr + index);
3805 index += SIZEOF_UINT16;
3806 CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeModifyBssParameterConfirm.Dummydataref2.DataLength, ptr + index);
3807 index += SIZEOF_UINT16;
3808 CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeModifyBssParameterConfirm.VirtualInterfaceIdentifier, ptr + index);
3809 index += SIZEOF_UINT16;
3810 CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeModifyBssParameterConfirm.ResultCode, ptr + index);
3811 index += SIZEOF_UINT16;
3812 break;
3813#endif
3814#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
3815 case CSR_MLME_PAUSE_AUTONOMOUS_SCAN_CONFIRM_ID:
3816 CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmePauseAutonomousScanConfirm.Dummydataref1.SlotNumber, ptr + index);
3817 index += SIZEOF_UINT16;
3818 CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmePauseAutonomousScanConfirm.Dummydataref1.DataLength, ptr + index);
3819 index += SIZEOF_UINT16;
3820 CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmePauseAutonomousScanConfirm.Dummydataref2.SlotNumber, ptr + index);
3821 index += SIZEOF_UINT16;
3822 CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmePauseAutonomousScanConfirm.Dummydataref2.DataLength, ptr + index);
3823 index += SIZEOF_UINT16;
3824 CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmePauseAutonomousScanConfirm.VirtualInterfaceIdentifier, ptr + index);
3825 index += SIZEOF_UINT16;
3826 CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmePauseAutonomousScanConfirm.ResultCode, ptr + index);
3827 index += SIZEOF_UINT16;
3828 CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmePauseAutonomousScanConfirm.AutonomousScanId, ptr + index);
3829 index += SIZEOF_UINT16;
3830 break;
3831#endif
3832 case CSR_MA_PACKET_REQUEST_ID:
3833 CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MaPacketRequest.Data.SlotNumber, ptr + index);
3834 index += SIZEOF_UINT16;
3835 CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MaPacketRequest.Data.DataLength, ptr + index);
3836 index += SIZEOF_UINT16;
3837 CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MaPacketRequest.Dummydataref2.SlotNumber, ptr + index);
3838 index += SIZEOF_UINT16;
3839 CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MaPacketRequest.Dummydataref2.DataLength, ptr + index);
3840 index += SIZEOF_UINT16;
3841 CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MaPacketRequest.VirtualInterfaceIdentifier, ptr + index);
3842 index += SIZEOF_UINT16;
3843 CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MaPacketRequest.TransmitRate, ptr + index);
3844 index += SIZEOF_UINT16;
3845 CSR_COPY_UINT32_TO_LITTLE_ENDIAN(sig->u.MaPacketRequest.HostTag, ptr + index);
3846 index += SIZEOF_UINT32;
3847 CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MaPacketRequest.Priority, ptr + index);
3848 index += SIZEOF_UINT16;
3849 memcpy(ptr + index, sig->u.MaPacketRequest.Ra.x, 48 / 8);
3850 index += 48 / 8;
3851 CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MaPacketRequest.TransmissionControl, ptr + index);
3852 index += SIZEOF_UINT16;
3853 break;
3854#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
3855 case CSR_MLME_MODIFY_BSS_PARAMETER_REQUEST_ID:
3856 CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeModifyBssParameterRequest.Data.SlotNumber, ptr + index);
3857 index += SIZEOF_UINT16;
3858 CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeModifyBssParameterRequest.Data.DataLength, ptr + index);
3859 index += SIZEOF_UINT16;
3860 CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeModifyBssParameterRequest.Dummydataref2.SlotNumber, ptr + index);
3861 index += SIZEOF_UINT16;
3862 CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeModifyBssParameterRequest.Dummydataref2.DataLength, ptr + index);
3863 index += SIZEOF_UINT16;
3864 CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeModifyBssParameterRequest.VirtualInterfaceIdentifier, ptr + index);
3865 index += SIZEOF_UINT16;
3866 CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeModifyBssParameterRequest.BeaconPeriod, ptr + index);
3867 index += SIZEOF_UINT16;
3868 CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeModifyBssParameterRequest.DtimPeriod, ptr + index);
3869 index += SIZEOF_UINT16;
3870 CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeModifyBssParameterRequest.CapabilityInformation, ptr + index);
3871 index += SIZEOF_UINT16;
3872 memcpy(ptr + index, sig->u.MlmeModifyBssParameterRequest.Bssid.x, 48 / 8);
3873 index += 48 / 8;
3874 CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeModifyBssParameterRequest.RtsThreshold, ptr + index);
3875 index += SIZEOF_UINT16;
3876 break;
3877#endif
3878#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
3879 case CSR_MLME_ADD_RX_TRIGGER_REQUEST_ID:
3880 CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeAddRxTriggerRequest.InformationElements.SlotNumber, ptr + index);
3881 index += SIZEOF_UINT16;
3882 CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeAddRxTriggerRequest.InformationElements.DataLength, ptr + index);
3883 index += SIZEOF_UINT16;
3884 CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeAddRxTriggerRequest.Dummydataref2.SlotNumber, ptr + index);
3885 index += SIZEOF_UINT16;
3886 CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeAddRxTriggerRequest.Dummydataref2.DataLength, ptr + index);
3887 index += SIZEOF_UINT16;
3888 CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeAddRxTriggerRequest.VirtualInterfaceIdentifier, ptr + index);
3889 index += SIZEOF_UINT16;
3890 CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeAddRxTriggerRequest.TriggerId, ptr + index);
3891 index += SIZEOF_UINT16;
3892 CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeAddRxTriggerRequest.Priority, ptr + index);
3893 index += SIZEOF_UINT16;
3894 break;
3895#endif
3896 case CSR_MA_VIF_AVAILABILITY_INDICATION_ID:
3897 CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MaVifAvailabilityIndication.Dummydataref1.SlotNumber, ptr + index);
3898 index += SIZEOF_UINT16;
3899 CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MaVifAvailabilityIndication.Dummydataref1.DataLength, ptr + index);
3900 index += SIZEOF_UINT16;
3901 CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MaVifAvailabilityIndication.Dummydataref2.SlotNumber, ptr + index);
3902 index += SIZEOF_UINT16;
3903 CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MaVifAvailabilityIndication.Dummydataref2.DataLength, ptr + index);
3904 index += SIZEOF_UINT16;
3905 CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MaVifAvailabilityIndication.VirtualInterfaceIdentifier, ptr + index);
3906 index += SIZEOF_UINT16;
3907 CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MaVifAvailabilityIndication.Multicast, ptr + index);
3908 index += SIZEOF_UINT16;
3909 break;
3910#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
3911 case CSR_MLME_HL_SYNC_CANCEL_REQUEST_ID:
3912 CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeHlSyncCancelRequest.Dummydataref1.SlotNumber, ptr + index);
3913 index += SIZEOF_UINT16;
3914 CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeHlSyncCancelRequest.Dummydataref1.DataLength, ptr + index);
3915 index += SIZEOF_UINT16;
3916 CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeHlSyncCancelRequest.Dummydataref2.SlotNumber, ptr + index);
3917 index += SIZEOF_UINT16;
3918 CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeHlSyncCancelRequest.Dummydataref2.DataLength, ptr + index);
3919 index += SIZEOF_UINT16;
3920 memcpy(ptr + index, sig->u.MlmeHlSyncCancelRequest.GroupAddress.x, 48 / 8);
3921 index += 48 / 8;
3922 break;
3923#endif
3924#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
3925 case CSR_MLME_DEL_AUTONOMOUS_SCAN_REQUEST_ID:
3926 CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeDelAutonomousScanRequest.Dummydataref1.SlotNumber, ptr + index);
3927 index += SIZEOF_UINT16;
3928 CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeDelAutonomousScanRequest.Dummydataref1.DataLength, ptr + index);
3929 index += SIZEOF_UINT16;
3930 CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeDelAutonomousScanRequest.Dummydataref2.SlotNumber, ptr + index);
3931 index += SIZEOF_UINT16;
3932 CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeDelAutonomousScanRequest.Dummydataref2.DataLength, ptr + index);
3933 index += SIZEOF_UINT16;
3934 CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeDelAutonomousScanRequest.VirtualInterfaceIdentifier, ptr + index);
3935 index += SIZEOF_UINT16;
3936 CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeDelAutonomousScanRequest.AutonomousScanId, ptr + index);
3937 index += SIZEOF_UINT16;
3938 break;
3939#endif
3940#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
3941 case CSR_MLME_BLACKOUT_ENDED_INDICATION_ID:
3942 CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeBlackoutEndedIndication.Dummydataref1.SlotNumber, ptr + index);
3943 index += SIZEOF_UINT16;
3944 CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeBlackoutEndedIndication.Dummydataref1.DataLength, ptr + index);
3945 index += SIZEOF_UINT16;
3946 CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeBlackoutEndedIndication.Dummydataref2.SlotNumber, ptr + index);
3947 index += SIZEOF_UINT16;
3948 CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeBlackoutEndedIndication.Dummydataref2.DataLength, ptr + index);
3949 index += SIZEOF_UINT16;
3950 CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeBlackoutEndedIndication.VirtualInterfaceIdentifier, ptr + index);
3951 index += SIZEOF_UINT16;
3952 CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeBlackoutEndedIndication.BlackoutId, ptr + index);
3953 index += SIZEOF_UINT16;
3954 break;
3955#endif
3956#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
3957 case CSR_MLME_AUTONOMOUS_SCAN_DONE_INDICATION_ID:
3958 CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeAutonomousScanDoneIndication.Dummydataref1.SlotNumber, ptr + index);
3959 index += SIZEOF_UINT16;
3960 CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeAutonomousScanDoneIndication.Dummydataref1.DataLength, ptr + index);
3961 index += SIZEOF_UINT16;
3962 CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeAutonomousScanDoneIndication.Dummydataref2.SlotNumber, ptr + index);
3963 index += SIZEOF_UINT16;
3964 CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeAutonomousScanDoneIndication.Dummydataref2.DataLength, ptr + index);
3965 index += SIZEOF_UINT16;
3966 CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeAutonomousScanDoneIndication.VirtualInterfaceIdentifier, ptr + index);
3967 index += SIZEOF_UINT16;
3968 CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeAutonomousScanDoneIndication.ResultCode, ptr + index);
3969 index += SIZEOF_UINT16;
3970 CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeAutonomousScanDoneIndication.AutonomousScanId, ptr + index);
3971 index += SIZEOF_UINT16;
3972 break;
3973#endif
3974#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
3975 case CSR_MLME_GET_KEY_SEQUENCE_REQUEST_ID:
3976 CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeGetKeySequenceRequest.Dummydataref1.SlotNumber, ptr + index);
3977 index += SIZEOF_UINT16;
3978 CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeGetKeySequenceRequest.Dummydataref1.DataLength, ptr + index);
3979 index += SIZEOF_UINT16;
3980 CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeGetKeySequenceRequest.Dummydataref2.SlotNumber, ptr + index);
3981 index += SIZEOF_UINT16;
3982 CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeGetKeySequenceRequest.Dummydataref2.DataLength, ptr + index);
3983 index += SIZEOF_UINT16;
3984 CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeGetKeySequenceRequest.VirtualInterfaceIdentifier, ptr + index);
3985 index += SIZEOF_UINT16;
3986 CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeGetKeySequenceRequest.KeyId, ptr + index);
3987 index += SIZEOF_UINT16;
3988 CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeGetKeySequenceRequest.KeyType, ptr + index);
3989 index += SIZEOF_UINT16;
3990 memcpy(ptr + index, sig->u.MlmeGetKeySequenceRequest.Address.x, 48 / 8);
3991 index += 48 / 8;
3992 break;
3993#endif
3994#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
3995 case CSR_MLME_SET_CHANNEL_REQUEST_ID:
3996 CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeSetChannelRequest.Dummydataref1.SlotNumber, ptr + index);
3997 index += SIZEOF_UINT16;
3998 CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeSetChannelRequest.Dummydataref1.DataLength, ptr + index);
3999 index += SIZEOF_UINT16;
4000 CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeSetChannelRequest.Dummydataref2.SlotNumber, ptr + index);
4001 index += SIZEOF_UINT16;
4002 CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeSetChannelRequest.Dummydataref2.DataLength, ptr + index);
4003 index += SIZEOF_UINT16;
4004 CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeSetChannelRequest.VirtualInterfaceIdentifier, ptr + index);
4005 index += SIZEOF_UINT16;
4006 CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeSetChannelRequest.Ifindex, ptr + index);
4007 index += SIZEOF_UINT16;
4008 CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeSetChannelRequest.Channel, ptr + index);
4009 index += SIZEOF_UINT16;
4010 memcpy(ptr + index, sig->u.MlmeSetChannelRequest.Address.x, 48 / 8);
4011 index += 48 / 8;
4012 CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeSetChannelRequest.AvailabilityDuration, ptr + index);
4013 index += SIZEOF_UINT16;
4014 CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeSetChannelRequest.AvailabilityInterval, ptr + index);
4015 index += SIZEOF_UINT16;
4016 break;
4017#endif
4018#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
4019 case CSR_MLME_MEASURE_CONFIRM_ID:
4020 CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeMeasureConfirm.Dummydataref1.SlotNumber, ptr + index);
4021 index += SIZEOF_UINT16;
4022 CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeMeasureConfirm.Dummydataref1.DataLength, ptr + index);
4023 index += SIZEOF_UINT16;
4024 CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeMeasureConfirm.Dummydataref2.SlotNumber, ptr + index);
4025 index += SIZEOF_UINT16;
4026 CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeMeasureConfirm.Dummydataref2.DataLength, ptr + index);
4027 index += SIZEOF_UINT16;
4028 CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeMeasureConfirm.ResultCode, ptr + index);
4029 index += SIZEOF_UINT16;
4030 CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeMeasureConfirm.DialogToken, ptr + index);
4031 index += SIZEOF_UINT16;
4032 break;
4033#endif
4034#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
4035 case CSR_MLME_ADD_TRIGGERED_GET_REQUEST_ID:
4036 CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeAddTriggeredGetRequest.MibAttribute.SlotNumber, ptr + index);
4037 index += SIZEOF_UINT16;
4038 CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeAddTriggeredGetRequest.MibAttribute.DataLength, ptr + index);
4039 index += SIZEOF_UINT16;
4040 CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeAddTriggeredGetRequest.Dummydataref2.SlotNumber, ptr + index);
4041 index += SIZEOF_UINT16;
4042 CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeAddTriggeredGetRequest.Dummydataref2.DataLength, ptr + index);
4043 index += SIZEOF_UINT16;
4044 CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeAddTriggeredGetRequest.VirtualInterfaceIdentifier, ptr + index);
4045 index += SIZEOF_UINT16;
4046 CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeAddTriggeredGetRequest.TriggeredId, ptr + index);
4047 index += SIZEOF_UINT16;
4048 break;
4049#endif
4050#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
4051 case CSR_MLME_AUTONOMOUS_SCAN_LOSS_INDICATION_ID:
4052 CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeAutonomousScanLossIndication.Dummydataref1.SlotNumber, ptr + index);
4053 index += SIZEOF_UINT16;
4054 CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeAutonomousScanLossIndication.Dummydataref1.DataLength, ptr + index);
4055 index += SIZEOF_UINT16;
4056 CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeAutonomousScanLossIndication.Dummydataref2.SlotNumber, ptr + index);
4057 index += SIZEOF_UINT16;
4058 CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeAutonomousScanLossIndication.Dummydataref2.DataLength, ptr + index);
4059 index += SIZEOF_UINT16;
4060 CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeAutonomousScanLossIndication.VirtualInterfaceIdentifier, ptr + index);
4061 index += SIZEOF_UINT16;
4062 memcpy(ptr + index, sig->u.MlmeAutonomousScanLossIndication.Bssid.x, 48 / 8);
4063 index += 48 / 8;
4064 break;
4065#endif
4066 case CSR_MA_VIF_AVAILABILITY_RESPONSE_ID:
4067 CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MaVifAvailabilityResponse.Dummydataref1.SlotNumber, ptr + index);
4068 index += SIZEOF_UINT16;
4069 CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MaVifAvailabilityResponse.Dummydataref1.DataLength, ptr + index);
4070 index += SIZEOF_UINT16;
4071 CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MaVifAvailabilityResponse.Dummydataref2.SlotNumber, ptr + index);
4072 index += SIZEOF_UINT16;
4073 CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MaVifAvailabilityResponse.Dummydataref2.DataLength, ptr + index);
4074 index += SIZEOF_UINT16;
4075 CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MaVifAvailabilityResponse.VirtualInterfaceIdentifier, ptr + index);
4076 index += SIZEOF_UINT16;
4077 CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MaVifAvailabilityResponse.ResultCode, ptr + index);
4078 index += SIZEOF_UINT16;
4079 break;
4080#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
4081 case CSR_MLME_ADD_TEMPLATE_REQUEST_ID:
4082 CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeAddTemplateRequest.Data1.SlotNumber, ptr + index);
4083 index += SIZEOF_UINT16;
4084 CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeAddTemplateRequest.Data1.DataLength, ptr + index);
4085 index += SIZEOF_UINT16;
4086 CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeAddTemplateRequest.Data2.SlotNumber, ptr + index);
4087 index += SIZEOF_UINT16;
4088 CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeAddTemplateRequest.Data2.DataLength, ptr + index);
4089 index += SIZEOF_UINT16;
4090 CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeAddTemplateRequest.VirtualInterfaceIdentifier, ptr + index);
4091 index += SIZEOF_UINT16;
4092 CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeAddTemplateRequest.FrameType, ptr + index);
4093 index += SIZEOF_UINT16;
4094 CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeAddTemplateRequest.MinTransmitRate, ptr + index);
4095 index += SIZEOF_UINT16;
4096 break;
4097#endif
4098#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
4099 case CSR_MLME_POWERMGT_CONFIRM_ID:
4100 CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmePowermgtConfirm.Dummydataref1.SlotNumber, ptr + index);
4101 index += SIZEOF_UINT16;
4102 CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmePowermgtConfirm.Dummydataref1.DataLength, ptr + index);
4103 index += SIZEOF_UINT16;
4104 CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmePowermgtConfirm.Dummydataref2.SlotNumber, ptr + index);
4105 index += SIZEOF_UINT16;
4106 CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmePowermgtConfirm.Dummydataref2.DataLength, ptr + index);
4107 index += SIZEOF_UINT16;
4108 CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmePowermgtConfirm.VirtualInterfaceIdentifier, ptr + index);
4109 index += SIZEOF_UINT16;
4110 CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmePowermgtConfirm.ResultCode, ptr + index);
4111 index += SIZEOF_UINT16;
4112 break;
4113#endif
4114#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
4115 case CSR_MLME_ADD_PERIODIC_CONFIRM_ID:
4116 CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeAddPeriodicConfirm.Dummydataref1.SlotNumber, ptr + index);
4117 index += SIZEOF_UINT16;
4118 CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeAddPeriodicConfirm.Dummydataref1.DataLength, ptr + index);
4119 index += SIZEOF_UINT16;
4120 CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeAddPeriodicConfirm.Dummydataref2.SlotNumber, ptr + index);
4121 index += SIZEOF_UINT16;
4122 CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeAddPeriodicConfirm.Dummydataref2.DataLength, ptr + index);
4123 index += SIZEOF_UINT16;
4124 CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeAddPeriodicConfirm.VirtualInterfaceIdentifier, ptr + index);
4125 index += SIZEOF_UINT16;
4126 CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeAddPeriodicConfirm.PeriodicId, ptr + index);
4127 index += SIZEOF_UINT16;
4128 CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeAddPeriodicConfirm.ResultCode, ptr + index);
4129 index += SIZEOF_UINT16;
4130 break;
4131#endif
4132#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
4133 case CSR_MLME_GET_CONFIRM_ID:
4134 CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeGetConfirm.MibAttributeValue.SlotNumber, ptr + index);
4135 index += SIZEOF_UINT16;
4136 CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeGetConfirm.MibAttributeValue.DataLength, ptr + index);
4137 index += SIZEOF_UINT16;
4138 CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeGetConfirm.Dummydataref2.SlotNumber, ptr + index);
4139 index += SIZEOF_UINT16;
4140 CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeGetConfirm.Dummydataref2.DataLength, ptr + index);
4141 index += SIZEOF_UINT16;
4142 CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeGetConfirm.Status, ptr + index);
4143 index += SIZEOF_UINT16;
4144 CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeGetConfirm.ErrorIndex, ptr + index);
4145 index += SIZEOF_UINT16;
4146 break;
4147#endif
4148#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
4149 case CSR_MLME_GET_NEXT_CONFIRM_ID:
4150 CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeGetNextConfirm.MibAttributeValue.SlotNumber, ptr + index);
4151 index += SIZEOF_UINT16;
4152 CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeGetNextConfirm.MibAttributeValue.DataLength, ptr + index);
4153 index += SIZEOF_UINT16;
4154 CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeGetNextConfirm.Dummydataref2.SlotNumber, ptr + index);
4155 index += SIZEOF_UINT16;
4156 CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeGetNextConfirm.Dummydataref2.DataLength, ptr + index);
4157 index += SIZEOF_UINT16;
4158 CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeGetNextConfirm.Status, ptr + index);
4159 index += SIZEOF_UINT16;
4160 CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeGetNextConfirm.ErrorIndex, ptr + index);
4161 index += SIZEOF_UINT16;
4162 break;
4163#endif
4164#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
4165 case CSR_MLME_STOP_AGGREGATION_REQUEST_ID:
4166 CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeStopAggregationRequest.Dummydataref1.SlotNumber, ptr + index);
4167 index += SIZEOF_UINT16;
4168 CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeStopAggregationRequest.Dummydataref1.DataLength, ptr + index);
4169 index += SIZEOF_UINT16;
4170 CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeStopAggregationRequest.Dummydataref2.SlotNumber, ptr + index);
4171 index += SIZEOF_UINT16;
4172 CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeStopAggregationRequest.Dummydataref2.DataLength, ptr + index);
4173 index += SIZEOF_UINT16;
4174 CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeStopAggregationRequest.VirtualInterfaceIdentifier, ptr + index);
4175 index += SIZEOF_UINT16;
4176 memcpy(ptr + index, sig->u.MlmeStopAggregationRequest.PeerQstaAddress.x, 48 / 8);
4177 index += 48 / 8;
4178 CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeStopAggregationRequest.UserPriority, ptr + index);
4179 index += SIZEOF_UINT16;
4180 CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeStopAggregationRequest.Direction, ptr + index);
4181 index += SIZEOF_UINT16;
4182 break;
4183#endif
4184#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
4185 case CSR_MLME_ADD_RX_TRIGGER_CONFIRM_ID:
4186 CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeAddRxTriggerConfirm.Dummydataref1.SlotNumber, ptr + index);
4187 index += SIZEOF_UINT16;
4188 CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeAddRxTriggerConfirm.Dummydataref1.DataLength, ptr + index);
4189 index += SIZEOF_UINT16;
4190 CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeAddRxTriggerConfirm.Dummydataref2.SlotNumber, ptr + index);
4191 index += SIZEOF_UINT16;
4192 CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeAddRxTriggerConfirm.Dummydataref2.DataLength, ptr + index);
4193 index += SIZEOF_UINT16;
4194 CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeAddRxTriggerConfirm.VirtualInterfaceIdentifier, ptr + index);
4195 index += SIZEOF_UINT16;
4196 CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeAddRxTriggerConfirm.TriggerId, ptr + index);
4197 index += SIZEOF_UINT16;
4198 CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeAddRxTriggerConfirm.ResultCode, ptr + index);
4199 index += SIZEOF_UINT16;
4200 break;
4201#endif
4202#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
4203 case CSR_MLME_ADD_BLACKOUT_REQUEST_ID:
4204 CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeAddBlackoutRequest.Dummydataref1.SlotNumber, ptr + index);
4205 index += SIZEOF_UINT16;
4206 CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeAddBlackoutRequest.Dummydataref1.DataLength, ptr + index);
4207 index += SIZEOF_UINT16;
4208 CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeAddBlackoutRequest.Dummydataref2.SlotNumber, ptr + index);
4209 index += SIZEOF_UINT16;
4210 CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeAddBlackoutRequest.Dummydataref2.DataLength, ptr + index);
4211 index += SIZEOF_UINT16;
4212 CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeAddBlackoutRequest.VirtualInterfaceIdentifier, ptr + index);
4213 index += SIZEOF_UINT16;
4214 CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeAddBlackoutRequest.BlackoutId, ptr + index);
4215 index += SIZEOF_UINT16;
4216 CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeAddBlackoutRequest.BlackoutType, ptr + index);
4217 index += SIZEOF_UINT16;
4218 CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeAddBlackoutRequest.BlackoutSource, ptr + index);
4219 index += SIZEOF_UINT16;
4220 CSR_COPY_UINT32_TO_LITTLE_ENDIAN(sig->u.MlmeAddBlackoutRequest.BlackoutStartReference, ptr + index);
4221 index += SIZEOF_UINT32;
4222 CSR_COPY_UINT32_TO_LITTLE_ENDIAN(sig->u.MlmeAddBlackoutRequest.BlackoutPeriod, ptr + index);
4223 index += SIZEOF_UINT32;
4224 CSR_COPY_UINT32_TO_LITTLE_ENDIAN(sig->u.MlmeAddBlackoutRequest.BlackoutDuration, ptr + index);
4225 index += SIZEOF_UINT32;
4226 memcpy(ptr + index, sig->u.MlmeAddBlackoutRequest.PeerStaAddress.x, 48 / 8);
4227 index += 48 / 8;
4228 CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeAddBlackoutRequest.BlackoutCount, ptr + index);
4229 index += SIZEOF_UINT16;
4230 break;
4231#endif
4232#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
4233 case CSR_MLME_DELETEKEYS_REQUEST_ID:
4234 CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeDeletekeysRequest.Dummydataref1.SlotNumber, ptr + index);
4235 index += SIZEOF_UINT16;
4236 CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeDeletekeysRequest.Dummydataref1.DataLength, ptr + index);
4237 index += SIZEOF_UINT16;
4238 CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeDeletekeysRequest.Dummydataref2.SlotNumber, ptr + index);
4239 index += SIZEOF_UINT16;
4240 CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeDeletekeysRequest.Dummydataref2.DataLength, ptr + index);
4241 index += SIZEOF_UINT16;
4242 CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeDeletekeysRequest.VirtualInterfaceIdentifier, ptr + index);
4243 index += SIZEOF_UINT16;
4244 CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeDeletekeysRequest.KeyId, ptr + index);
4245 index += SIZEOF_UINT16;
4246 CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeDeletekeysRequest.KeyType, ptr + index);
4247 index += SIZEOF_UINT16;
4248 memcpy(ptr + index, sig->u.MlmeDeletekeysRequest.Address.x, 48 / 8);
4249 index += 48 / 8;
4250 break;
4251#endif
4252#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
4253 case CSR_MLME_RESET_CONFIRM_ID:
4254 CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeResetConfirm.Dummydataref1.SlotNumber, ptr + index);
4255 index += SIZEOF_UINT16;
4256 CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeResetConfirm.Dummydataref1.DataLength, ptr + index);
4257 index += SIZEOF_UINT16;
4258 CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeResetConfirm.Dummydataref2.SlotNumber, ptr + index);
4259 index += SIZEOF_UINT16;
4260 CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeResetConfirm.Dummydataref2.DataLength, ptr + index);
4261 index += SIZEOF_UINT16;
4262 CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeResetConfirm.ResultCode, ptr + index);
4263 index += SIZEOF_UINT16;
4264 break;
4265#endif
4266#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
4267 case CSR_MLME_HL_SYNC_CONFIRM_ID:
4268 CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeHlSyncConfirm.Dummydataref1.SlotNumber, ptr + index);
4269 index += SIZEOF_UINT16;
4270 CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeHlSyncConfirm.Dummydataref1.DataLength, ptr + index);
4271 index += SIZEOF_UINT16;
4272 CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeHlSyncConfirm.Dummydataref2.SlotNumber, ptr + index);
4273 index += SIZEOF_UINT16;
4274 CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeHlSyncConfirm.Dummydataref2.DataLength, ptr + index);
4275 index += SIZEOF_UINT16;
4276 memcpy(ptr + index, sig->u.MlmeHlSyncConfirm.GroupAddress.x, 48 / 8);
4277 index += 48 / 8;
4278 CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeHlSyncConfirm.ResultCode, ptr + index);
4279 index += SIZEOF_UINT16;
4280 break;
4281#endif
4282#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
4283 case CSR_MLME_ADD_AUTONOMOUS_SCAN_REQUEST_ID:
4284 CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeAddAutonomousScanRequest.ChannelList.SlotNumber, ptr + index);
4285 index += SIZEOF_UINT16;
4286 CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeAddAutonomousScanRequest.ChannelList.DataLength, ptr + index);
4287 index += SIZEOF_UINT16;
4288 CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeAddAutonomousScanRequest.InformationElements.SlotNumber, ptr + index);
4289 index += SIZEOF_UINT16;
4290 CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeAddAutonomousScanRequest.InformationElements.DataLength, ptr + index);
4291 index += SIZEOF_UINT16;
4292 CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeAddAutonomousScanRequest.VirtualInterfaceIdentifier, ptr + index);
4293 index += SIZEOF_UINT16;
4294 CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeAddAutonomousScanRequest.AutonomousScanId, ptr + index);
4295 index += SIZEOF_UINT16;
4296 CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeAddAutonomousScanRequest.Ifindex, ptr + index);
4297 index += SIZEOF_UINT16;
4298 CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeAddAutonomousScanRequest.ChannelStartingFactor, ptr + index);
4299 index += SIZEOF_UINT16;
4300 CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeAddAutonomousScanRequest.ScanType, ptr + index);
4301 index += SIZEOF_UINT16;
4302 CSR_COPY_UINT32_TO_LITTLE_ENDIAN(sig->u.MlmeAddAutonomousScanRequest.ProbeDelay, ptr + index);
4303 index += SIZEOF_UINT32;
4304 CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeAddAutonomousScanRequest.MinChannelTime, ptr + index);
4305 index += SIZEOF_UINT16;
4306 CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeAddAutonomousScanRequest.MaxChannelTime, ptr + index);
4307 index += SIZEOF_UINT16;
4308 break;
4309#endif
4310#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
4311 case CSR_MLME_SET_REQUEST_ID:
4312 CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeSetRequest.MibAttributeValue.SlotNumber, ptr + index);
4313 index += SIZEOF_UINT16;
4314 CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeSetRequest.MibAttributeValue.DataLength, ptr + index);
4315 index += SIZEOF_UINT16;
4316 CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeSetRequest.Dummydataref2.SlotNumber, ptr + index);
4317 index += SIZEOF_UINT16;
4318 CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeSetRequest.Dummydataref2.DataLength, ptr + index);
4319 index += SIZEOF_UINT16;
4320 break;
4321#endif
4322#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
4323 case CSR_MLME_SM_START_REQUEST_ID:
4324 CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeSmStartRequest.Beacon.SlotNumber, ptr + index);
4325 index += SIZEOF_UINT16;
4326 CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeSmStartRequest.Beacon.DataLength, ptr + index);
4327 index += SIZEOF_UINT16;
4328 CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeSmStartRequest.BssParameters.SlotNumber, ptr + index);
4329 index += SIZEOF_UINT16;
4330 CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeSmStartRequest.BssParameters.DataLength, ptr + index);
4331 index += SIZEOF_UINT16;
4332 CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeSmStartRequest.VirtualInterfaceIdentifier, ptr + index);
4333 index += SIZEOF_UINT16;
4334 CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeSmStartRequest.Ifindex, ptr + index);
4335 index += SIZEOF_UINT16;
4336 CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeSmStartRequest.Channel, ptr + index);
4337 index += SIZEOF_UINT16;
4338 memcpy(ptr + index, sig->u.MlmeSmStartRequest.InterfaceAddress.x, 48 / 8);
4339 index += 48 / 8;
4340 memcpy(ptr + index, sig->u.MlmeSmStartRequest.Bssid.x, 48 / 8);
4341 index += 48 / 8;
4342 CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeSmStartRequest.BeaconPeriod, ptr + index);
4343 index += SIZEOF_UINT16;
4344 CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeSmStartRequest.DtimPeriod, ptr + index);
4345 index += SIZEOF_UINT16;
4346 CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeSmStartRequest.CapabilityInformation, ptr + index);
4347 index += SIZEOF_UINT16;
4348 break;
4349#endif
4350#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
4351 case CSR_MLME_CONNECT_STATUS_CONFIRM_ID:
4352 CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeConnectStatusConfirm.Dummydataref1.SlotNumber, ptr + index);
4353 index += SIZEOF_UINT16;
4354 CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeConnectStatusConfirm.Dummydataref1.DataLength, ptr + index);
4355 index += SIZEOF_UINT16;
4356 CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeConnectStatusConfirm.Dummydataref2.SlotNumber, ptr + index);
4357 index += SIZEOF_UINT16;
4358 CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeConnectStatusConfirm.Dummydataref2.DataLength, ptr + index);
4359 index += SIZEOF_UINT16;
4360 CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeConnectStatusConfirm.VirtualInterfaceIdentifier, ptr + index);
4361 index += SIZEOF_UINT16;
4362 CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeConnectStatusConfirm.ResultCode, ptr + index);
4363 index += SIZEOF_UINT16;
4364 break;
4365#endif
4366#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
4367 case CSR_MLME_DEL_AUTONOMOUS_SCAN_CONFIRM_ID:
4368 CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeDelAutonomousScanConfirm.Dummydataref1.SlotNumber, ptr + index);
4369 index += SIZEOF_UINT16;
4370 CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeDelAutonomousScanConfirm.Dummydataref1.DataLength, ptr + index);
4371 index += SIZEOF_UINT16;
4372 CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeDelAutonomousScanConfirm.Dummydataref2.SlotNumber, ptr + index);
4373 index += SIZEOF_UINT16;
4374 CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeDelAutonomousScanConfirm.Dummydataref2.DataLength, ptr + index);
4375 index += SIZEOF_UINT16;
4376 CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeDelAutonomousScanConfirm.VirtualInterfaceIdentifier, ptr + index);
4377 index += SIZEOF_UINT16;
4378 CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeDelAutonomousScanConfirm.ResultCode, ptr + index);
4379 index += SIZEOF_UINT16;
4380 CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeDelAutonomousScanConfirm.AutonomousScanId, ptr + index);
4381 index += SIZEOF_UINT16;
4382 break;
4383#endif
4384#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
4385 case CSR_MLME_DEL_PERIODIC_REQUEST_ID:
4386 CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeDelPeriodicRequest.Dummydataref1.SlotNumber, ptr + index);
4387 index += SIZEOF_UINT16;
4388 CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeDelPeriodicRequest.Dummydataref1.DataLength, ptr + index);
4389 index += SIZEOF_UINT16;
4390 CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeDelPeriodicRequest.Dummydataref2.SlotNumber, ptr + index);
4391 index += SIZEOF_UINT16;
4392 CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeDelPeriodicRequest.Dummydataref2.DataLength, ptr + index);
4393 index += SIZEOF_UINT16;
4394 CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeDelPeriodicRequest.VirtualInterfaceIdentifier, ptr + index);
4395 index += SIZEOF_UINT16;
4396 CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeDelPeriodicRequest.PeriodicId, ptr + index);
4397 index += SIZEOF_UINT16;
4398 break;
4399#endif
4400#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
4401 case CSR_MLME_SETKEYS_REQUEST_ID:
4402 CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeSetkeysRequest.Key.SlotNumber, ptr + index);
4403 index += SIZEOF_UINT16;
4404 CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeSetkeysRequest.Key.DataLength, ptr + index);
4405 index += SIZEOF_UINT16;
4406 CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeSetkeysRequest.Dummydataref2.SlotNumber, ptr + index);
4407 index += SIZEOF_UINT16;
4408 CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeSetkeysRequest.Dummydataref2.DataLength, ptr + index);
4409 index += SIZEOF_UINT16;
4410 CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeSetkeysRequest.VirtualInterfaceIdentifier, ptr + index);
4411 index += SIZEOF_UINT16;
4412 CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeSetkeysRequest.Length, ptr + index);
4413 index += SIZEOF_UINT16;
4414 CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeSetkeysRequest.KeyId, ptr + index);
4415 index += SIZEOF_UINT16;
4416 CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeSetkeysRequest.KeyType, ptr + index);
4417 index += SIZEOF_UINT16;
4418 memcpy(ptr + index, sig->u.MlmeSetkeysRequest.Address.x, 48 / 8);
4419 index += 48 / 8;
4420 CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeSetkeysRequest.SequenceNumber[0], ptr + index);
4421 index += SIZEOF_UINT16;
4422 CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeSetkeysRequest.SequenceNumber[1], ptr + index);
4423 index += SIZEOF_UINT16;
4424 CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeSetkeysRequest.SequenceNumber[2], ptr + index);
4425 index += SIZEOF_UINT16;
4426 CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeSetkeysRequest.SequenceNumber[3], ptr + index);
4427 index += SIZEOF_UINT16;
4428 CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeSetkeysRequest.SequenceNumber[4], ptr + index);
4429 index += SIZEOF_UINT16;
4430 CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeSetkeysRequest.SequenceNumber[5], ptr + index);
4431 index += SIZEOF_UINT16;
4432 CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeSetkeysRequest.SequenceNumber[6], ptr + index);
4433 index += SIZEOF_UINT16;
4434 CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeSetkeysRequest.SequenceNumber[7], ptr + index);
4435 index += SIZEOF_UINT16;
4436 memcpy(ptr + index, &sig->u.MlmeSetkeysRequest.CipherSuiteSelector, 32 / 8);
4437 index += 32 / 8;
4438 break;
4439#endif
4440#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
4441 case CSR_MLME_PAUSE_AUTONOMOUS_SCAN_REQUEST_ID:
4442 CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmePauseAutonomousScanRequest.Dummydataref1.SlotNumber, ptr + index);
4443 index += SIZEOF_UINT16;
4444 CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmePauseAutonomousScanRequest.Dummydataref1.DataLength, ptr + index);
4445 index += SIZEOF_UINT16;
4446 CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmePauseAutonomousScanRequest.Dummydataref2.SlotNumber, ptr + index);
4447 index += SIZEOF_UINT16;
4448 CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmePauseAutonomousScanRequest.Dummydataref2.DataLength, ptr + index);
4449 index += SIZEOF_UINT16;
4450 CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmePauseAutonomousScanRequest.VirtualInterfaceIdentifier, ptr + index);
4451 index += SIZEOF_UINT16;
4452 CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmePauseAutonomousScanRequest.AutonomousScanId, ptr + index);
4453 index += SIZEOF_UINT16;
4454 CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmePauseAutonomousScanRequest.Pause, ptr + index);
4455 index += SIZEOF_UINT16;
4456 break;
4457#endif
4458#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
4459 case CSR_MLME_GET_REQUEST_ID:
4460 CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeGetRequest.MibAttribute.SlotNumber, ptr + index);
4461 index += SIZEOF_UINT16;
4462 CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeGetRequest.MibAttribute.DataLength, ptr + index);
4463 index += SIZEOF_UINT16;
4464 CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeGetRequest.Dummydataref2.SlotNumber, ptr + index);
4465 index += SIZEOF_UINT16;
4466 CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeGetRequest.Dummydataref2.DataLength, ptr + index);
4467 index += SIZEOF_UINT16;
4468 break;
4469#endif
4470#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
4471 case CSR_MLME_POWERMGT_REQUEST_ID:
4472 CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmePowermgtRequest.Dummydataref1.SlotNumber, ptr + index);
4473 index += SIZEOF_UINT16;
4474 CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmePowermgtRequest.Dummydataref1.DataLength, ptr + index);
4475 index += SIZEOF_UINT16;
4476 CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmePowermgtRequest.Dummydataref2.SlotNumber, ptr + index);
4477 index += SIZEOF_UINT16;
4478 CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmePowermgtRequest.Dummydataref2.DataLength, ptr + index);
4479 index += SIZEOF_UINT16;
4480 CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmePowermgtRequest.VirtualInterfaceIdentifier, ptr + index);
4481 index += SIZEOF_UINT16;
4482 CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmePowermgtRequest.PowerManagementMode, ptr + index);
4483 index += SIZEOF_UINT16;
4484 CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmePowermgtRequest.ReceiveDtims, ptr + index);
4485 index += SIZEOF_UINT16;
4486 CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmePowermgtRequest.ListenInterval, ptr + index);
4487 index += SIZEOF_UINT16;
4488 CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmePowermgtRequest.TrafficWindow, ptr + index);
4489 index += SIZEOF_UINT16;
4490 break;
4491#endif
4492 case CSR_MA_PACKET_ERROR_INDICATION_ID:
4493 CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MaPacketErrorIndication.Dummydataref1.SlotNumber, ptr + index);
4494 index += SIZEOF_UINT16;
4495 CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MaPacketErrorIndication.Dummydataref1.DataLength, ptr + index);
4496 index += SIZEOF_UINT16;
4497 CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MaPacketErrorIndication.Dummydataref2.SlotNumber, ptr + index);
4498 index += SIZEOF_UINT16;
4499 CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MaPacketErrorIndication.Dummydataref2.DataLength, ptr + index);
4500 index += SIZEOF_UINT16;
4501 CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MaPacketErrorIndication.VirtualInterfaceIdentifier, ptr + index);
4502 index += SIZEOF_UINT16;
4503 memcpy(ptr + index, sig->u.MaPacketErrorIndication.PeerQstaAddress.x, 48 / 8);
4504 index += 48 / 8;
4505 CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MaPacketErrorIndication.UserPriority, ptr + index);
4506 index += SIZEOF_UINT16;
4507 CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MaPacketErrorIndication.SequenceNumber, ptr + index);
4508 index += SIZEOF_UINT16;
4509 break;
4510#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
4511 case CSR_MLME_ADD_PERIODIC_REQUEST_ID:
4512 CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeAddPeriodicRequest.Dummydataref1.SlotNumber, ptr + index);
4513 index += SIZEOF_UINT16;
4514 CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeAddPeriodicRequest.Dummydataref1.DataLength, ptr + index);
4515 index += SIZEOF_UINT16;
4516 CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeAddPeriodicRequest.Dummydataref2.SlotNumber, ptr + index);
4517 index += SIZEOF_UINT16;
4518 CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeAddPeriodicRequest.Dummydataref2.DataLength, ptr + index);
4519 index += SIZEOF_UINT16;
4520 CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeAddPeriodicRequest.VirtualInterfaceIdentifier, ptr + index);
4521 index += SIZEOF_UINT16;
4522 CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeAddPeriodicRequest.PeriodicId, ptr + index);
4523 index += SIZEOF_UINT16;
4524 CSR_COPY_UINT32_TO_LITTLE_ENDIAN(sig->u.MlmeAddPeriodicRequest.MaximumLatency, ptr + index);
4525 index += SIZEOF_UINT32;
4526 CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeAddPeriodicRequest.PeriodicSchedulingMode, ptr + index);
4527 index += SIZEOF_UINT16;
4528 CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeAddPeriodicRequest.WakeHost, ptr + index);
4529 index += SIZEOF_UINT16;
4530 CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeAddPeriodicRequest.UserPriority, ptr + index);
4531 index += SIZEOF_UINT16;
4532 break;
4533#endif
4534#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
4535 case CSR_MLME_ADD_TSPEC_REQUEST_ID:
4536 CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeAddTspecRequest.Dummydataref1.SlotNumber, ptr + index);
4537 index += SIZEOF_UINT16;
4538 CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeAddTspecRequest.Dummydataref1.DataLength, ptr + index);
4539 index += SIZEOF_UINT16;
4540 CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeAddTspecRequest.Dummydataref2.SlotNumber, ptr + index);
4541 index += SIZEOF_UINT16;
4542 CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeAddTspecRequest.Dummydataref2.DataLength, ptr + index);
4543 index += SIZEOF_UINT16;
4544 CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeAddTspecRequest.VirtualInterfaceIdentifier, ptr + index);
4545 index += SIZEOF_UINT16;
4546 CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeAddTspecRequest.UserPriority, ptr + index);
4547 index += SIZEOF_UINT16;
4548 CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeAddTspecRequest.Direction, ptr + index);
4549 index += SIZEOF_UINT16;
4550 CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeAddTspecRequest.PsScheme, ptr + index);
4551 index += SIZEOF_UINT16;
4552 CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeAddTspecRequest.MediumTime, ptr + index);
4553 index += SIZEOF_UINT16;
4554 CSR_COPY_UINT32_TO_LITTLE_ENDIAN(sig->u.MlmeAddTspecRequest.ServiceStartTime, ptr + index);
4555 index += SIZEOF_UINT32;
4556 CSR_COPY_UINT32_TO_LITTLE_ENDIAN(sig->u.MlmeAddTspecRequest.ServiceInterval, ptr + index);
4557 index += SIZEOF_UINT32;
4558 CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeAddTspecRequest.MinimumDataRate, ptr + index);
4559 index += SIZEOF_UINT16;
4560 break;
4561#endif
4562#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
4563 case CSR_MLME_ADD_MULTICAST_ADDRESS_CONFIRM_ID:
4564 CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeAddMulticastAddressConfirm.Dummydataref1.SlotNumber, ptr + index);
4565 index += SIZEOF_UINT16;
4566 CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeAddMulticastAddressConfirm.Dummydataref1.DataLength, ptr + index);
4567 index += SIZEOF_UINT16;
4568 CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeAddMulticastAddressConfirm.Dummydataref2.SlotNumber, ptr + index);
4569 index += SIZEOF_UINT16;
4570 CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeAddMulticastAddressConfirm.Dummydataref2.DataLength, ptr + index);
4571 index += SIZEOF_UINT16;
4572 CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeAddMulticastAddressConfirm.VirtualInterfaceIdentifier, ptr + index);
4573 index += SIZEOF_UINT16;
4574 CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeAddMulticastAddressConfirm.ResultCode, ptr + index);
4575 index += SIZEOF_UINT16;
4576 break;
4577#endif
4578#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
4579 case CSR_MLME_ADD_TSPEC_CONFIRM_ID:
4580 CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeAddTspecConfirm.Dummydataref1.SlotNumber, ptr + index);
4581 index += SIZEOF_UINT16;
4582 CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeAddTspecConfirm.Dummydataref1.DataLength, ptr + index);
4583 index += SIZEOF_UINT16;
4584 CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeAddTspecConfirm.Dummydataref2.SlotNumber, ptr + index);
4585 index += SIZEOF_UINT16;
4586 CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeAddTspecConfirm.Dummydataref2.DataLength, ptr + index);
4587 index += SIZEOF_UINT16;
4588 CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeAddTspecConfirm.VirtualInterfaceIdentifier, ptr + index);
4589 index += SIZEOF_UINT16;
4590 CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeAddTspecConfirm.UserPriority, ptr + index);
4591 index += SIZEOF_UINT16;
4592 CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeAddTspecConfirm.ResultCode, ptr + index);
4593 index += SIZEOF_UINT16;
4594 break;
4595#endif
4596#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
4597 case CSR_MLME_HL_SYNC_CANCEL_CONFIRM_ID:
4598 CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeHlSyncCancelConfirm.Dummydataref1.SlotNumber, ptr + index);
4599 index += SIZEOF_UINT16;
4600 CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeHlSyncCancelConfirm.Dummydataref1.DataLength, ptr + index);
4601 index += SIZEOF_UINT16;
4602 CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeHlSyncCancelConfirm.Dummydataref2.SlotNumber, ptr + index);
4603 index += SIZEOF_UINT16;
4604 CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeHlSyncCancelConfirm.Dummydataref2.DataLength, ptr + index);
4605 index += SIZEOF_UINT16;
4606 CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeHlSyncCancelConfirm.ResultCode, ptr + index);
4607 index += SIZEOF_UINT16;
4608 break;
4609#endif
4610#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
4611 case CSR_MLME_SCAN_CONFIRM_ID:
4612 CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeScanConfirm.Dummydataref1.SlotNumber, ptr + index);
4613 index += SIZEOF_UINT16;
4614 CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeScanConfirm.Dummydataref1.DataLength, ptr + index);
4615 index += SIZEOF_UINT16;
4616 CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeScanConfirm.Dummydataref2.SlotNumber, ptr + index);
4617 index += SIZEOF_UINT16;
4618 CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeScanConfirm.Dummydataref2.DataLength, ptr + index);
4619 index += SIZEOF_UINT16;
4620 CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeScanConfirm.VirtualInterfaceIdentifier, ptr + index);
4621 index += SIZEOF_UINT16;
4622 CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeScanConfirm.ResultCode, ptr + index);
4623 index += SIZEOF_UINT16;
4624 break;
4625#endif
4626 case CSR_DEBUG_STRING_INDICATION_ID:
4627 CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.DebugStringIndication.DebugMessage.SlotNumber, ptr + index);
4628 index += SIZEOF_UINT16;
4629 CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.DebugStringIndication.DebugMessage.DataLength, ptr + index);
4630 index += SIZEOF_UINT16;
4631 CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.DebugStringIndication.Dummydataref2.SlotNumber, ptr + index);
4632 index += SIZEOF_UINT16;
4633 CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.DebugStringIndication.Dummydataref2.DataLength, ptr + index);
4634 index += SIZEOF_UINT16;
4635 break;
4636#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
4637 case CSR_MLME_ADD_TEMPLATE_CONFIRM_ID:
4638 CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeAddTemplateConfirm.Dummydataref1.SlotNumber, ptr + index);
4639 index += SIZEOF_UINT16;
4640 CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeAddTemplateConfirm.Dummydataref1.DataLength, ptr + index);
4641 index += SIZEOF_UINT16;
4642 CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeAddTemplateConfirm.Dummydataref2.SlotNumber, ptr + index);
4643 index += SIZEOF_UINT16;
4644 CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeAddTemplateConfirm.Dummydataref2.DataLength, ptr + index);
4645 index += SIZEOF_UINT16;
4646 CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeAddTemplateConfirm.VirtualInterfaceIdentifier, ptr + index);
4647 index += SIZEOF_UINT16;
4648 CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeAddTemplateConfirm.FrameType, ptr + index);
4649 index += SIZEOF_UINT16;
4650 CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeAddTemplateConfirm.ResultCode, ptr + index);
4651 index += SIZEOF_UINT16;
4652 break;
4653#endif
4654#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
4655 case CSR_MLME_BLOCKACK_ERROR_INDICATION_ID:
4656 CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeBlockackErrorIndication.Dummydataref1.SlotNumber, ptr + index);
4657 index += SIZEOF_UINT16;
4658 CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeBlockackErrorIndication.Dummydataref1.DataLength, ptr + index);
4659 index += SIZEOF_UINT16;
4660 CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeBlockackErrorIndication.Dummydataref2.SlotNumber, ptr + index);
4661 index += SIZEOF_UINT16;
4662 CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeBlockackErrorIndication.Dummydataref2.DataLength, ptr + index);
4663 index += SIZEOF_UINT16;
4664 CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeBlockackErrorIndication.VirtualInterfaceIdentifier, ptr + index);
4665 index += SIZEOF_UINT16;
4666 CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeBlockackErrorIndication.ResultCode, ptr + index);
4667 index += SIZEOF_UINT16;
4668 memcpy(ptr + index, sig->u.MlmeBlockackErrorIndication.PeerQstaAddress.x, 48 / 8);
4669 index += 48 / 8;
4670 break;
4671#endif
4672#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
4673 case CSR_MLME_SET_CONFIRM_ID:
4674 CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeSetConfirm.MibAttributeValue.SlotNumber, ptr + index);
4675 index += SIZEOF_UINT16;
4676 CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeSetConfirm.MibAttributeValue.DataLength, ptr + index);
4677 index += SIZEOF_UINT16;
4678 CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeSetConfirm.Dummydataref2.SlotNumber, ptr + index);
4679 index += SIZEOF_UINT16;
4680 CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeSetConfirm.Dummydataref2.DataLength, ptr + index);
4681 index += SIZEOF_UINT16;
4682 CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeSetConfirm.Status, ptr + index);
4683 index += SIZEOF_UINT16;
4684 CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeSetConfirm.ErrorIndex, ptr + index);
4685 index += SIZEOF_UINT16;
4686 break;
4687#endif
4688#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
4689 case CSR_MLME_MEASURE_REQUEST_ID:
4690 CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeMeasureRequest.MeasurementRequestSet.SlotNumber, ptr + index);
4691 index += SIZEOF_UINT16;
4692 CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeMeasureRequest.MeasurementRequestSet.DataLength, ptr + index);
4693 index += SIZEOF_UINT16;
4694 CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeMeasureRequest.Dummydataref2.SlotNumber, ptr + index);
4695 index += SIZEOF_UINT16;
4696 CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeMeasureRequest.Dummydataref2.DataLength, ptr + index);
4697 index += SIZEOF_UINT16;
4698 CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeMeasureRequest.DialogToken, ptr + index);
4699 index += SIZEOF_UINT16;
4700 break;
4701#endif
4702#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
4703 case CSR_MLME_START_AGGREGATION_CONFIRM_ID:
4704 CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeStartAggregationConfirm.Dummydataref1.SlotNumber, ptr + index);
4705 index += SIZEOF_UINT16;
4706 CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeStartAggregationConfirm.Dummydataref1.DataLength, ptr + index);
4707 index += SIZEOF_UINT16;
4708 CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeStartAggregationConfirm.Dummydataref2.SlotNumber, ptr + index);
4709 index += SIZEOF_UINT16;
4710 CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeStartAggregationConfirm.Dummydataref2.DataLength, ptr + index);
4711 index += SIZEOF_UINT16;
4712 CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeStartAggregationConfirm.VirtualInterfaceIdentifier, ptr + index);
4713 index += SIZEOF_UINT16;
4714 memcpy(ptr + index, sig->u.MlmeStartAggregationConfirm.PeerQstaAddress.x, 48 / 8);
4715 index += 48 / 8;
4716 CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeStartAggregationConfirm.UserPriority, ptr + index);
4717 index += SIZEOF_UINT16;
4718 CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeStartAggregationConfirm.Direction, ptr + index);
4719 index += SIZEOF_UINT16;
4720 CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeStartAggregationConfirm.ResultCode, ptr + index);
4721 index += SIZEOF_UINT16;
4722 CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeStartAggregationConfirm.SequenceNumber, ptr + index);
4723 index += SIZEOF_UINT16;
4724 break;
4725#endif
4726#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
4727 case CSR_MLME_STOP_MEASURE_CONFIRM_ID:
4728 CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeStopMeasureConfirm.Dummydataref1.SlotNumber, ptr + index);
4729 index += SIZEOF_UINT16;
4730 CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeStopMeasureConfirm.Dummydataref1.DataLength, ptr + index);
4731 index += SIZEOF_UINT16;
4732 CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeStopMeasureConfirm.Dummydataref2.SlotNumber, ptr + index);
4733 index += SIZEOF_UINT16;
4734 CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeStopMeasureConfirm.Dummydataref2.DataLength, ptr + index);
4735 index += SIZEOF_UINT16;
4736 CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeStopMeasureConfirm.ResultCode, ptr + index);
4737 index += SIZEOF_UINT16;
4738 CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeStopMeasureConfirm.DialogToken, ptr + index);
4739 index += SIZEOF_UINT16;
4740 break;
4741#endif
4742 case CSR_MA_PACKET_CONFIRM_ID:
4743 CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MaPacketConfirm.Dummydataref1.SlotNumber, ptr + index);
4744 index += SIZEOF_UINT16;
4745 CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MaPacketConfirm.Dummydataref1.DataLength, ptr + index);
4746 index += SIZEOF_UINT16;
4747 CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MaPacketConfirm.Dummydataref2.SlotNumber, ptr + index);
4748 index += SIZEOF_UINT16;
4749 CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MaPacketConfirm.Dummydataref2.DataLength, ptr + index);
4750 index += SIZEOF_UINT16;
4751 CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MaPacketConfirm.VirtualInterfaceIdentifier, ptr + index);
4752 index += SIZEOF_UINT16;
4753 CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MaPacketConfirm.TransmissionStatus, ptr + index);
4754 index += SIZEOF_UINT16;
4755 CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MaPacketConfirm.RetryCount, ptr + index);
4756 index += SIZEOF_UINT16;
4757 CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MaPacketConfirm.Rate, ptr + index);
4758 index += SIZEOF_UINT16;
4759 CSR_COPY_UINT32_TO_LITTLE_ENDIAN(sig->u.MaPacketConfirm.HostTag, ptr + index);
4760 index += SIZEOF_UINT32;
4761 break;
4762#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
4763 case CSR_MLME_DEL_PERIODIC_CONFIRM_ID:
4764 CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeDelPeriodicConfirm.Dummydataref1.SlotNumber, ptr + index);
4765 index += SIZEOF_UINT16;
4766 CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeDelPeriodicConfirm.Dummydataref1.DataLength, ptr + index);
4767 index += SIZEOF_UINT16;
4768 CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeDelPeriodicConfirm.Dummydataref2.SlotNumber, ptr + index);
4769 index += SIZEOF_UINT16;
4770 CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeDelPeriodicConfirm.Dummydataref2.DataLength, ptr + index);
4771 index += SIZEOF_UINT16;
4772 CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeDelPeriodicConfirm.VirtualInterfaceIdentifier, ptr + index);
4773 index += SIZEOF_UINT16;
4774 CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeDelPeriodicConfirm.PeriodicId, ptr + index);
4775 index += SIZEOF_UINT16;
4776 CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeDelPeriodicConfirm.ResultCode, ptr + index);
4777 index += SIZEOF_UINT16;
4778 break;
4779#endif
4780#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
4781 case CSR_MLME_STOP_MEASURE_REQUEST_ID:
4782 CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeStopMeasureRequest.Dummydataref1.SlotNumber, ptr + index);
4783 index += SIZEOF_UINT16;
4784 CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeStopMeasureRequest.Dummydataref1.DataLength, ptr + index);
4785 index += SIZEOF_UINT16;
4786 CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeStopMeasureRequest.Dummydataref2.SlotNumber, ptr + index);
4787 index += SIZEOF_UINT16;
4788 CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeStopMeasureRequest.Dummydataref2.DataLength, ptr + index);
4789 index += SIZEOF_UINT16;
4790 CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeStopMeasureRequest.DialogToken, ptr + index);
4791 index += SIZEOF_UINT16;
4792 break;
4793#endif
4794
4795 default:
4796 return CSR_WIFI_HIP_RESULT_INVALID_VALUE;
4797 }
4798
4799 *sig_len = index;
4800
4801 return CSR_RESULT_SUCCESS;
4802} /* write_pack() */
4803
4804
diff --git a/drivers/staging/csr/csr_wifi_hip_send.c b/drivers/staging/csr/csr_wifi_hip_send.c
deleted file mode 100644
index 76429e5e77cf..000000000000
--- a/drivers/staging/csr/csr_wifi_hip_send.c
+++ /dev/null
@@ -1,415 +0,0 @@
1/*****************************************************************************
2
3 (c) Cambridge Silicon Radio Limited 2011
4 All rights reserved and confidential information of CSR
5
6 Refer to LICENSE.txt included with this source for details
7 on the license terms.
8
9*****************************************************************************/
10
11/*
12 * ***************************************************************************
13 *
14 * FILE: csr_wifi_hip_send.c
15 *
16 * PURPOSE:
17 * Code for adding a signal request to the from-host queue.
18 * When the driver bottom-half is run, it will take requests from the
19 * queue and pass them to the UniFi.
20 *
21 * ***************************************************************************
22 */
23#include "csr_wifi_hip_unifi.h"
24#include "csr_wifi_hip_conversions.h"
25#include "csr_wifi_hip_sigs.h"
26#include "csr_wifi_hip_card.h"
27
28unifi_TrafficQueue unifi_frame_priority_to_queue(CSR_PRIORITY priority)
29{
30 switch (priority)
31 {
32 case CSR_QOS_UP0:
33 case CSR_QOS_UP3:
34 return UNIFI_TRAFFIC_Q_BE;
35 case CSR_QOS_UP1:
36 case CSR_QOS_UP2:
37 return UNIFI_TRAFFIC_Q_BK;
38 case CSR_QOS_UP4:
39 case CSR_QOS_UP5:
40 return UNIFI_TRAFFIC_Q_VI;
41 case CSR_QOS_UP6:
42 case CSR_QOS_UP7:
43 case CSR_MANAGEMENT:
44 return UNIFI_TRAFFIC_Q_VO;
45 default:
46 return UNIFI_TRAFFIC_Q_BE;
47 }
48}
49
50
51CSR_PRIORITY unifi_get_default_downgrade_priority(unifi_TrafficQueue queue)
52{
53 switch (queue)
54 {
55 case UNIFI_TRAFFIC_Q_BE:
56 return CSR_QOS_UP0;
57 case UNIFI_TRAFFIC_Q_BK:
58 return CSR_QOS_UP1;
59 case UNIFI_TRAFFIC_Q_VI:
60 return CSR_QOS_UP5;
61 case UNIFI_TRAFFIC_Q_VO:
62 return CSR_QOS_UP6;
63 default:
64 return CSR_QOS_UP0;
65 }
66}
67
68
69/*
70 * ---------------------------------------------------------------------------
71 * send_signal
72 *
73 * This function queues a signal for sending to UniFi. It first checks
74 * that there is space on the fh_signal_queue for another entry, then
75 * claims any bulk data slots required and copies data into them. Then
76 * increments the fh_signal_queue write count.
77 *
78 * The fh_signal_queue is later processed by the driver bottom half
79 * (in unifi_bh()).
80 *
81 * This function call unifi_pause_xmit() to pause the flow of data plane
82 * packets when:
83 * - the fh_signal_queue ring buffer is full
84 * - there are less than UNIFI_MAX_DATA_REFERENCES (2) bulk data
85 * slots available.
86 *
87 * Arguments:
88 * card Pointer to card context structure
89 * sigptr Pointer to the signal to write to UniFi.
90 * siglen Number of bytes pointer to by sigptr.
91 * bulkdata Array of pointers to an associated bulk data.
92 * sigq To which from-host queue to add the signal.
93 *
94 * Returns:
95 * CSR_RESULT_SUCCESS on success
96 * CSR_WIFI_HIP_RESULT_NO_SPACE if there were insufficient data slots or
97 * no free signal queue entry
98 *
99 * Notes:
100 * Calls unifi_pause_xmit() when the last slots are used.
101 * ---------------------------------------------------------------------------
102 */
static CsrResult send_signal(card_t *card, const u8 *sigptr, u32 siglen,
                             const bulk_data_param_t *bulkdata,
                             q_t *sigq, u32 priority_q, u32 run_bh)
{
    u16 i, data_slot_size;
    card_signal_t *csptr;
    s16 qe;
    CsrResult r;
    s16 debug_print = 0;  /* non-zero when a bulk-data anomaly was seen; triggers a signal dump below */

    data_slot_size = CardGetDataSlotSize(card);

    /* Check that the fh_data_queue has a free slot */
    if (!CSR_WIFI_HIP_Q_SLOTS_FREE(sigq))
    {
        unifi_trace(card->ospriv, UDBG3, "send_signal: %s full\n", sigq->name);

        return CSR_WIFI_HIP_RESULT_NO_SPACE;
    }

    /*
     * Now add the signal to the From Host signal queue
     */
    /* Get next slot on queue */
    qe = CSR_WIFI_HIP_Q_NEXT_W_SLOT(sigq);
    csptr = CSR_WIFI_HIP_Q_SLOT_DATA(sigq, qe);

    /* Make up the card_signal struct: copy the packed signal bytes into the slot */
    csptr->signal_length = (u16)siglen;
    memcpy((void *)csptr->sigbuf, (void *)sigptr, siglen);

    /* Record up to UNIFI_MAX_DATA_REFERENCES bulk-data descriptors alongside the signal */
    for (i = 0; i < UNIFI_MAX_DATA_REFERENCES; ++i)
    {
        if ((bulkdata != NULL) && (bulkdata->d[i].data_length != 0))
        {
            u32 datalen = bulkdata->d[i].data_length;

            /* Make sure data will fit in a bulk data slot */
            if (bulkdata->d[i].os_data_ptr == NULL)
            {
                /* Length without a data pointer: drop the reference, keep the signal */
                unifi_error(card->ospriv, "send_signal - NULL bulkdata[%d]\n", i);
                debug_print++;
                csptr->bulkdata[i].data_length = 0;
            }
            else
            {
                if (datalen > data_slot_size)
                {
                    /* Oversized payload is truncated to the slot size rather than rejected */
                    unifi_error(card->ospriv,
                                "send_signal - Invalid data length %u (@%p), "
                                "truncating\n",
                                datalen, bulkdata->d[i].os_data_ptr);
                    datalen = data_slot_size;
                    debug_print++;
                }
                /* Store the bulk data info in the soft queue. */
                csptr->bulkdata[i].os_data_ptr = (u8 *)bulkdata->d[i].os_data_ptr;
                csptr->bulkdata[i].os_net_buf_ptr = (u8 *)bulkdata->d[i].os_net_buf_ptr;
                csptr->bulkdata[i].net_buf_length = bulkdata->d[i].net_buf_length;
                csptr->bulkdata[i].data_length = datalen;
            }
        }
        else
        {
            /* No bulk data for this reference: mark the descriptor empty */
            UNIFI_INIT_BULK_DATA(&csptr->bulkdata[i]);
        }
    }

    /* Dump diagnostic info about the offending signal (first 16 bytes) */
    if (debug_print)
    {
        const u8 *sig = sigptr;

        unifi_error(card->ospriv, "Signal(%d): %*ph\n", siglen,
                    16, sig);
        unifi_error(card->ospriv, "Bulkdata pointer %p(%d), %p(%d)\n",
                    bulkdata != NULL?bulkdata->d[0].os_data_ptr : NULL,
                    bulkdata != NULL?bulkdata->d[0].data_length : 0,
                    bulkdata != NULL?bulkdata->d[1].os_data_ptr : NULL,
                    bulkdata != NULL?bulkdata->d[1].data_length : 0);
    }

    /* Advance the written count to say there is a new entry */
    CSR_WIFI_HIP_Q_INC_W(sigq);

    /*
     * Set the flag to say reason for waking was a host request.
     * Then ask the OS layer to run the unifi_bh.
     */
    if (run_bh == 1)
    {
        card->bh_reason_host = 1;
        r = unifi_run_bh(card->ospriv);
        if (r != CSR_RESULT_SUCCESS)
        {
            unifi_error(card->ospriv, "failed to run bh.\n");
            card->bh_reason_host = 0;

            /*
             * The bulk data buffer will be freed by the caller.
             * We need to invalidate the description of the bulk data in our
             * soft queue, to prevent the core freeing the bulk data again later.
             */
            for (i = 0; i < UNIFI_MAX_DATA_REFERENCES; ++i)
            {
                if (csptr->bulkdata[i].data_length != 0)
                {
                    csptr->bulkdata[i].os_data_ptr = csptr->bulkdata[i].os_net_buf_ptr = NULL;
                    csptr->bulkdata[i].net_buf_length = csptr->bulkdata[i].data_length = 0;
                }
            }
            return r;
        }
    }
    else
    {
        /* Caller deferred bh scheduling (e.g. periodic-wake mode); logged for visibility */
        unifi_error(card->ospriv, "run_bh=%d, bh not called.\n", run_bh);
    }

    /*
     * Have we used up all the fh signal list entries?
     */
    if (CSR_WIFI_HIP_Q_SLOTS_FREE(sigq) == 0)
    {
        /* We have filled the queue, so stop the upper layer. The command queue
         * is an exception, as suspending due to that being full could delay
         * resume/retry until new commands or data are received.
         */
        if (sigq != &card->fh_command_queue)
        {
            /*
             * Must call unifi_pause_xmit() *before* setting the paused flag.
             * (the unifi_pause_xmit call should not be after setting the flag because of the possibility of being interrupted
             * by the bh thread between our setting the flag and the call to unifi_pause_xmit()
             * If bh thread then cleared the flag, we would end up paused, but without the flag set)
             * Instead, setting it afterwards means that if this thread is interrupted by the bh thread
             * the pause flag is still guaranteed to end up set
             * However the potential deadlock now is that if bh thread emptied the queue and cleared the flag before this thread's
             * call to unifi_pause_xmit(), then bh thread may not run again because it will be waiting for
             * a packet to appear in the queue but nothing ever will because xmit is paused.
             * So we will end up with the queue paused, and the flag set to say it is paused, but bh never runs to unpause it.
             * (Note even this bad situation would not persist long in practice, because something else (eg rx, or tx in different queue)
             * is likely to wake bh thread quite soon)
             * But to avoid this deadlock completely, after setting the flag we check that there is something left in the queue.
             * If there is, we know that bh thread has not emptied the queue yet.
             * Since bh thread checks to unpause the queue *after* taking packets from the queue, we know that it is still going to make at
             * least one more check to see whether it needs to unpause the queue. So all is well.
             * If there are no packets in the queue, then the deadlock described above might happen. To make sure it does not, we
             * unpause the queue here. A possible side effect is that unifi_restart_xmit() may (rarely) be called for second time
             * unnecessarily, which is harmless
             */

#if defined (CSR_WIFI_HIP_DEBUG_OFFLINE) && defined (CSR_WIFI_HIP_DATA_PLANE_PROFILE)
            unifi_debug_log_to_buf("P");
#endif
            unifi_pause_xmit(card->ospriv, (unifi_TrafficQueue)priority_q);
            card_tx_q_pause(card, priority_q);
            if (CSR_WIFI_HIP_Q_SLOTS_USED(sigq) == 0)
            {
                /* bh may have drained the queue already; unpause to avoid the deadlock described above */
                card_tx_q_unpause(card, priority_q);
                unifi_restart_xmit(card->ospriv, (unifi_TrafficQueue) priority_q);
            }
        }
        else
        {
            unifi_warning(card->ospriv,
                          "send_signal: fh_cmd_q full, not pausing (run_bh=%d)\n",
                          run_bh);
        }
    }

    return CSR_RESULT_SUCCESS;
} /* send_signal() */
275
276
277/*
278 * ---------------------------------------------------------------------------
279 * unifi_send_signal
280 *
281 * Invokes send_signal() to queue a signal in the command or traffic queue
282 * If sigptr pointer is NULL, it pokes the bh to check if UniFi is responsive.
283 *
284 * Arguments:
285 * card Pointer to card context struct
286 * sigptr Pointer to signal from card.
287 * siglen Size of the signal
288 * bulkdata Pointer to the bulk data of the signal
289 *
290 * Returns:
291 * CSR_RESULT_SUCCESS on success
292 * CSR_WIFI_HIP_RESULT_NO_SPACE if there were insufficient data slots or no free signal queue entry
293 *
294 * Notes:
295 * unifi_send_signal() is used to queue signals, created by the driver,
296 * to the device. Signals are constructed using the UniFi packed structures.
297 * ---------------------------------------------------------------------------
298 */
299CsrResult unifi_send_signal(card_t *card, const u8 *sigptr, u32 siglen,
300 const bulk_data_param_t *bulkdata)
301{
302 q_t *sig_soft_q;
303 u16 signal_id;
304 CsrResult r;
305 u32 run_bh;
306 u32 priority_q;
307
308 /* A NULL signal pointer is a request to check if UniFi is responsive */
309 if (sigptr == NULL)
310 {
311 card->bh_reason_host = 1;
312 return unifi_run_bh(card->ospriv);
313 }
314
315 priority_q = 0;
316 run_bh = 1;
317 signal_id = GET_SIGNAL_ID(sigptr);
318 /*
319 * If the signal is a CSR_MA_PACKET_REQUEST ,
320 * we send it using the traffic soft queue. Else we use the command soft queue.
321 */
322 if (signal_id == CSR_MA_PACKET_REQUEST_ID)
323 {
324 u16 frame_priority;
325
326 if (card->periodic_wake_mode == UNIFI_PERIODIC_WAKE_HOST_ENABLED)
327 {
328 run_bh = 0;
329 }
330
331#if defined (CSR_WIFI_HIP_DEBUG_OFFLINE) && defined (CSR_WIFI_HIP_DATA_PLANE_PROFILE)
332 unifi_debug_log_to_buf("D");
333#endif
334 /* Sanity check: MA-PACKET.req must have a valid bulk data */
335 if ((bulkdata->d[0].data_length == 0) || (bulkdata->d[0].os_data_ptr == NULL))
336 {
337 unifi_error(card->ospriv, "MA-PACKET.req with empty bulk data (%d bytes in %p)\n",
338 bulkdata->d[0].data_length, bulkdata->d[0].os_data_ptr);
339 dump((void *)sigptr, siglen);
340 return CSR_RESULT_FAILURE;
341 }
342
343 /* Map the frame priority to a traffic queue index. */
344 frame_priority = GET_PACKED_MA_PACKET_REQUEST_FRAME_PRIORITY(sigptr);
345 priority_q = unifi_frame_priority_to_queue((CSR_PRIORITY)frame_priority);
346
347 sig_soft_q = &card->fh_traffic_queue[priority_q];
348 }
349 else
350 {
351 sig_soft_q = &card->fh_command_queue;
352 }
353
354 r = send_signal(card, sigptr, siglen, bulkdata, sig_soft_q, priority_q, run_bh);
355 /* On error, the caller must free or requeue bulkdata buffers */
356
357 return r;
358} /* unifi_send_signal() */
359
360
361/*
362 * ---------------------------------------------------------------------------
363 * unifi_send_resources_available
364 *
365 * Examines whether there is available space to queue
366 * a signal in the command or traffic queue
367 *
368 * Arguments:
369 * card Pointer to card context struct
370 * sigptr Pointer to signal.
371 *
372 * Returns:
373 * CSR_RESULT_SUCCESS if resources available
374 * CSR_WIFI_HIP_RESULT_NO_SPACE if there was no free signal queue entry
375 *
376 * Notes:
377 * ---------------------------------------------------------------------------
378 */
379CsrResult unifi_send_resources_available(card_t *card, const u8 *sigptr)
380{
381 q_t *sig_soft_q;
382 u16 signal_id = GET_SIGNAL_ID(sigptr);
383
384 /*
385 * If the signal is a CSR_MA_PACKET_REQUEST ,
386 * we send it using the traffic soft queue. Else we use the command soft queue.
387 */
388 if (signal_id == CSR_MA_PACKET_REQUEST_ID)
389 {
390 u16 frame_priority;
391 u32 priority_q;
392
393 /* Map the frame priority to a traffic queue index. */
394 frame_priority = GET_PACKED_MA_PACKET_REQUEST_FRAME_PRIORITY(sigptr);
395 priority_q = unifi_frame_priority_to_queue((CSR_PRIORITY)frame_priority);
396
397 sig_soft_q = &card->fh_traffic_queue[priority_q];
398 }
399 else
400 {
401 sig_soft_q = &card->fh_command_queue;
402 }
403
404 /* Check that the fh_data_queue has a free slot */
405 if (!CSR_WIFI_HIP_Q_SLOTS_FREE(sig_soft_q))
406 {
407 unifi_notice(card->ospriv, "unifi_send_resources_available: %s full\n",
408 sig_soft_q->name);
409 return CSR_WIFI_HIP_RESULT_NO_SPACE;
410 }
411
412 return CSR_RESULT_SUCCESS;
413} /* unifi_send_resources_available() */
414
415
diff --git a/drivers/staging/csr/csr_wifi_hip_signals.c b/drivers/staging/csr/csr_wifi_hip_signals.c
deleted file mode 100644
index 3c821320df00..000000000000
--- a/drivers/staging/csr/csr_wifi_hip_signals.c
+++ /dev/null
@@ -1,1313 +0,0 @@
1/*****************************************************************************
2
3 (c) Cambridge Silicon Radio Limited 2011
4 All rights reserved and confidential information of CSR
5
6 Refer to LICENSE.txt included with this source for details
7 on the license terms.
8
9*****************************************************************************/
10
11/* Note: this is an auto-generated file. */
12
13
14/* Generated by hip_dd_l_c_gen.pl */
15
16#include "csr_wifi_hip_signals.h"
17
18#include "csr_wifi_hip_unifi.h"
19
20s32 SigGetSize(const CSR_SIGNAL *aSignal)
21{
22 switch (aSignal->SignalPrimitiveHeader.SignalId)
23 {
24 case CSR_MA_PACKET_REQUEST_ID:
25 return offsetof(struct CSR_SIGNAL_PRIMITIVE, u) + sizeof(CSR_MA_PACKET_REQUEST);
26 case CSR_MA_PACKET_CONFIRM_ID:
27 return offsetof(struct CSR_SIGNAL_PRIMITIVE, u) + sizeof(CSR_MA_PACKET_CONFIRM);
28 case CSR_MA_PACKET_INDICATION_ID:
29 return offsetof(struct CSR_SIGNAL_PRIMITIVE, u) + sizeof(CSR_MA_PACKET_INDICATION);
30 case CSR_MA_PACKET_CANCEL_REQUEST_ID:
31 return offsetof(struct CSR_SIGNAL_PRIMITIVE, u) + sizeof(CSR_MA_PACKET_CANCEL_REQUEST);
32 case CSR_MA_VIF_AVAILABILITY_RESPONSE_ID:
33 return offsetof(struct CSR_SIGNAL_PRIMITIVE, u) + sizeof(CSR_MA_VIF_AVAILABILITY_RESPONSE);
34 case CSR_MA_VIF_AVAILABILITY_INDICATION_ID:
35 return offsetof(struct CSR_SIGNAL_PRIMITIVE, u) + sizeof(CSR_MA_VIF_AVAILABILITY_INDICATION);
36 case CSR_MA_PACKET_ERROR_INDICATION_ID:
37 return offsetof(struct CSR_SIGNAL_PRIMITIVE, u) + sizeof(CSR_MA_PACKET_ERROR_INDICATION);
38#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
39 case CSR_MLME_RESET_REQUEST_ID:
40 return offsetof(struct CSR_SIGNAL_PRIMITIVE, u) + sizeof(CSR_MLME_RESET_REQUEST);
41#endif
42#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
43 case CSR_MLME_RESET_CONFIRM_ID:
44 return offsetof(struct CSR_SIGNAL_PRIMITIVE, u) + sizeof(CSR_MLME_RESET_CONFIRM);
45#endif
46#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
47 case CSR_MLME_GET_REQUEST_ID:
48 return offsetof(struct CSR_SIGNAL_PRIMITIVE, u) + sizeof(CSR_MLME_GET_REQUEST);
49#endif
50#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
51 case CSR_MLME_GET_CONFIRM_ID:
52 return offsetof(struct CSR_SIGNAL_PRIMITIVE, u) + sizeof(CSR_MLME_GET_CONFIRM);
53#endif
54#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
55 case CSR_MLME_SET_REQUEST_ID:
56 return offsetof(struct CSR_SIGNAL_PRIMITIVE, u) + sizeof(CSR_MLME_SET_REQUEST);
57#endif
58#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
59 case CSR_MLME_SET_CONFIRM_ID:
60 return offsetof(struct CSR_SIGNAL_PRIMITIVE, u) + sizeof(CSR_MLME_SET_CONFIRM);
61#endif
62#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
63 case CSR_MLME_GET_NEXT_REQUEST_ID:
64 return offsetof(struct CSR_SIGNAL_PRIMITIVE, u) + sizeof(CSR_MLME_GET_NEXT_REQUEST);
65#endif
66#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
67 case CSR_MLME_GET_NEXT_CONFIRM_ID:
68 return offsetof(struct CSR_SIGNAL_PRIMITIVE, u) + sizeof(CSR_MLME_GET_NEXT_CONFIRM);
69#endif
70#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
71 case CSR_MLME_POWERMGT_REQUEST_ID:
72 return offsetof(struct CSR_SIGNAL_PRIMITIVE, u) + sizeof(CSR_MLME_POWERMGT_REQUEST);
73#endif
74#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
75 case CSR_MLME_POWERMGT_CONFIRM_ID:
76 return offsetof(struct CSR_SIGNAL_PRIMITIVE, u) + sizeof(CSR_MLME_POWERMGT_CONFIRM);
77#endif
78#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
79 case CSR_MLME_SCAN_REQUEST_ID:
80 return offsetof(struct CSR_SIGNAL_PRIMITIVE, u) + sizeof(CSR_MLME_SCAN_REQUEST);
81#endif
82#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
83 case CSR_MLME_SCAN_CONFIRM_ID:
84 return offsetof(struct CSR_SIGNAL_PRIMITIVE, u) + sizeof(CSR_MLME_SCAN_CONFIRM);
85#endif
86#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
87 case CSR_MLME_HL_SYNC_REQUEST_ID:
88 return offsetof(struct CSR_SIGNAL_PRIMITIVE, u) + sizeof(CSR_MLME_HL_SYNC_REQUEST);
89#endif
90#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
91 case CSR_MLME_HL_SYNC_CONFIRM_ID:
92 return offsetof(struct CSR_SIGNAL_PRIMITIVE, u) + sizeof(CSR_MLME_HL_SYNC_CONFIRM);
93#endif
94#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
95 case CSR_MLME_MEASURE_REQUEST_ID:
96 return offsetof(struct CSR_SIGNAL_PRIMITIVE, u) + sizeof(CSR_MLME_MEASURE_REQUEST);
97#endif
98#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
99 case CSR_MLME_MEASURE_CONFIRM_ID:
100 return offsetof(struct CSR_SIGNAL_PRIMITIVE, u) + sizeof(CSR_MLME_MEASURE_CONFIRM);
101#endif
102#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
103 case CSR_MLME_MEASURE_INDICATION_ID:
104 return offsetof(struct CSR_SIGNAL_PRIMITIVE, u) + sizeof(CSR_MLME_MEASURE_INDICATION);
105#endif
106#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
107 case CSR_MLME_SETKEYS_REQUEST_ID:
108 return offsetof(struct CSR_SIGNAL_PRIMITIVE, u) + sizeof(CSR_MLME_SETKEYS_REQUEST);
109#endif
110#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
111 case CSR_MLME_SETKEYS_CONFIRM_ID:
112 return offsetof(struct CSR_SIGNAL_PRIMITIVE, u) + sizeof(CSR_MLME_SETKEYS_CONFIRM);
113#endif
114#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
115 case CSR_MLME_DELETEKEYS_REQUEST_ID:
116 return offsetof(struct CSR_SIGNAL_PRIMITIVE, u) + sizeof(CSR_MLME_DELETEKEYS_REQUEST);
117#endif
118#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
119 case CSR_MLME_DELETEKEYS_CONFIRM_ID:
120 return offsetof(struct CSR_SIGNAL_PRIMITIVE, u) + sizeof(CSR_MLME_DELETEKEYS_CONFIRM);
121#endif
122#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
123 case CSR_MLME_AUTONOMOUS_SCAN_LOSS_INDICATION_ID:
124 return offsetof(struct CSR_SIGNAL_PRIMITIVE, u) + sizeof(CSR_MLME_AUTONOMOUS_SCAN_LOSS_INDICATION);
125#endif
126#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
127 case CSR_MLME_CONNECTED_INDICATION_ID:
128 return offsetof(struct CSR_SIGNAL_PRIMITIVE, u) + sizeof(CSR_MLME_CONNECTED_INDICATION);
129#endif
130#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
131 case CSR_MLME_SCAN_CANCEL_REQUEST_ID:
132 return offsetof(struct CSR_SIGNAL_PRIMITIVE, u) + sizeof(CSR_MLME_SCAN_CANCEL_REQUEST);
133#endif
134#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
135 case CSR_MLME_HL_SYNC_CANCEL_REQUEST_ID:
136 return offsetof(struct CSR_SIGNAL_PRIMITIVE, u) + sizeof(CSR_MLME_HL_SYNC_CANCEL_REQUEST);
137#endif
138#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
139 case CSR_MLME_HL_SYNC_CANCEL_CONFIRM_ID:
140 return offsetof(struct CSR_SIGNAL_PRIMITIVE, u) + sizeof(CSR_MLME_HL_SYNC_CANCEL_CONFIRM);
141#endif
142#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
143 case CSR_MLME_ADD_PERIODIC_REQUEST_ID:
144 return offsetof(struct CSR_SIGNAL_PRIMITIVE, u) + sizeof(CSR_MLME_ADD_PERIODIC_REQUEST);
145#endif
146#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
147 case CSR_MLME_ADD_PERIODIC_CONFIRM_ID:
148 return offsetof(struct CSR_SIGNAL_PRIMITIVE, u) + sizeof(CSR_MLME_ADD_PERIODIC_CONFIRM);
149#endif
150#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
151 case CSR_MLME_DEL_PERIODIC_REQUEST_ID:
152 return offsetof(struct CSR_SIGNAL_PRIMITIVE, u) + sizeof(CSR_MLME_DEL_PERIODIC_REQUEST);
153#endif
154#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
155 case CSR_MLME_DEL_PERIODIC_CONFIRM_ID:
156 return offsetof(struct CSR_SIGNAL_PRIMITIVE, u) + sizeof(CSR_MLME_DEL_PERIODIC_CONFIRM);
157#endif
158#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
159 case CSR_MLME_ADD_AUTONOMOUS_SCAN_REQUEST_ID:
160 return offsetof(struct CSR_SIGNAL_PRIMITIVE, u) + sizeof(CSR_MLME_ADD_AUTONOMOUS_SCAN_REQUEST);
161#endif
162#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
163 case CSR_MLME_ADD_AUTONOMOUS_SCAN_CONFIRM_ID:
164 return offsetof(struct CSR_SIGNAL_PRIMITIVE, u) + sizeof(CSR_MLME_ADD_AUTONOMOUS_SCAN_CONFIRM);
165#endif
166#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
167 case CSR_MLME_DEL_AUTONOMOUS_SCAN_REQUEST_ID:
168 return offsetof(struct CSR_SIGNAL_PRIMITIVE, u) + sizeof(CSR_MLME_DEL_AUTONOMOUS_SCAN_REQUEST);
169#endif
170#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
171 case CSR_MLME_DEL_AUTONOMOUS_SCAN_CONFIRM_ID:
172 return offsetof(struct CSR_SIGNAL_PRIMITIVE, u) + sizeof(CSR_MLME_DEL_AUTONOMOUS_SCAN_CONFIRM);
173#endif
174#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
175 case CSR_MLME_SET_PACKET_FILTER_REQUEST_ID:
176 return offsetof(struct CSR_SIGNAL_PRIMITIVE, u) + sizeof(CSR_MLME_SET_PACKET_FILTER_REQUEST);
177#endif
178#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
179 case CSR_MLME_SET_PACKET_FILTER_CONFIRM_ID:
180 return offsetof(struct CSR_SIGNAL_PRIMITIVE, u) + sizeof(CSR_MLME_SET_PACKET_FILTER_CONFIRM);
181#endif
182#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
183 case CSR_MLME_STOP_MEASURE_REQUEST_ID:
184 return offsetof(struct CSR_SIGNAL_PRIMITIVE, u) + sizeof(CSR_MLME_STOP_MEASURE_REQUEST);
185#endif
186#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
187 case CSR_MLME_STOP_MEASURE_CONFIRM_ID:
188 return offsetof(struct CSR_SIGNAL_PRIMITIVE, u) + sizeof(CSR_MLME_STOP_MEASURE_CONFIRM);
189#endif
190#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
191 case CSR_MLME_PAUSE_AUTONOMOUS_SCAN_REQUEST_ID:
192 return offsetof(struct CSR_SIGNAL_PRIMITIVE, u) + sizeof(CSR_MLME_PAUSE_AUTONOMOUS_SCAN_REQUEST);
193#endif
194#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
195 case CSR_MLME_PAUSE_AUTONOMOUS_SCAN_CONFIRM_ID:
196 return offsetof(struct CSR_SIGNAL_PRIMITIVE, u) + sizeof(CSR_MLME_PAUSE_AUTONOMOUS_SCAN_CONFIRM);
197#endif
198#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
199 case CSR_MLME_AUTONOMOUS_SCAN_DONE_INDICATION_ID:
200 return offsetof(struct CSR_SIGNAL_PRIMITIVE, u) + sizeof(CSR_MLME_AUTONOMOUS_SCAN_DONE_INDICATION);
201#endif
202#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
203 case CSR_MLME_ADD_TRIGGERED_GET_REQUEST_ID:
204 return offsetof(struct CSR_SIGNAL_PRIMITIVE, u) + sizeof(CSR_MLME_ADD_TRIGGERED_GET_REQUEST);
205#endif
206#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
207 case CSR_MLME_ADD_TRIGGERED_GET_CONFIRM_ID:
208 return offsetof(struct CSR_SIGNAL_PRIMITIVE, u) + sizeof(CSR_MLME_ADD_TRIGGERED_GET_CONFIRM);
209#endif
210#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
211 case CSR_MLME_DEL_TRIGGERED_GET_REQUEST_ID:
212 return offsetof(struct CSR_SIGNAL_PRIMITIVE, u) + sizeof(CSR_MLME_DEL_TRIGGERED_GET_REQUEST);
213#endif
214#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
215 case CSR_MLME_DEL_TRIGGERED_GET_CONFIRM_ID:
216 return offsetof(struct CSR_SIGNAL_PRIMITIVE, u) + sizeof(CSR_MLME_DEL_TRIGGERED_GET_CONFIRM);
217#endif
218#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
219 case CSR_MLME_TRIGGERED_GET_INDICATION_ID:
220 return offsetof(struct CSR_SIGNAL_PRIMITIVE, u) + sizeof(CSR_MLME_TRIGGERED_GET_INDICATION);
221#endif
222#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
223 case CSR_MLME_ADD_BLACKOUT_REQUEST_ID:
224 return offsetof(struct CSR_SIGNAL_PRIMITIVE, u) + sizeof(CSR_MLME_ADD_BLACKOUT_REQUEST);
225#endif
226#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
227 case CSR_MLME_ADD_BLACKOUT_CONFIRM_ID:
228 return offsetof(struct CSR_SIGNAL_PRIMITIVE, u) + sizeof(CSR_MLME_ADD_BLACKOUT_CONFIRM);
229#endif
230#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
231 case CSR_MLME_BLACKOUT_ENDED_INDICATION_ID:
232 return offsetof(struct CSR_SIGNAL_PRIMITIVE, u) + sizeof(CSR_MLME_BLACKOUT_ENDED_INDICATION);
233#endif
234#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
235 case CSR_MLME_DEL_BLACKOUT_REQUEST_ID:
236 return offsetof(struct CSR_SIGNAL_PRIMITIVE, u) + sizeof(CSR_MLME_DEL_BLACKOUT_REQUEST);
237#endif
238#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
239 case CSR_MLME_DEL_BLACKOUT_CONFIRM_ID:
240 return offsetof(struct CSR_SIGNAL_PRIMITIVE, u) + sizeof(CSR_MLME_DEL_BLACKOUT_CONFIRM);
241#endif
242#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
243 case CSR_MLME_ADD_RX_TRIGGER_REQUEST_ID:
244 return offsetof(struct CSR_SIGNAL_PRIMITIVE, u) + sizeof(CSR_MLME_ADD_RX_TRIGGER_REQUEST);
245#endif
246#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
247 case CSR_MLME_ADD_RX_TRIGGER_CONFIRM_ID:
248 return offsetof(struct CSR_SIGNAL_PRIMITIVE, u) + sizeof(CSR_MLME_ADD_RX_TRIGGER_CONFIRM);
249#endif
250#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
251 case CSR_MLME_DEL_RX_TRIGGER_REQUEST_ID:
252 return offsetof(struct CSR_SIGNAL_PRIMITIVE, u) + sizeof(CSR_MLME_DEL_RX_TRIGGER_REQUEST);
253#endif
254#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
255 case CSR_MLME_DEL_RX_TRIGGER_CONFIRM_ID:
256 return offsetof(struct CSR_SIGNAL_PRIMITIVE, u) + sizeof(CSR_MLME_DEL_RX_TRIGGER_CONFIRM);
257#endif
258#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
259 case CSR_MLME_CONNECT_STATUS_REQUEST_ID:
260 return offsetof(struct CSR_SIGNAL_PRIMITIVE, u) + sizeof(CSR_MLME_CONNECT_STATUS_REQUEST);
261#endif
262#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
263 case CSR_MLME_CONNECT_STATUS_CONFIRM_ID:
264 return offsetof(struct CSR_SIGNAL_PRIMITIVE, u) + sizeof(CSR_MLME_CONNECT_STATUS_CONFIRM);
265#endif
266#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
267 case CSR_MLME_MODIFY_BSS_PARAMETER_REQUEST_ID:
268 return offsetof(struct CSR_SIGNAL_PRIMITIVE, u) + sizeof(CSR_MLME_MODIFY_BSS_PARAMETER_REQUEST);
269#endif
270#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
271 case CSR_MLME_MODIFY_BSS_PARAMETER_CONFIRM_ID:
272 return offsetof(struct CSR_SIGNAL_PRIMITIVE, u) + sizeof(CSR_MLME_MODIFY_BSS_PARAMETER_CONFIRM);
273#endif
274#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
275 case CSR_MLME_ADD_TEMPLATE_REQUEST_ID:
276 return offsetof(struct CSR_SIGNAL_PRIMITIVE, u) + sizeof(CSR_MLME_ADD_TEMPLATE_REQUEST);
277#endif
278#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
279 case CSR_MLME_ADD_TEMPLATE_CONFIRM_ID:
280 return offsetof(struct CSR_SIGNAL_PRIMITIVE, u) + sizeof(CSR_MLME_ADD_TEMPLATE_CONFIRM);
281#endif
282#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
283 case CSR_MLME_CONFIG_QUEUE_REQUEST_ID:
284 return offsetof(struct CSR_SIGNAL_PRIMITIVE, u) + sizeof(CSR_MLME_CONFIG_QUEUE_REQUEST);
285#endif
286#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
287 case CSR_MLME_CONFIG_QUEUE_CONFIRM_ID:
288 return offsetof(struct CSR_SIGNAL_PRIMITIVE, u) + sizeof(CSR_MLME_CONFIG_QUEUE_CONFIRM);
289#endif
290#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
291 case CSR_MLME_ADD_TSPEC_REQUEST_ID:
292 return offsetof(struct CSR_SIGNAL_PRIMITIVE, u) + sizeof(CSR_MLME_ADD_TSPEC_REQUEST);
293#endif
294#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
295 case CSR_MLME_ADD_TSPEC_CONFIRM_ID:
296 return offsetof(struct CSR_SIGNAL_PRIMITIVE, u) + sizeof(CSR_MLME_ADD_TSPEC_CONFIRM);
297#endif
298#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
299 case CSR_MLME_DEL_TSPEC_REQUEST_ID:
300 return offsetof(struct CSR_SIGNAL_PRIMITIVE, u) + sizeof(CSR_MLME_DEL_TSPEC_REQUEST);
301#endif
302#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
303 case CSR_MLME_DEL_TSPEC_CONFIRM_ID:
304 return offsetof(struct CSR_SIGNAL_PRIMITIVE, u) + sizeof(CSR_MLME_DEL_TSPEC_CONFIRM);
305#endif
306#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
307 case CSR_MLME_START_AGGREGATION_REQUEST_ID:
308 return offsetof(struct CSR_SIGNAL_PRIMITIVE, u) + sizeof(CSR_MLME_START_AGGREGATION_REQUEST);
309#endif
310#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
311 case CSR_MLME_START_AGGREGATION_CONFIRM_ID:
312 return offsetof(struct CSR_SIGNAL_PRIMITIVE, u) + sizeof(CSR_MLME_START_AGGREGATION_CONFIRM);
313#endif
314#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
315 case CSR_MLME_BLOCKACK_ERROR_INDICATION_ID:
316 return offsetof(struct CSR_SIGNAL_PRIMITIVE, u) + sizeof(CSR_MLME_BLOCKACK_ERROR_INDICATION);
317#endif
318#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
319 case CSR_MLME_STOP_AGGREGATION_REQUEST_ID:
320 return offsetof(struct CSR_SIGNAL_PRIMITIVE, u) + sizeof(CSR_MLME_STOP_AGGREGATION_REQUEST);
321#endif
322#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
323 case CSR_MLME_STOP_AGGREGATION_CONFIRM_ID:
324 return offsetof(struct CSR_SIGNAL_PRIMITIVE, u) + sizeof(CSR_MLME_STOP_AGGREGATION_CONFIRM);
325#endif
326#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
327 case CSR_MLME_SM_START_REQUEST_ID:
328 return offsetof(struct CSR_SIGNAL_PRIMITIVE, u) + sizeof(CSR_MLME_SM_START_REQUEST);
329#endif
330#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
331 case CSR_MLME_SM_START_CONFIRM_ID:
332 return offsetof(struct CSR_SIGNAL_PRIMITIVE, u) + sizeof(CSR_MLME_SM_START_CONFIRM);
333#endif
334#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
335 case CSR_MLME_LEAVE_REQUEST_ID:
336 return offsetof(struct CSR_SIGNAL_PRIMITIVE, u) + sizeof(CSR_MLME_LEAVE_REQUEST);
337#endif
338#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
339 case CSR_MLME_LEAVE_CONFIRM_ID:
340 return offsetof(struct CSR_SIGNAL_PRIMITIVE, u) + sizeof(CSR_MLME_LEAVE_CONFIRM);
341#endif
342 case CSR_MLME_SET_TIM_REQUEST_ID:
343 return offsetof(struct CSR_SIGNAL_PRIMITIVE, u) + sizeof(CSR_MLME_SET_TIM_REQUEST);
344 case CSR_MLME_SET_TIM_CONFIRM_ID:
345 return offsetof(struct CSR_SIGNAL_PRIMITIVE, u) + sizeof(CSR_MLME_SET_TIM_CONFIRM);
346#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
347 case CSR_MLME_GET_KEY_SEQUENCE_REQUEST_ID:
348 return offsetof(struct CSR_SIGNAL_PRIMITIVE, u) + sizeof(CSR_MLME_GET_KEY_SEQUENCE_REQUEST);
349#endif
350#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
351 case CSR_MLME_GET_KEY_SEQUENCE_CONFIRM_ID:
352 return offsetof(struct CSR_SIGNAL_PRIMITIVE, u) + sizeof(CSR_MLME_GET_KEY_SEQUENCE_CONFIRM);
353#endif
354#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
355 case CSR_MLME_SET_CHANNEL_REQUEST_ID:
356 return offsetof(struct CSR_SIGNAL_PRIMITIVE, u) + sizeof(CSR_MLME_SET_CHANNEL_REQUEST);
357#endif
358#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
359 case CSR_MLME_SET_CHANNEL_CONFIRM_ID:
360 return offsetof(struct CSR_SIGNAL_PRIMITIVE, u) + sizeof(CSR_MLME_SET_CHANNEL_CONFIRM);
361#endif
362#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
363 case CSR_MLME_ADD_MULTICAST_ADDRESS_REQUEST_ID:
364 return offsetof(struct CSR_SIGNAL_PRIMITIVE, u) + sizeof(CSR_MLME_ADD_MULTICAST_ADDRESS_REQUEST);
365#endif
366#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
367 case CSR_MLME_ADD_MULTICAST_ADDRESS_CONFIRM_ID:
368 return offsetof(struct CSR_SIGNAL_PRIMITIVE, u) + sizeof(CSR_MLME_ADD_MULTICAST_ADDRESS_CONFIRM);
369#endif
370 case CSR_DEBUG_STRING_INDICATION_ID:
371 return offsetof(struct CSR_SIGNAL_PRIMITIVE, u) + sizeof(CSR_DEBUG_STRING_INDICATION);
372 case CSR_DEBUG_WORD16_INDICATION_ID:
373 return offsetof(struct CSR_SIGNAL_PRIMITIVE, u) + sizeof(CSR_DEBUG_WORD16_INDICATION);
374 case CSR_DEBUG_GENERIC_REQUEST_ID:
375 return offsetof(struct CSR_SIGNAL_PRIMITIVE, u) + sizeof(CSR_DEBUG_GENERIC_REQUEST);
376 case CSR_DEBUG_GENERIC_CONFIRM_ID:
377 return offsetof(struct CSR_SIGNAL_PRIMITIVE, u) + sizeof(CSR_DEBUG_GENERIC_CONFIRM);
378 case CSR_DEBUG_GENERIC_INDICATION_ID:
379 return offsetof(struct CSR_SIGNAL_PRIMITIVE, u) + sizeof(CSR_DEBUG_GENERIC_INDICATION);
380 default:
381 return 0;
382 }
383}
384
385
386s32 SigGetDataRefs(CSR_SIGNAL *aSignal, CSR_DATAREF **aDataRef)
387{
388 s32 numRefs = 0;
389
390 switch (aSignal->SignalPrimitiveHeader.SignalId)
391 {
392 case CSR_MA_PACKET_REQUEST_ID:
393 aDataRef[numRefs++] = &aSignal->u.MaPacketRequest.Data;
394 aDataRef[numRefs++] = &aSignal->u.MaPacketRequest.Dummydataref2;
395 break;
396 case CSR_MA_PACKET_CONFIRM_ID:
397 aDataRef[numRefs++] = &aSignal->u.MaPacketConfirm.Dummydataref1;
398 aDataRef[numRefs++] = &aSignal->u.MaPacketConfirm.Dummydataref2;
399 break;
400 case CSR_MA_PACKET_INDICATION_ID:
401 aDataRef[numRefs++] = &aSignal->u.MaPacketIndication.Data;
402 aDataRef[numRefs++] = &aSignal->u.MaPacketIndication.Dummydataref2;
403 break;
404 case CSR_MA_PACKET_CANCEL_REQUEST_ID:
405 aDataRef[numRefs++] = &aSignal->u.MaPacketCancelRequest.Dummydataref1;
406 aDataRef[numRefs++] = &aSignal->u.MaPacketCancelRequest.Dummydataref2;
407 break;
408 case CSR_MA_VIF_AVAILABILITY_RESPONSE_ID:
409 aDataRef[numRefs++] = &aSignal->u.MaVifAvailabilityResponse.Dummydataref1;
410 aDataRef[numRefs++] = &aSignal->u.MaVifAvailabilityResponse.Dummydataref2;
411 break;
412 case CSR_MA_VIF_AVAILABILITY_INDICATION_ID:
413 aDataRef[numRefs++] = &aSignal->u.MaVifAvailabilityIndication.Dummydataref1;
414 aDataRef[numRefs++] = &aSignal->u.MaVifAvailabilityIndication.Dummydataref2;
415 break;
416 case CSR_MA_PACKET_ERROR_INDICATION_ID:
417 aDataRef[numRefs++] = &aSignal->u.MaPacketErrorIndication.Dummydataref1;
418 aDataRef[numRefs++] = &aSignal->u.MaPacketErrorIndication.Dummydataref2;
419 break;
420#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
421 case CSR_MLME_RESET_REQUEST_ID:
422 aDataRef[numRefs++] = &aSignal->u.MlmeResetRequest.Dummydataref1;
423 aDataRef[numRefs++] = &aSignal->u.MlmeResetRequest.Dummydataref2;
424 break;
425#endif
426#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
427 case CSR_MLME_RESET_CONFIRM_ID:
428 aDataRef[numRefs++] = &aSignal->u.MlmeResetConfirm.Dummydataref1;
429 aDataRef[numRefs++] = &aSignal->u.MlmeResetConfirm.Dummydataref2;
430 break;
431#endif
432#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
433 case CSR_MLME_GET_REQUEST_ID:
434 aDataRef[numRefs++] = &aSignal->u.MlmeGetRequest.MibAttribute;
435 aDataRef[numRefs++] = &aSignal->u.MlmeGetRequest.Dummydataref2;
436 break;
437#endif
438#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
439 case CSR_MLME_GET_CONFIRM_ID:
440 aDataRef[numRefs++] = &aSignal->u.MlmeGetConfirm.MibAttributeValue;
441 aDataRef[numRefs++] = &aSignal->u.MlmeGetConfirm.Dummydataref2;
442 break;
443#endif
444#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
445 case CSR_MLME_SET_REQUEST_ID:
446 aDataRef[numRefs++] = &aSignal->u.MlmeSetRequest.MibAttributeValue;
447 aDataRef[numRefs++] = &aSignal->u.MlmeSetRequest.Dummydataref2;
448 break;
449#endif
450#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
451 case CSR_MLME_SET_CONFIRM_ID:
452 aDataRef[numRefs++] = &aSignal->u.MlmeSetConfirm.MibAttributeValue;
453 aDataRef[numRefs++] = &aSignal->u.MlmeSetConfirm.Dummydataref2;
454 break;
455#endif
456#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
457 case CSR_MLME_GET_NEXT_REQUEST_ID:
458 aDataRef[numRefs++] = &aSignal->u.MlmeGetNextRequest.MibAttribute;
459 aDataRef[numRefs++] = &aSignal->u.MlmeGetNextRequest.Dummydataref2;
460 break;
461#endif
462#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
463 case CSR_MLME_GET_NEXT_CONFIRM_ID:
464 aDataRef[numRefs++] = &aSignal->u.MlmeGetNextConfirm.MibAttributeValue;
465 aDataRef[numRefs++] = &aSignal->u.MlmeGetNextConfirm.Dummydataref2;
466 break;
467#endif
468#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
469 case CSR_MLME_POWERMGT_REQUEST_ID:
470 aDataRef[numRefs++] = &aSignal->u.MlmePowermgtRequest.Dummydataref1;
471 aDataRef[numRefs++] = &aSignal->u.MlmePowermgtRequest.Dummydataref2;
472 break;
473#endif
474#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
475 case CSR_MLME_POWERMGT_CONFIRM_ID:
476 aDataRef[numRefs++] = &aSignal->u.MlmePowermgtConfirm.Dummydataref1;
477 aDataRef[numRefs++] = &aSignal->u.MlmePowermgtConfirm.Dummydataref2;
478 break;
479#endif
480#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
481 case CSR_MLME_SCAN_REQUEST_ID:
482 aDataRef[numRefs++] = &aSignal->u.MlmeScanRequest.ChannelList;
483 aDataRef[numRefs++] = &aSignal->u.MlmeScanRequest.InformationElements;
484 break;
485#endif
486#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
487 case CSR_MLME_SCAN_CONFIRM_ID:
488 aDataRef[numRefs++] = &aSignal->u.MlmeScanConfirm.Dummydataref1;
489 aDataRef[numRefs++] = &aSignal->u.MlmeScanConfirm.Dummydataref2;
490 break;
491#endif
492#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
493 case CSR_MLME_HL_SYNC_REQUEST_ID:
494 aDataRef[numRefs++] = &aSignal->u.MlmeHlSyncRequest.Dummydataref1;
495 aDataRef[numRefs++] = &aSignal->u.MlmeHlSyncRequest.Dummydataref2;
496 break;
497#endif
498#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
499 case CSR_MLME_HL_SYNC_CONFIRM_ID:
500 aDataRef[numRefs++] = &aSignal->u.MlmeHlSyncConfirm.Dummydataref1;
501 aDataRef[numRefs++] = &aSignal->u.MlmeHlSyncConfirm.Dummydataref2;
502 break;
503#endif
504#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
505 case CSR_MLME_MEASURE_REQUEST_ID:
506 aDataRef[numRefs++] = &aSignal->u.MlmeMeasureRequest.MeasurementRequestSet;
507 aDataRef[numRefs++] = &aSignal->u.MlmeMeasureRequest.Dummydataref2;
508 break;
509#endif
510#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
511 case CSR_MLME_MEASURE_CONFIRM_ID:
512 aDataRef[numRefs++] = &aSignal->u.MlmeMeasureConfirm.Dummydataref1;
513 aDataRef[numRefs++] = &aSignal->u.MlmeMeasureConfirm.Dummydataref2;
514 break;
515#endif
516#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
517 case CSR_MLME_MEASURE_INDICATION_ID:
518 aDataRef[numRefs++] = &aSignal->u.MlmeMeasureIndication.MeasurementReportSet;
519 aDataRef[numRefs++] = &aSignal->u.MlmeMeasureIndication.Dummydataref2;
520 break;
521#endif
522#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
523 case CSR_MLME_SETKEYS_REQUEST_ID:
524 aDataRef[numRefs++] = &aSignal->u.MlmeSetkeysRequest.Key;
525 aDataRef[numRefs++] = &aSignal->u.MlmeSetkeysRequest.Dummydataref2;
526 break;
527#endif
528#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
529 case CSR_MLME_SETKEYS_CONFIRM_ID:
530 aDataRef[numRefs++] = &aSignal->u.MlmeSetkeysConfirm.Dummydataref1;
531 aDataRef[numRefs++] = &aSignal->u.MlmeSetkeysConfirm.Dummydataref2;
532 break;
533#endif
534#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
535 case CSR_MLME_DELETEKEYS_REQUEST_ID:
536 aDataRef[numRefs++] = &aSignal->u.MlmeDeletekeysRequest.Dummydataref1;
537 aDataRef[numRefs++] = &aSignal->u.MlmeDeletekeysRequest.Dummydataref2;
538 break;
539#endif
540#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
541 case CSR_MLME_DELETEKEYS_CONFIRM_ID:
542 aDataRef[numRefs++] = &aSignal->u.MlmeDeletekeysConfirm.Dummydataref1;
543 aDataRef[numRefs++] = &aSignal->u.MlmeDeletekeysConfirm.Dummydataref2;
544 break;
545#endif
546#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
547 case CSR_MLME_AUTONOMOUS_SCAN_LOSS_INDICATION_ID:
548 aDataRef[numRefs++] = &aSignal->u.MlmeAutonomousScanLossIndication.Dummydataref1;
549 aDataRef[numRefs++] = &aSignal->u.MlmeAutonomousScanLossIndication.Dummydataref2;
550 break;
551#endif
552#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
553 case CSR_MLME_CONNECTED_INDICATION_ID:
554 aDataRef[numRefs++] = &aSignal->u.MlmeConnectedIndication.Dummydataref1;
555 aDataRef[numRefs++] = &aSignal->u.MlmeConnectedIndication.Dummydataref2;
556 break;
557#endif
558#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
559 case CSR_MLME_SCAN_CANCEL_REQUEST_ID:
560 aDataRef[numRefs++] = &aSignal->u.MlmeScanCancelRequest.Dummydataref1;
561 aDataRef[numRefs++] = &aSignal->u.MlmeScanCancelRequest.Dummydataref2;
562 break;
563#endif
564#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
565 case CSR_MLME_HL_SYNC_CANCEL_REQUEST_ID:
566 aDataRef[numRefs++] = &aSignal->u.MlmeHlSyncCancelRequest.Dummydataref1;
567 aDataRef[numRefs++] = &aSignal->u.MlmeHlSyncCancelRequest.Dummydataref2;
568 break;
569#endif
570#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
571 case CSR_MLME_HL_SYNC_CANCEL_CONFIRM_ID:
572 aDataRef[numRefs++] = &aSignal->u.MlmeHlSyncCancelConfirm.Dummydataref1;
573 aDataRef[numRefs++] = &aSignal->u.MlmeHlSyncCancelConfirm.Dummydataref2;
574 break;
575#endif
576#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
577 case CSR_MLME_ADD_PERIODIC_REQUEST_ID:
578 aDataRef[numRefs++] = &aSignal->u.MlmeAddPeriodicRequest.Dummydataref1;
579 aDataRef[numRefs++] = &aSignal->u.MlmeAddPeriodicRequest.Dummydataref2;
580 break;
581#endif
582#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
583 case CSR_MLME_ADD_PERIODIC_CONFIRM_ID:
584 aDataRef[numRefs++] = &aSignal->u.MlmeAddPeriodicConfirm.Dummydataref1;
585 aDataRef[numRefs++] = &aSignal->u.MlmeAddPeriodicConfirm.Dummydataref2;
586 break;
587#endif
588#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
589 case CSR_MLME_DEL_PERIODIC_REQUEST_ID:
590 aDataRef[numRefs++] = &aSignal->u.MlmeDelPeriodicRequest.Dummydataref1;
591 aDataRef[numRefs++] = &aSignal->u.MlmeDelPeriodicRequest.Dummydataref2;
592 break;
593#endif
594#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
595 case CSR_MLME_DEL_PERIODIC_CONFIRM_ID:
596 aDataRef[numRefs++] = &aSignal->u.MlmeDelPeriodicConfirm.Dummydataref1;
597 aDataRef[numRefs++] = &aSignal->u.MlmeDelPeriodicConfirm.Dummydataref2;
598 break;
599#endif
600#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
601 case CSR_MLME_ADD_AUTONOMOUS_SCAN_REQUEST_ID:
602 aDataRef[numRefs++] = &aSignal->u.MlmeAddAutonomousScanRequest.ChannelList;
603 aDataRef[numRefs++] = &aSignal->u.MlmeAddAutonomousScanRequest.InformationElements;
604 break;
605#endif
606#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
607 case CSR_MLME_ADD_AUTONOMOUS_SCAN_CONFIRM_ID:
608 aDataRef[numRefs++] = &aSignal->u.MlmeAddAutonomousScanConfirm.Dummydataref1;
609 aDataRef[numRefs++] = &aSignal->u.MlmeAddAutonomousScanConfirm.Dummydataref2;
610 break;
611#endif
612#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
613 case CSR_MLME_DEL_AUTONOMOUS_SCAN_REQUEST_ID:
614 aDataRef[numRefs++] = &aSignal->u.MlmeDelAutonomousScanRequest.Dummydataref1;
615 aDataRef[numRefs++] = &aSignal->u.MlmeDelAutonomousScanRequest.Dummydataref2;
616 break;
617#endif
618#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
619 case CSR_MLME_DEL_AUTONOMOUS_SCAN_CONFIRM_ID:
620 aDataRef[numRefs++] = &aSignal->u.MlmeDelAutonomousScanConfirm.Dummydataref1;
621 aDataRef[numRefs++] = &aSignal->u.MlmeDelAutonomousScanConfirm.Dummydataref2;
622 break;
623#endif
624#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
625 case CSR_MLME_SET_PACKET_FILTER_REQUEST_ID:
626 aDataRef[numRefs++] = &aSignal->u.MlmeSetPacketFilterRequest.InformationElements;
627 aDataRef[numRefs++] = &aSignal->u.MlmeSetPacketFilterRequest.Dummydataref2;
628 break;
629#endif
630#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
631 case CSR_MLME_SET_PACKET_FILTER_CONFIRM_ID:
632 aDataRef[numRefs++] = &aSignal->u.MlmeSetPacketFilterConfirm.Dummydataref1;
633 aDataRef[numRefs++] = &aSignal->u.MlmeSetPacketFilterConfirm.Dummydataref2;
634 break;
635#endif
636#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
637 case CSR_MLME_STOP_MEASURE_REQUEST_ID:
638 aDataRef[numRefs++] = &aSignal->u.MlmeStopMeasureRequest.Dummydataref1;
639 aDataRef[numRefs++] = &aSignal->u.MlmeStopMeasureRequest.Dummydataref2;
640 break;
641#endif
642#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
643 case CSR_MLME_STOP_MEASURE_CONFIRM_ID:
644 aDataRef[numRefs++] = &aSignal->u.MlmeStopMeasureConfirm.Dummydataref1;
645 aDataRef[numRefs++] = &aSignal->u.MlmeStopMeasureConfirm.Dummydataref2;
646 break;
647#endif
648#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
649 case CSR_MLME_PAUSE_AUTONOMOUS_SCAN_REQUEST_ID:
650 aDataRef[numRefs++] = &aSignal->u.MlmePauseAutonomousScanRequest.Dummydataref1;
651 aDataRef[numRefs++] = &aSignal->u.MlmePauseAutonomousScanRequest.Dummydataref2;
652 break;
653#endif
654#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
655 case CSR_MLME_PAUSE_AUTONOMOUS_SCAN_CONFIRM_ID:
656 aDataRef[numRefs++] = &aSignal->u.MlmePauseAutonomousScanConfirm.Dummydataref1;
657 aDataRef[numRefs++] = &aSignal->u.MlmePauseAutonomousScanConfirm.Dummydataref2;
658 break;
659#endif
660#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
661 case CSR_MLME_AUTONOMOUS_SCAN_DONE_INDICATION_ID:
662 aDataRef[numRefs++] = &aSignal->u.MlmeAutonomousScanDoneIndication.Dummydataref1;
663 aDataRef[numRefs++] = &aSignal->u.MlmeAutonomousScanDoneIndication.Dummydataref2;
664 break;
665#endif
666#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
667 case CSR_MLME_ADD_TRIGGERED_GET_REQUEST_ID:
668 aDataRef[numRefs++] = &aSignal->u.MlmeAddTriggeredGetRequest.MibAttribute;
669 aDataRef[numRefs++] = &aSignal->u.MlmeAddTriggeredGetRequest.Dummydataref2;
670 break;
671#endif
672#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
673 case CSR_MLME_ADD_TRIGGERED_GET_CONFIRM_ID:
674 aDataRef[numRefs++] = &aSignal->u.MlmeAddTriggeredGetConfirm.Dummydataref1;
675 aDataRef[numRefs++] = &aSignal->u.MlmeAddTriggeredGetConfirm.Dummydataref2;
676 break;
677#endif
678#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
679 case CSR_MLME_DEL_TRIGGERED_GET_REQUEST_ID:
680 aDataRef[numRefs++] = &aSignal->u.MlmeDelTriggeredGetRequest.Dummydataref1;
681 aDataRef[numRefs++] = &aSignal->u.MlmeDelTriggeredGetRequest.Dummydataref2;
682 break;
683#endif
684#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
685 case CSR_MLME_DEL_TRIGGERED_GET_CONFIRM_ID:
686 aDataRef[numRefs++] = &aSignal->u.MlmeDelTriggeredGetConfirm.Dummydataref1;
687 aDataRef[numRefs++] = &aSignal->u.MlmeDelTriggeredGetConfirm.Dummydataref2;
688 break;
689#endif
690#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
691 case CSR_MLME_TRIGGERED_GET_INDICATION_ID:
692 aDataRef[numRefs++] = &aSignal->u.MlmeTriggeredGetIndication.MibAttributeValue;
693 aDataRef[numRefs++] = &aSignal->u.MlmeTriggeredGetIndication.Dummydataref2;
694 break;
695#endif
696#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
697 case CSR_MLME_ADD_BLACKOUT_REQUEST_ID:
698 aDataRef[numRefs++] = &aSignal->u.MlmeAddBlackoutRequest.Dummydataref1;
699 aDataRef[numRefs++] = &aSignal->u.MlmeAddBlackoutRequest.Dummydataref2;
700 break;
701#endif
702#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
703 case CSR_MLME_ADD_BLACKOUT_CONFIRM_ID:
704 aDataRef[numRefs++] = &aSignal->u.MlmeAddBlackoutConfirm.Dummydataref1;
705 aDataRef[numRefs++] = &aSignal->u.MlmeAddBlackoutConfirm.Dummydataref2;
706 break;
707#endif
708#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
709 case CSR_MLME_BLACKOUT_ENDED_INDICATION_ID:
710 aDataRef[numRefs++] = &aSignal->u.MlmeBlackoutEndedIndication.Dummydataref1;
711 aDataRef[numRefs++] = &aSignal->u.MlmeBlackoutEndedIndication.Dummydataref2;
712 break;
713#endif
714#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
715 case CSR_MLME_DEL_BLACKOUT_REQUEST_ID:
716 aDataRef[numRefs++] = &aSignal->u.MlmeDelBlackoutRequest.Dummydataref1;
717 aDataRef[numRefs++] = &aSignal->u.MlmeDelBlackoutRequest.Dummydataref2;
718 break;
719#endif
720#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
721 case CSR_MLME_DEL_BLACKOUT_CONFIRM_ID:
722 aDataRef[numRefs++] = &aSignal->u.MlmeDelBlackoutConfirm.Dummydataref1;
723 aDataRef[numRefs++] = &aSignal->u.MlmeDelBlackoutConfirm.Dummydataref2;
724 break;
725#endif
726#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
727 case CSR_MLME_ADD_RX_TRIGGER_REQUEST_ID:
728 aDataRef[numRefs++] = &aSignal->u.MlmeAddRxTriggerRequest.InformationElements;
729 aDataRef[numRefs++] = &aSignal->u.MlmeAddRxTriggerRequest.Dummydataref2;
730 break;
731#endif
732#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
733 case CSR_MLME_ADD_RX_TRIGGER_CONFIRM_ID:
734 aDataRef[numRefs++] = &aSignal->u.MlmeAddRxTriggerConfirm.Dummydataref1;
735 aDataRef[numRefs++] = &aSignal->u.MlmeAddRxTriggerConfirm.Dummydataref2;
736 break;
737#endif
738#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
739 case CSR_MLME_DEL_RX_TRIGGER_REQUEST_ID:
740 aDataRef[numRefs++] = &aSignal->u.MlmeDelRxTriggerRequest.Dummydataref1;
741 aDataRef[numRefs++] = &aSignal->u.MlmeDelRxTriggerRequest.Dummydataref2;
742 break;
743#endif
744#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
745 case CSR_MLME_DEL_RX_TRIGGER_CONFIRM_ID:
746 aDataRef[numRefs++] = &aSignal->u.MlmeDelRxTriggerConfirm.Dummydataref1;
747 aDataRef[numRefs++] = &aSignal->u.MlmeDelRxTriggerConfirm.Dummydataref2;
748 break;
749#endif
750#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
751 case CSR_MLME_CONNECT_STATUS_REQUEST_ID:
752 aDataRef[numRefs++] = &aSignal->u.MlmeConnectStatusRequest.InformationElements;
753 aDataRef[numRefs++] = &aSignal->u.MlmeConnectStatusRequest.Dummydataref2;
754 break;
755#endif
756#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
757 case CSR_MLME_CONNECT_STATUS_CONFIRM_ID:
758 aDataRef[numRefs++] = &aSignal->u.MlmeConnectStatusConfirm.Dummydataref1;
759 aDataRef[numRefs++] = &aSignal->u.MlmeConnectStatusConfirm.Dummydataref2;
760 break;
761#endif
762#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
763 case CSR_MLME_MODIFY_BSS_PARAMETER_REQUEST_ID:
764 aDataRef[numRefs++] = &aSignal->u.MlmeModifyBssParameterRequest.Data;
765 aDataRef[numRefs++] = &aSignal->u.MlmeModifyBssParameterRequest.Dummydataref2;
766 break;
767#endif
768#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
769 case CSR_MLME_MODIFY_BSS_PARAMETER_CONFIRM_ID:
770 aDataRef[numRefs++] = &aSignal->u.MlmeModifyBssParameterConfirm.Dummydataref1;
771 aDataRef[numRefs++] = &aSignal->u.MlmeModifyBssParameterConfirm.Dummydataref2;
772 break;
773#endif
774#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
775 case CSR_MLME_ADD_TEMPLATE_REQUEST_ID:
776 aDataRef[numRefs++] = &aSignal->u.MlmeAddTemplateRequest.Data1;
777 aDataRef[numRefs++] = &aSignal->u.MlmeAddTemplateRequest.Data2;
778 break;
779#endif
780#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
781 case CSR_MLME_ADD_TEMPLATE_CONFIRM_ID:
782 aDataRef[numRefs++] = &aSignal->u.MlmeAddTemplateConfirm.Dummydataref1;
783 aDataRef[numRefs++] = &aSignal->u.MlmeAddTemplateConfirm.Dummydataref2;
784 break;
785#endif
786#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
787 case CSR_MLME_CONFIG_QUEUE_REQUEST_ID:
788 aDataRef[numRefs++] = &aSignal->u.MlmeConfigQueueRequest.Dummydataref1;
789 aDataRef[numRefs++] = &aSignal->u.MlmeConfigQueueRequest.Dummydataref2;
790 break;
791#endif
792#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
793 case CSR_MLME_CONFIG_QUEUE_CONFIRM_ID:
794 aDataRef[numRefs++] = &aSignal->u.MlmeConfigQueueConfirm.Dummydataref1;
795 aDataRef[numRefs++] = &aSignal->u.MlmeConfigQueueConfirm.Dummydataref2;
796 break;
797#endif
798#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
799 case CSR_MLME_ADD_TSPEC_REQUEST_ID:
800 aDataRef[numRefs++] = &aSignal->u.MlmeAddTspecRequest.Dummydataref1;
801 aDataRef[numRefs++] = &aSignal->u.MlmeAddTspecRequest.Dummydataref2;
802 break;
803#endif
804#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
805 case CSR_MLME_ADD_TSPEC_CONFIRM_ID:
806 aDataRef[numRefs++] = &aSignal->u.MlmeAddTspecConfirm.Dummydataref1;
807 aDataRef[numRefs++] = &aSignal->u.MlmeAddTspecConfirm.Dummydataref2;
808 break;
809#endif
810#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
811 case CSR_MLME_DEL_TSPEC_REQUEST_ID:
812 aDataRef[numRefs++] = &aSignal->u.MlmeDelTspecRequest.Dummydataref1;
813 aDataRef[numRefs++] = &aSignal->u.MlmeDelTspecRequest.Dummydataref2;
814 break;
815#endif
816#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
817 case CSR_MLME_DEL_TSPEC_CONFIRM_ID:
818 aDataRef[numRefs++] = &aSignal->u.MlmeDelTspecConfirm.Dummydataref1;
819 aDataRef[numRefs++] = &aSignal->u.MlmeDelTspecConfirm.Dummydataref2;
820 break;
821#endif
822#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
823 case CSR_MLME_START_AGGREGATION_REQUEST_ID:
824 aDataRef[numRefs++] = &aSignal->u.MlmeStartAggregationRequest.Dummydataref1;
825 aDataRef[numRefs++] = &aSignal->u.MlmeStartAggregationRequest.Dummydataref2;
826 break;
827#endif
828#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
829 case CSR_MLME_START_AGGREGATION_CONFIRM_ID:
830 aDataRef[numRefs++] = &aSignal->u.MlmeStartAggregationConfirm.Dummydataref1;
831 aDataRef[numRefs++] = &aSignal->u.MlmeStartAggregationConfirm.Dummydataref2;
832 break;
833#endif
834#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
835 case CSR_MLME_BLOCKACK_ERROR_INDICATION_ID:
836 aDataRef[numRefs++] = &aSignal->u.MlmeBlockackErrorIndication.Dummydataref1;
837 aDataRef[numRefs++] = &aSignal->u.MlmeBlockackErrorIndication.Dummydataref2;
838 break;
839#endif
840#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
841 case CSR_MLME_STOP_AGGREGATION_REQUEST_ID:
842 aDataRef[numRefs++] = &aSignal->u.MlmeStopAggregationRequest.Dummydataref1;
843 aDataRef[numRefs++] = &aSignal->u.MlmeStopAggregationRequest.Dummydataref2;
844 break;
845#endif
846#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
847 case CSR_MLME_STOP_AGGREGATION_CONFIRM_ID:
848 aDataRef[numRefs++] = &aSignal->u.MlmeStopAggregationConfirm.Dummydataref1;
849 aDataRef[numRefs++] = &aSignal->u.MlmeStopAggregationConfirm.Dummydataref2;
850 break;
851#endif
852#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
853 case CSR_MLME_SM_START_REQUEST_ID:
854 aDataRef[numRefs++] = &aSignal->u.MlmeSmStartRequest.Beacon;
855 aDataRef[numRefs++] = &aSignal->u.MlmeSmStartRequest.BssParameters;
856 break;
857#endif
858#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
859 case CSR_MLME_SM_START_CONFIRM_ID:
860 aDataRef[numRefs++] = &aSignal->u.MlmeSmStartConfirm.Dummydataref1;
861 aDataRef[numRefs++] = &aSignal->u.MlmeSmStartConfirm.Dummydataref2;
862 break;
863#endif
864#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
865 case CSR_MLME_LEAVE_REQUEST_ID:
866 aDataRef[numRefs++] = &aSignal->u.MlmeLeaveRequest.Dummydataref1;
867 aDataRef[numRefs++] = &aSignal->u.MlmeLeaveRequest.Dummydataref2;
868 break;
869#endif
870#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
871 case CSR_MLME_LEAVE_CONFIRM_ID:
872 aDataRef[numRefs++] = &aSignal->u.MlmeLeaveConfirm.Dummydataref1;
873 aDataRef[numRefs++] = &aSignal->u.MlmeLeaveConfirm.Dummydataref2;
874 break;
875#endif
876 case CSR_MLME_SET_TIM_REQUEST_ID:
877 aDataRef[numRefs++] = &aSignal->u.MlmeSetTimRequest.Dummydataref1;
878 aDataRef[numRefs++] = &aSignal->u.MlmeSetTimRequest.Dummydataref2;
879 break;
880 case CSR_MLME_SET_TIM_CONFIRM_ID:
881 aDataRef[numRefs++] = &aSignal->u.MlmeSetTimConfirm.Dummydataref1;
882 aDataRef[numRefs++] = &aSignal->u.MlmeSetTimConfirm.Dummydataref2;
883 break;
884#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
885 case CSR_MLME_GET_KEY_SEQUENCE_REQUEST_ID:
886 aDataRef[numRefs++] = &aSignal->u.MlmeGetKeySequenceRequest.Dummydataref1;
887 aDataRef[numRefs++] = &aSignal->u.MlmeGetKeySequenceRequest.Dummydataref2;
888 break;
889#endif
890#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
891 case CSR_MLME_GET_KEY_SEQUENCE_CONFIRM_ID:
892 aDataRef[numRefs++] = &aSignal->u.MlmeGetKeySequenceConfirm.Dummydataref1;
893 aDataRef[numRefs++] = &aSignal->u.MlmeGetKeySequenceConfirm.Dummydataref2;
894 break;
895#endif
896#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
897 case CSR_MLME_SET_CHANNEL_REQUEST_ID:
898 aDataRef[numRefs++] = &aSignal->u.MlmeSetChannelRequest.Dummydataref1;
899 aDataRef[numRefs++] = &aSignal->u.MlmeSetChannelRequest.Dummydataref2;
900 break;
901#endif
902#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
903 case CSR_MLME_SET_CHANNEL_CONFIRM_ID:
904 aDataRef[numRefs++] = &aSignal->u.MlmeSetChannelConfirm.Dummydataref1;
905 aDataRef[numRefs++] = &aSignal->u.MlmeSetChannelConfirm.Dummydataref2;
906 break;
907#endif
908#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
909 case CSR_MLME_ADD_MULTICAST_ADDRESS_REQUEST_ID:
910 aDataRef[numRefs++] = &aSignal->u.MlmeAddMulticastAddressRequest.Data;
911 aDataRef[numRefs++] = &aSignal->u.MlmeAddMulticastAddressRequest.Dummydataref2;
912 break;
913#endif
914#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
915 case CSR_MLME_ADD_MULTICAST_ADDRESS_CONFIRM_ID:
916 aDataRef[numRefs++] = &aSignal->u.MlmeAddMulticastAddressConfirm.Dummydataref1;
917 aDataRef[numRefs++] = &aSignal->u.MlmeAddMulticastAddressConfirm.Dummydataref2;
918 break;
919#endif
920 case CSR_DEBUG_STRING_INDICATION_ID:
921 aDataRef[numRefs++] = &aSignal->u.DebugStringIndication.DebugMessage;
922 aDataRef[numRefs++] = &aSignal->u.DebugStringIndication.Dummydataref2;
923 break;
924 case CSR_DEBUG_WORD16_INDICATION_ID:
925 aDataRef[numRefs++] = &aSignal->u.DebugWord16Indication.Dummydataref1;
926 aDataRef[numRefs++] = &aSignal->u.DebugWord16Indication.Dummydataref2;
927 break;
928 case CSR_DEBUG_GENERIC_REQUEST_ID:
929 aDataRef[numRefs++] = &aSignal->u.DebugGenericRequest.DebugVariable;
930 aDataRef[numRefs++] = &aSignal->u.DebugGenericRequest.Dummydataref2;
931 break;
932 case CSR_DEBUG_GENERIC_CONFIRM_ID:
933 aDataRef[numRefs++] = &aSignal->u.DebugGenericConfirm.DebugVariable;
934 aDataRef[numRefs++] = &aSignal->u.DebugGenericConfirm.Dummydataref2;
935 break;
936 case CSR_DEBUG_GENERIC_INDICATION_ID:
937 aDataRef[numRefs++] = &aSignal->u.DebugGenericIndication.DebugVariable;
938 aDataRef[numRefs++] = &aSignal->u.DebugGenericIndication.Dummydataref2;
939 break;
940 default:
941 return 0;
942 }
943 return numRefs;
944}
945
946
947u32 SigGetFilterPos(u16 aSigID)
948{
949 switch (aSigID)
950 {
951 case CSR_MA_PACKET_REQUEST_ID:
952 return 0x00000001;
953 case CSR_MA_PACKET_CONFIRM_ID:
954 return 0x00000002;
955 case CSR_MA_PACKET_INDICATION_ID:
956 return 0x00000004;
957 case CSR_MA_PACKET_CANCEL_REQUEST_ID:
958 return 0x00000008;
959 case CSR_MA_VIF_AVAILABILITY_RESPONSE_ID:
960 return 0x00000010;
961 case CSR_MA_VIF_AVAILABILITY_INDICATION_ID:
962 return 0x00000020;
963 case CSR_MA_PACKET_ERROR_INDICATION_ID:
964 return 0x00000040;
965#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
966 case CSR_MLME_RESET_REQUEST_ID:
967 return 0x00000080;
968#endif
969#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
970 case CSR_MLME_RESET_CONFIRM_ID:
971 return 0x00000100;
972#endif
973#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
974 case CSR_MLME_GET_REQUEST_ID:
975 return 0x00000200;
976#endif
977#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
978 case CSR_MLME_GET_CONFIRM_ID:
979 return 0x00000400;
980#endif
981#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
982 case CSR_MLME_SET_REQUEST_ID:
983 return 0x00000800;
984#endif
985#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
986 case CSR_MLME_SET_CONFIRM_ID:
987 return 0x00001000;
988#endif
989#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
990 case CSR_MLME_GET_NEXT_REQUEST_ID:
991 return 0x00002000;
992#endif
993#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
994 case CSR_MLME_GET_NEXT_CONFIRM_ID:
995 return 0x00004000;
996#endif
997#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
998 case CSR_MLME_POWERMGT_REQUEST_ID:
999 return 0x00008000;
1000#endif
1001#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
1002 case CSR_MLME_POWERMGT_CONFIRM_ID:
1003 return 0x00010001;
1004#endif
1005#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
1006 case CSR_MLME_SCAN_REQUEST_ID:
1007 return 0x00010002;
1008#endif
1009#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
1010 case CSR_MLME_SCAN_CONFIRM_ID:
1011 return 0x00010004;
1012#endif
1013#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
1014 case CSR_MLME_HL_SYNC_REQUEST_ID:
1015 return 0x00010008;
1016#endif
1017#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
1018 case CSR_MLME_HL_SYNC_CONFIRM_ID:
1019 return 0x00010010;
1020#endif
1021#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
1022 case CSR_MLME_MEASURE_REQUEST_ID:
1023 return 0x00010020;
1024#endif
1025#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
1026 case CSR_MLME_MEASURE_CONFIRM_ID:
1027 return 0x00010040;
1028#endif
1029#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
1030 case CSR_MLME_MEASURE_INDICATION_ID:
1031 return 0x00010080;
1032#endif
1033#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
1034 case CSR_MLME_SETKEYS_REQUEST_ID:
1035 return 0x00010100;
1036#endif
1037#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
1038 case CSR_MLME_SETKEYS_CONFIRM_ID:
1039 return 0x00010200;
1040#endif
1041#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
1042 case CSR_MLME_DELETEKEYS_REQUEST_ID:
1043 return 0x00010400;
1044#endif
1045#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
1046 case CSR_MLME_DELETEKEYS_CONFIRM_ID:
1047 return 0x00010800;
1048#endif
1049#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
1050 case CSR_MLME_AUTONOMOUS_SCAN_LOSS_INDICATION_ID:
1051 return 0x00011000;
1052#endif
1053#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
1054 case CSR_MLME_CONNECTED_INDICATION_ID:
1055 return 0x00012000;
1056#endif
1057#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
1058 case CSR_MLME_SCAN_CANCEL_REQUEST_ID:
1059 return 0x00014000;
1060#endif
1061#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
1062 case CSR_MLME_HL_SYNC_CANCEL_REQUEST_ID:
1063 return 0x00018000;
1064#endif
1065#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
1066 case CSR_MLME_HL_SYNC_CANCEL_CONFIRM_ID:
1067 return 0x00020001;
1068#endif
1069#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
1070 case CSR_MLME_ADD_PERIODIC_REQUEST_ID:
1071 return 0x00020002;
1072#endif
1073#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
1074 case CSR_MLME_ADD_PERIODIC_CONFIRM_ID:
1075 return 0x00020004;
1076#endif
1077#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
1078 case CSR_MLME_DEL_PERIODIC_REQUEST_ID:
1079 return 0x00020008;
1080#endif
1081#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
1082 case CSR_MLME_DEL_PERIODIC_CONFIRM_ID:
1083 return 0x00020010;
1084#endif
1085#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
1086 case CSR_MLME_ADD_AUTONOMOUS_SCAN_REQUEST_ID:
1087 return 0x00020020;
1088#endif
1089#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
1090 case CSR_MLME_ADD_AUTONOMOUS_SCAN_CONFIRM_ID:
1091 return 0x00020040;
1092#endif
1093#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
1094 case CSR_MLME_DEL_AUTONOMOUS_SCAN_REQUEST_ID:
1095 return 0x00020080;
1096#endif
1097#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
1098 case CSR_MLME_DEL_AUTONOMOUS_SCAN_CONFIRM_ID:
1099 return 0x00020100;
1100#endif
1101#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
1102 case CSR_MLME_SET_PACKET_FILTER_REQUEST_ID:
1103 return 0x00020200;
1104#endif
1105#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
1106 case CSR_MLME_SET_PACKET_FILTER_CONFIRM_ID:
1107 return 0x00020400;
1108#endif
1109#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
1110 case CSR_MLME_STOP_MEASURE_REQUEST_ID:
1111 return 0x00020800;
1112#endif
1113#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
1114 case CSR_MLME_STOP_MEASURE_CONFIRM_ID:
1115 return 0x00021000;
1116#endif
1117#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
1118 case CSR_MLME_PAUSE_AUTONOMOUS_SCAN_REQUEST_ID:
1119 return 0x00022000;
1120#endif
1121#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
1122 case CSR_MLME_PAUSE_AUTONOMOUS_SCAN_CONFIRM_ID:
1123 return 0x00024000;
1124#endif
1125#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
1126 case CSR_MLME_AUTONOMOUS_SCAN_DONE_INDICATION_ID:
1127 return 0x00028000;
1128#endif
1129#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
1130 case CSR_MLME_ADD_TRIGGERED_GET_REQUEST_ID:
1131 return 0x00030001;
1132#endif
1133#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
1134 case CSR_MLME_ADD_TRIGGERED_GET_CONFIRM_ID:
1135 return 0x00030002;
1136#endif
1137#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
1138 case CSR_MLME_DEL_TRIGGERED_GET_REQUEST_ID:
1139 return 0x00030004;
1140#endif
1141#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
1142 case CSR_MLME_DEL_TRIGGERED_GET_CONFIRM_ID:
1143 return 0x00030008;
1144#endif
1145#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
1146 case CSR_MLME_TRIGGERED_GET_INDICATION_ID:
1147 return 0x00030010;
1148#endif
1149#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
1150 case CSR_MLME_ADD_BLACKOUT_REQUEST_ID:
1151 return 0x00030020;
1152#endif
1153#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
1154 case CSR_MLME_ADD_BLACKOUT_CONFIRM_ID:
1155 return 0x00030040;
1156#endif
1157#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
1158 case CSR_MLME_BLACKOUT_ENDED_INDICATION_ID:
1159 return 0x00030080;
1160#endif
1161#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
1162 case CSR_MLME_DEL_BLACKOUT_REQUEST_ID:
1163 return 0x00030100;
1164#endif
1165#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
1166 case CSR_MLME_DEL_BLACKOUT_CONFIRM_ID:
1167 return 0x00030200;
1168#endif
1169#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
1170 case CSR_MLME_ADD_RX_TRIGGER_REQUEST_ID:
1171 return 0x00030400;
1172#endif
1173#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
1174 case CSR_MLME_ADD_RX_TRIGGER_CONFIRM_ID:
1175 return 0x00030800;
1176#endif
1177#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
1178 case CSR_MLME_DEL_RX_TRIGGER_REQUEST_ID:
1179 return 0x00031000;
1180#endif
1181#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
1182 case CSR_MLME_DEL_RX_TRIGGER_CONFIRM_ID:
1183 return 0x00032000;
1184#endif
1185#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
1186 case CSR_MLME_CONNECT_STATUS_REQUEST_ID:
1187 return 0x00034000;
1188#endif
1189#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
1190 case CSR_MLME_CONNECT_STATUS_CONFIRM_ID:
1191 return 0x00038000;
1192#endif
1193#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
1194 case CSR_MLME_MODIFY_BSS_PARAMETER_REQUEST_ID:
1195 return 0x00040001;
1196#endif
1197#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
1198 case CSR_MLME_MODIFY_BSS_PARAMETER_CONFIRM_ID:
1199 return 0x00040002;
1200#endif
1201#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
1202 case CSR_MLME_ADD_TEMPLATE_REQUEST_ID:
1203 return 0x00040004;
1204#endif
1205#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
1206 case CSR_MLME_ADD_TEMPLATE_CONFIRM_ID:
1207 return 0x00040008;
1208#endif
1209#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
1210 case CSR_MLME_CONFIG_QUEUE_REQUEST_ID:
1211 return 0x00040010;
1212#endif
1213#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
1214 case CSR_MLME_CONFIG_QUEUE_CONFIRM_ID:
1215 return 0x00040020;
1216#endif
1217#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
1218 case CSR_MLME_ADD_TSPEC_REQUEST_ID:
1219 return 0x00040040;
1220#endif
1221#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
1222 case CSR_MLME_ADD_TSPEC_CONFIRM_ID:
1223 return 0x00040080;
1224#endif
1225#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
1226 case CSR_MLME_DEL_TSPEC_REQUEST_ID:
1227 return 0x00040100;
1228#endif
1229#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
1230 case CSR_MLME_DEL_TSPEC_CONFIRM_ID:
1231 return 0x00040200;
1232#endif
1233#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
1234 case CSR_MLME_START_AGGREGATION_REQUEST_ID:
1235 return 0x00040400;
1236#endif
1237#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
1238 case CSR_MLME_START_AGGREGATION_CONFIRM_ID:
1239 return 0x00040800;
1240#endif
1241#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
1242 case CSR_MLME_BLOCKACK_ERROR_INDICATION_ID:
1243 return 0x00041000;
1244#endif
1245#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
1246 case CSR_MLME_STOP_AGGREGATION_REQUEST_ID:
1247 return 0x00042000;
1248#endif
1249#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
1250 case CSR_MLME_STOP_AGGREGATION_CONFIRM_ID:
1251 return 0x00044000;
1252#endif
1253#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
1254 case CSR_MLME_SM_START_REQUEST_ID:
1255 return 0x00048000;
1256#endif
1257#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
1258 case CSR_MLME_SM_START_CONFIRM_ID:
1259 return 0x00050001;
1260#endif
1261#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
1262 case CSR_MLME_LEAVE_REQUEST_ID:
1263 return 0x00050002;
1264#endif
1265#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
1266 case CSR_MLME_LEAVE_CONFIRM_ID:
1267 return 0x00050004;
1268#endif
1269 case CSR_MLME_SET_TIM_REQUEST_ID:
1270 return 0x00050008;
1271 case CSR_MLME_SET_TIM_CONFIRM_ID:
1272 return 0x00050010;
1273#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
1274 case CSR_MLME_GET_KEY_SEQUENCE_REQUEST_ID:
1275 return 0x00050020;
1276#endif
1277#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
1278 case CSR_MLME_GET_KEY_SEQUENCE_CONFIRM_ID:
1279 return 0x00050040;
1280#endif
1281#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
1282 case CSR_MLME_SET_CHANNEL_REQUEST_ID:
1283 return 0x00050080;
1284#endif
1285#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
1286 case CSR_MLME_SET_CHANNEL_CONFIRM_ID:
1287 return 0x00050100;
1288#endif
1289#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
1290 case CSR_MLME_ADD_MULTICAST_ADDRESS_REQUEST_ID:
1291 return 0x00050200;
1292#endif
1293#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
1294 case CSR_MLME_ADD_MULTICAST_ADDRESS_CONFIRM_ID:
1295 return 0x00050400;
1296#endif
1297 case CSR_DEBUG_STRING_INDICATION_ID:
1298 return 0x00050800;
1299 case CSR_DEBUG_WORD16_INDICATION_ID:
1300 return 0x00051000;
1301 case CSR_DEBUG_GENERIC_REQUEST_ID:
1302 return 0x00052000;
1303 case CSR_DEBUG_GENERIC_CONFIRM_ID:
1304 return 0x00054000;
1305 case CSR_DEBUG_GENERIC_INDICATION_ID:
1306 return 0x00058000;
1307 default:
1308 break;
1309 }
1310 return 0xffffffff;
1311}
1312
1313
diff --git a/drivers/staging/csr/csr_wifi_hip_signals.h b/drivers/staging/csr/csr_wifi_hip_signals.h
deleted file mode 100644
index ca4d0774195c..000000000000
--- a/drivers/staging/csr/csr_wifi_hip_signals.h
+++ /dev/null
@@ -1,128 +0,0 @@
1/*****************************************************************************
2
3 (c) Cambridge Silicon Radio Limited 2011
4 All rights reserved and confidential information of CSR
5
6 Refer to LICENSE.txt included with this source for details
7 on the license terms.
8
9*****************************************************************************/
10
11/*
12 *****************************************************************************
13 *
14 * FILE: csr_wifi_hip_signals.h
15 *
16 * PURPOSE:
17 * Header file wrapping the auto-generated code in csr_wifi_hip_sigs.h
18 * and csr_wifi_hip_signals.c -
19 * csr_wifi_hip_sigs.h provides structures defining UniFi signals and
20 * csr_wifi_hip_signals.c provides SigGetSize() and SigGetDataRefs().
21 *
22 *****************************************************************************
23 */
24#ifndef __CSR_WIFI_HIP_SIGNALS_H__
25#define __CSR_WIFI_HIP_SIGNALS_H__
26
27#include <linux/types.h>
28#include "csr_wifi_hip_sigs.h"
29
30
31/****************************************************************************/
32/* INFORMATION ELEMENTS */
33/****************************************************************************/
34
35/* Information Element ID's - shouldn't be in here, but nowhere better yet */
36#define IE_SSID_ID 0
37#define IE_SUPPORTED_RATES_ID 1
38#define IE_FH_PARAM_SET_ID 2
39#define IE_DS_PARAM_SET_ID 3
40#define IE_CF_PARAM_SET_ID 4
41#define IE_TIM_ID 5
42#define IE_IBSS_PARAM_SET_ID 6
43#define IE_COUNTRY_ID 7
44#define IE_HOPPING_PATTERN_PARAMS_ID 8
45#define IE_HOPPING_PATTERN_TABLE_ID 9
46#define IE_REQUEST_ID 10
47#define IE_QBSS_LOAD_ID 11
48#define IE_EDCA_PARAM_SET_ID 12
49#define IE_TRAFFIC_SPEC_ID 13
50#define IE_TRAFFIC_CLASS_ID 14
51#define IE_SCHEDULE_ID 15
52#define IE_CHALLENGE_TEXT_ID 16
53#define IE_POWER_CONSTRAINT_ID 32
54#define IE_POWER_CAPABILITY_ID 33
55#define IE_TPC_REQUEST_ID 34
56#define IE_TPC_REPORT_ID 35
57#define IE_SUPPORTED_CHANNELS_ID 36
58#define IE_CHANNEL_SWITCH_ANNOUNCE_ID 37
59#define IE_MEASUREMENT_REQUEST_ID 38
60#define IE_MEASUREMENT_REPORT_ID 39
61#define IE_QUIET_ID 40
62#define IE_IBSS_DFS_ID 41
63#define IE_ERP_INFO_ID 42
64#define IE_TS_DELAY_ID 43
65#define IE_TCLAS_PROCESSING_ID 44
66#define IE_QOS_CAPABILITY_ID 46
67#define IE_RSN_ID 48
68#define IE_EXTENDED_SUPPORTED_RATES_ID 50
69#define IE_AP_CHANNEL_REPORT_ID 52
70#define IE_RCPI_ID 53
71#define IE_WPA_ID 221
72
73
74/* The maximum number of data references in a signal structure */
75#define UNIFI_MAX_DATA_REFERENCES 2
76
77/* The space to allow for a wire-format signal structure */
78#define UNIFI_PACKED_SIGBUF_SIZE 64
79
80
81/******************************************************************************/
82/* SIGNAL PARAMETER VALUES */
83/******************************************************************************/
84
85/* ifIndex */
86#define UNIFI_IF_2G4 1
87#define UNIFI_IF_5G 2
88
89/* SendProcessId */
90#define HOST_PROC_ID 0xc000
91
92#define SIG_CAP_ESS 0x0001
93#define SIG_CAP_IBSS 0x0002
94#define SIG_CAP_CF_POLLABLE 0x0004
95#define SIG_CAP_CF_POLL_REQUEST 0x0008
96#define SIG_CAP_PRIVACY 0x0010
97#define SIG_CAP_SHORT_PREAMBLE 0x0020
98#define SIG_CAP_DSSSOFDM 0x2000
99
100/******************************************************************************/
101/* FUNCTION DECLARATIONS */
102/******************************************************************************/
103
104/******************************************************************************
105 * SigGetNumDataRefs - Retrieve pointers to data-refs from a signal.
106 *
107 * PARAMETERS:
108 * aSignal - Pointer to signal to retrieve the data refs of.
109 * aDataRef - Address of a pointer to the structure that the data refs
110 * pointers will be stored.
111 *
112 * RETURNS:
113 * The number of data-refs in the signal.
114 */
115s32 SigGetDataRefs(CSR_SIGNAL *aSignal, CSR_DATAREF **aDataRef);
116
117/******************************************************************************
118 * SigGetSize - Retrieve the size (in bytes) of a given signal.
119 *
120 * PARAMETERS:
121 * aSignal - Pointer to signal to retrieve size of.
122 *
123 * RETURNS:
124 * The size (in bytes) of the given signal.
125 */
126s32 SigGetSize(const CSR_SIGNAL *aSignal);
127
128#endif /* __CSR_WIFI_HIP_SIGNALS_H__ */
diff --git a/drivers/staging/csr/csr_wifi_hip_sigs.h b/drivers/staging/csr/csr_wifi_hip_sigs.h
deleted file mode 100644
index 6112cc3e87fa..000000000000
--- a/drivers/staging/csr/csr_wifi_hip_sigs.h
+++ /dev/null
@@ -1,1417 +0,0 @@
1/*****************************************************************************
2
3 (c) Cambridge Silicon Radio Limited 2011
4 All rights reserved and confidential information of CSR
5
6 Refer to LICENSE.txt included with this source for details
7 on the license terms.
8
9*****************************************************************************/
10
11/* Note: this is an auto-generated file. */
12
13
14/* Generated by hip_dd_l_h_gen.pl */
15
16#ifndef CSR_WIFI_HIP_SIGS_H
17#define CSR_WIFI_HIP_SIGS_H
18
19typedef s16 csr_place_holding_type;
20
21typedef u16 CSR_ASSOCIATION_ID;
22
23typedef u16 CSR_AUTONOMOUS_SCAN_ID;
24
25typedef u16 CSR_BEACON_PERIODS;
26
27typedef u16 CSR_BLACKOUT_ID;
28
29typedef enum CSR_BLACKOUT_SOURCE
30{
31 CSR_DOT11_LOCAL = 0x0000,
32 CSR_DOT11_REMOTE = 0x0001,
33 CSR_OTHER_RADIO = 0x0002,
34 CSR_NOT_LINKED = 0x0004
35} CSR_BLACKOUT_SOURCE;
36
37typedef enum CSR_BLACKOUT_TYPE
38{
39 CSR_LOCAL_DEVICE_ONLY = 0x0001,
40 CSR_SPECIFIED_PEER = 0x0002,
41 CSR_CURRENT_CHANNEL = 0x0004,
42 CSR_P2P = 0x0008
43} CSR_BLACKOUT_TYPE;
44
45typedef enum CSR_BOOT_LOADER_OPERATION
46{
47 CSR_BOOT_LOADER_IDLE = 0x00,
48 CSR_BOOT_LOADER_RESTART = 0x01,
49 CSR_BOOT_LOADER_PATCH = 0x02,
50 CSR_BOOT_LOADER_IMAGE_0 = 0x10,
51 CSR_BOOT_LOADER_IMAGE_1 = 0x11,
52 CSR_BOOT_LOADER_IMAGE_2 = 0x12,
53 CSR_BOOT_LOADER_IMAGE_3 = 0x13
54} CSR_BOOT_LOADER_OPERATION;
55
56typedef u16 CSR_CAPABILITY_INFORMATION;
57
58typedef u16 CSR_CHANNEL_STARTING_FACTOR;
59
60typedef u32 CSR_CIPHER_SUITE_SELECTOR;
61
62typedef u32 CSR_CLIENT_TAG;
63
64typedef enum CSR_CONNECTION_STATUS
65{
66 CSR_DISCONNECTED = 0x0000,
67 CSR_CONNECTED_AWAKE = 0x0001
68} CSR_CONNECTION_STATUS;
69
70typedef s16 CSR_DECIBELS;
71
72typedef enum CSR_DIRECTION
73{
74 CSR_TRANSMIT = 0x0000,
75 CSR_RECEIVE = 0x0001,
76 CSR_BIDIRECTIONAL = 0x0003
77} CSR_DIRECTION;
78
79typedef enum CSR_FRAME_TYPE
80{
81 CSR_RESERVED = 0x0000,
82 CSR_BEACON = 0x0001,
83 CSR_PROBE_RESPONSE = 0x0002,
84 CSR_BEACON_AND_PROBE_RESPONSE = 0x0003,
85 CSR_PROBE_REQUEST = 0x0004
86} CSR_FRAME_TYPE;
87
88typedef u32 CSR_IPV4_ADDRESS;
89
90typedef enum CSR_IFINTERFACE
91{
92 CSR_INDEX_2G4 = 0x0001,
93 CSR_INDEX_5G = 0x0002
94} CSR_IFINTERFACE;
95
96typedef enum CSR_KEY_TYPE
97{
98 CSR_GROUP = 0x0000,
99 CSR_PAIRWISE = 0x0001,
100 CSR_PEER_KEY = 0x0002,
101 CSR_IGTK = 0x0003
102} CSR_KEY_TYPE;
103
104typedef enum CSR_LOADER_OPERATION
105{
106 CSR_LOADER_IDLE = 0x0000,
107 CSR_LOADER_COPY = 0x0001
108} CSR_LOADER_OPERATION;
109
110typedef struct CSR_MAC_ADDRESS
111{
112 u8 x[6];
113} CSR_MACADDRESS;
114
115typedef enum CSR_MIB_STATUS
116{
117 CSR_MIB_SUCCESSFUL = 0x0000,
118 CSR_MIB_INVALID_PARAMETERS = 0x0001,
119 CSR_MIB_WRITE_ONLY = 0x0002,
120 CSR_MIB_READ_ONLY = 0x0003
121} CSR_MIB_STATUS;
122
123typedef enum CSR_MEMORY_SPACE
124{
125 CSR_NONE = 0x00,
126 CSR_SHARED_DATA_MEMORY = 0x01,
127 CSR_EXTERNAL_FLASH_MEMORY = 0x02,
128 CSR_EXTERNAL_SRAM = 0x03,
129 CSR_REGISTERS = 0x04,
130 CSR_PHY_PROCESSOR_DATA_MEMORY = 0x10,
131 CSR_PHY_PROCESSOR_PROGRAM_MEMORY = 0x11,
132 CSR_PHY_PROCESSOR_ROM = 0x12,
133 CSR_MAC_PROCESSOR_DATA_MEMORY = 0x20,
134 CSR_MAC_PROCESSOR_PROGRAM_MEMORY = 0x21,
135 CSR_MAC_PROCESSOR_ROM = 0x22,
136 CSR_BT_PROCESSOR_DATA_MEMORY = 0x30,
137 CSR_BT_PROCESSOR_PROGRAM_MEMORY = 0x31,
138 CSR_BT_PROCESSOR_ROM = 0x32
139} CSR_MEMORY_SPACE;
140
141typedef u16 CSR_MICROSECONDS16;
142
143typedef u32 CSR_MICROSECONDS32;
144
145typedef u16 CSR_NATURAL16;
146
147typedef enum CSR_PS_SCHEME
148{
149 CSR_LEGACY_PS = 0x0001,
150 CSR_U_APSD = 0x0002,
151 CSR_S_APSD = 0x0004
152} CSR_PS_SCHEME;
153
154typedef enum CSR_PACKET_FILTER_MODE
155{
156 CSR_PFM_OPT_OUT = 0x0000,
157 CSR_PFM_OPT_IN = 0x0003
158} CSR_PACKET_FILTER_MODE;
159
160typedef u16 CSR_PERIODIC_ID;
161
162typedef enum CSR_PERIODIC_SCHEDULING_MODE
163{
164 CSR_PSM_PERIODIC_SCHEDULE_PS_POLL = 0x0001,
165 CSR_PSM_PERIODIC_SCHEDULE_PM_BIT = 0x0002,
166 CSR_PSM_PERIODIC_SCHEDULE_UAPSD = 0x0004,
167 CSR_PSM_PERIODIC_SCHEDULE_SAPSD = 0x0008
168} CSR_PERIODIC_SCHEDULING_MODE;
169
170typedef enum CSR_POWER_MANAGEMENT_MODE
171{
172 CSR_PMM_ACTIVE_MODE = 0x0000,
173 CSR_PMM_POWER_SAVE = 0x0001,
174 CSR_PMM_FAST_POWER_SAVE = 0x0002
175} CSR_POWER_MANAGEMENT_MODE;
176
177typedef enum CSR_PRIORITY
178{
179 CSR_QOS_UP0 = 0x0000,
180 CSR_QOS_UP1 = 0x0001,
181 CSR_QOS_UP2 = 0x0002,
182 CSR_QOS_UP3 = 0x0003,
183 CSR_QOS_UP4 = 0x0004,
184 CSR_QOS_UP5 = 0x0005,
185 CSR_QOS_UP6 = 0x0006,
186 CSR_QOS_UP7 = 0x0007,
187 CSR_CONTENTION = 0x8000,
188 CSR_MANAGEMENT = 0x8010
189} CSR_PRIORITY;
190
191typedef enum CSR_REASON_CODE
192{
193 CSR_UNSPECIFIED_REASON = 0x0001,
194 CSR_INVALID_INFORMATION_ELEMENT = 0x000d,
195 CSR_QOS_UNSPECIFIED_REASON = 0x0020,
196 CSR_QOS_EXCESSIVE_NOT_ACK = 0x0022,
197 CSR_QOS_TXOP_LIMIT_EXCEEDED = 0x0023,
198 CSR_QSTA_LEAVING = 0x0024,
199 CSR_UNKNOWN_BA = 0x0026,
200 CSR_UNKNOWN_TS = 0x0026,
201 CSR_TIMEOUT = 0x0027
202} CSR_REASON_CODE;
203
204typedef enum CSR_RECEPTION_STATUS
205{
206 CSR_RX_SUCCESS = 0x0000,
207 CSR_RX_FAILURE_UNSPECIFIED = 0x0001,
208 CSR_MICHAEL_MIC_ERROR = 0x0002,
209 CSR_DECRYPTION_ERROR = 0x0003,
210 CSR_NO_TEMPORAL_KEY_AVAILABLE = 0x0004,
211 CSR_UNSUPPORTED_MODULATION = 0x0011,
212 CSR_BAD_FCS = 0x0012,
213 CSR_BAD_SIGNAL = 0x0013
214} CSR_RECEPTION_STATUS;
215
216typedef enum CSR_RESULT_CODE
217{
218 CSR_RC_SUCCESS = 0x0000,
219 CSR_RC_UNSPECIFIED_FAILURE = 0x0001,
220 CSR_RC_REFUSED = 0x0003,
221 CSR_RC_INVALID_PARAMETERS = 0x0026,
222 CSR_RC_REJECTED_INVALID_IE = 0x0028,
223 CSR_RC_REJECTED_INVALID_GROUP_CIPHER = 0x0029,
224 CSR_RC_REJECTED_INVALID_PAIRWISE_CIPHER = 0x002a,
225 CSR_RC_TIMEOUT = 0x8000,
226 CSR_RC_TOO_MANY_SIMULTANEOUS_REQUESTS = 0x8001,
227 CSR_RC_BSS_ALREADY_STARTED_OR_JOINED = 0x8002,
228 CSR_RC_NOT_SUPPORTED = 0x8003,
229 CSR_RC_TRANSMISSION_FAILURE = 0x8004,
230 CSR_RC_RESET_REQUIRED_BEFORE_START = 0x8006,
231 CSR_RC_INSUFFICIENT_RESOURCE = 0x8007,
232 CSR_RC_NO_BUFFERED_BROADCAST_MULTICAST_FRAMES = 0x8008,
233 CSR_RC_INVALID_UNICAST_CIPHER = 0xf02f,
234 CSR_RC_INVALID_MULTICAST_CIPHER = 0xf030
235} CSR_RESULT_CODE;
236
237typedef enum CSR_SCAN_TYPE
238{
239 CSR_SC_ACTIVE_SCAN = 0x0000,
240 CSR_SC_PASSIVE_SCAN = 0x0001
241} CSR_SCAN_TYPE;
242
243typedef enum CSR_SIGNAL_ID
244{
245 CSR_MA_PACKET_REQUEST_ID = 0x0110,
246 CSR_MA_PACKET_CONFIRM_ID = 0x0111,
247 CSR_MA_PACKET_INDICATION_ID = 0x0113,
248 CSR_MA_PACKET_CANCEL_REQUEST_ID = 0x0114,
249 CSR_MA_VIF_AVAILABILITY_RESPONSE_ID = 0x0116,
250 CSR_MA_VIF_AVAILABILITY_INDICATION_ID = 0x0117,
251 CSR_MA_PACKET_ERROR_INDICATION_ID = 0x011b,
252 CSR_MLME_RESET_REQUEST_ID = 0x0200,
253 CSR_MLME_RESET_CONFIRM_ID = 0x0201,
254 CSR_MLME_GET_REQUEST_ID = 0x0204,
255 CSR_MLME_GET_CONFIRM_ID = 0x0205,
256 CSR_MLME_SET_REQUEST_ID = 0x0208,
257 CSR_MLME_SET_CONFIRM_ID = 0x0209,
258 CSR_MLME_GET_NEXT_REQUEST_ID = 0x020c,
259 CSR_MLME_GET_NEXT_CONFIRM_ID = 0x020d,
260 CSR_MLME_POWERMGT_REQUEST_ID = 0x0210,
261 CSR_MLME_POWERMGT_CONFIRM_ID = 0x0211,
262 CSR_MLME_SCAN_REQUEST_ID = 0x0214,
263 CSR_MLME_SCAN_CONFIRM_ID = 0x0215,
264 CSR_MLME_HL_SYNC_REQUEST_ID = 0x0244,
265 CSR_MLME_HL_SYNC_CONFIRM_ID = 0x0245,
266 CSR_MLME_MEASURE_REQUEST_ID = 0x0258,
267 CSR_MLME_MEASURE_CONFIRM_ID = 0x0259,
268 CSR_MLME_MEASURE_INDICATION_ID = 0x025b,
269 CSR_MLME_SETKEYS_REQUEST_ID = 0x0268,
270 CSR_MLME_SETKEYS_CONFIRM_ID = 0x0269,
271 CSR_MLME_DELETEKEYS_REQUEST_ID = 0x026c,
272 CSR_MLME_DELETEKEYS_CONFIRM_ID = 0x026d,
273 CSR_MLME_AUTONOMOUS_SCAN_LOSS_INDICATION_ID = 0x0287,
274 CSR_MLME_CONNECTED_INDICATION_ID = 0x028b,
275 CSR_MLME_SCAN_CANCEL_REQUEST_ID = 0x028c,
276 CSR_MLME_HL_SYNC_CANCEL_REQUEST_ID = 0x0298,
277 CSR_MLME_HL_SYNC_CANCEL_CONFIRM_ID = 0x0299,
278 CSR_MLME_ADD_PERIODIC_REQUEST_ID = 0x02a0,
279 CSR_MLME_ADD_PERIODIC_CONFIRM_ID = 0x02a1,
280 CSR_MLME_DEL_PERIODIC_REQUEST_ID = 0x02a4,
281 CSR_MLME_DEL_PERIODIC_CONFIRM_ID = 0x02a5,
282 CSR_MLME_ADD_AUTONOMOUS_SCAN_REQUEST_ID = 0x02a8,
283 CSR_MLME_ADD_AUTONOMOUS_SCAN_CONFIRM_ID = 0x02a9,
284 CSR_MLME_DEL_AUTONOMOUS_SCAN_REQUEST_ID = 0x02ac,
285 CSR_MLME_DEL_AUTONOMOUS_SCAN_CONFIRM_ID = 0x02ad,
286 CSR_MLME_SET_PACKET_FILTER_REQUEST_ID = 0x02b8,
287 CSR_MLME_SET_PACKET_FILTER_CONFIRM_ID = 0x02b9,
288 CSR_MLME_STOP_MEASURE_REQUEST_ID = 0x02bc,
289 CSR_MLME_STOP_MEASURE_CONFIRM_ID = 0x02bd,
290 CSR_MLME_PAUSE_AUTONOMOUS_SCAN_REQUEST_ID = 0x02cc,
291 CSR_MLME_PAUSE_AUTONOMOUS_SCAN_CONFIRM_ID = 0x02cd,
292 CSR_MLME_AUTONOMOUS_SCAN_DONE_INDICATION_ID = 0x02db,
293 CSR_MLME_ADD_TRIGGERED_GET_REQUEST_ID = 0x02dc,
294 CSR_MLME_ADD_TRIGGERED_GET_CONFIRM_ID = 0x02dd,
295 CSR_MLME_DEL_TRIGGERED_GET_REQUEST_ID = 0x02e0,
296 CSR_MLME_DEL_TRIGGERED_GET_CONFIRM_ID = 0x02e1,
297 CSR_MLME_TRIGGERED_GET_INDICATION_ID = 0x02e7,
298 CSR_MLME_ADD_BLACKOUT_REQUEST_ID = 0x02f8,
299 CSR_MLME_ADD_BLACKOUT_CONFIRM_ID = 0x02f9,
300 CSR_MLME_BLACKOUT_ENDED_INDICATION_ID = 0x02fb,
301 CSR_MLME_DEL_BLACKOUT_REQUEST_ID = 0x02fc,
302 CSR_MLME_DEL_BLACKOUT_CONFIRM_ID = 0x02fd,
303 CSR_MLME_ADD_RX_TRIGGER_REQUEST_ID = 0x0304,
304 CSR_MLME_ADD_RX_TRIGGER_CONFIRM_ID = 0x0305,
305 CSR_MLME_DEL_RX_TRIGGER_REQUEST_ID = 0x0308,
306 CSR_MLME_DEL_RX_TRIGGER_CONFIRM_ID = 0x0309,
307 CSR_MLME_CONNECT_STATUS_REQUEST_ID = 0x0310,
308 CSR_MLME_CONNECT_STATUS_CONFIRM_ID = 0x0311,
309 CSR_MLME_MODIFY_BSS_PARAMETER_REQUEST_ID = 0x0314,
310 CSR_MLME_MODIFY_BSS_PARAMETER_CONFIRM_ID = 0x0315,
311 CSR_MLME_ADD_TEMPLATE_REQUEST_ID = 0x0318,
312 CSR_MLME_ADD_TEMPLATE_CONFIRM_ID = 0x0319,
313 CSR_MLME_CONFIG_QUEUE_REQUEST_ID = 0x031c,
314 CSR_MLME_CONFIG_QUEUE_CONFIRM_ID = 0x031d,
315 CSR_MLME_ADD_TSPEC_REQUEST_ID = 0x0320,
316 CSR_MLME_ADD_TSPEC_CONFIRM_ID = 0x0321,
317 CSR_MLME_DEL_TSPEC_REQUEST_ID = 0x0324,
318 CSR_MLME_DEL_TSPEC_CONFIRM_ID = 0x0325,
319 CSR_MLME_START_AGGREGATION_REQUEST_ID = 0x0328,
320 CSR_MLME_START_AGGREGATION_CONFIRM_ID = 0x0329,
321 CSR_MLME_BLOCKACK_ERROR_INDICATION_ID = 0x032b,
322 CSR_MLME_STOP_AGGREGATION_REQUEST_ID = 0x032c,
323 CSR_MLME_STOP_AGGREGATION_CONFIRM_ID = 0x032d,
324 CSR_MLME_SM_START_REQUEST_ID = 0x0334,
325 CSR_MLME_SM_START_CONFIRM_ID = 0x0335,
326 CSR_MLME_LEAVE_REQUEST_ID = 0x0338,
327 CSR_MLME_LEAVE_CONFIRM_ID = 0x0339,
328 CSR_MLME_SET_TIM_REQUEST_ID = 0x033c,
329 CSR_MLME_SET_TIM_CONFIRM_ID = 0x033d,
330 CSR_MLME_GET_KEY_SEQUENCE_REQUEST_ID = 0x0340,
331 CSR_MLME_GET_KEY_SEQUENCE_CONFIRM_ID = 0x0341,
332 CSR_MLME_SET_CHANNEL_REQUEST_ID = 0x034c,
333 CSR_MLME_SET_CHANNEL_CONFIRM_ID = 0x034d,
334 CSR_MLME_ADD_MULTICAST_ADDRESS_REQUEST_ID = 0x040c,
335 CSR_MLME_ADD_MULTICAST_ADDRESS_CONFIRM_ID = 0x040d,
336 CSR_DEBUG_STRING_INDICATION_ID = 0x0803,
337 CSR_DEBUG_WORD16_INDICATION_ID = 0x0807,
338 CSR_DEBUG_GENERIC_REQUEST_ID = 0x0808,
339 CSR_DEBUG_GENERIC_CONFIRM_ID = 0x0809,
340 CSR_DEBUG_GENERIC_INDICATION_ID = 0x080b
341} CSR_SIGNAL_ID;
342
343typedef u16 CSR_SIMPLE_POINTER;
344
345typedef u16 CSR_STARTING_SEQUENCE_NUMBER;
346
347typedef enum CSR_SYMBOL_ID
348{
349 CSR_SLT_END = 0x0000,
350 CSR_SLT_PCI_SLOT_CONFIG = 0x0001,
351 CSR_SLT_SDIO_SLOT_CONFIG = 0x0002,
352 CSR_SLT_BUILD_ID_NUMBER = 0x0003,
353 CSR_SLT_BUILD_ID_STRING = 0x0004,
354 CSR_SLT_PERSISTENT_STORE_DB = 0x0005,
355 CSR_SLT_RESET_VECTOR_PHY = 0x0006,
356 CSR_SLT_RESET_VECTOR_MAC = 0x0007,
357 CSR_SLT_SDIO_LOADER_CONTROL = 0x0008,
358 CSR_SLT_TEST_CMD = 0x0009,
359 CSR_SLT_TEST_ALIVE_COUNTER = 0x000a,
360 CSR_SLT_TEST_PARAMETERS = 0x000b,
361 CSR_SLT_TEST_RESULTS = 0x000c,
362 CSR_SLT_TEST_VERSION = 0x000d,
363 CSR_SLT_MIB_PSID_RANGES = 0x000e,
364 CSR_SLT_KIP_TABLE = 0x000f,
365 CSR_SLT_PANIC_DATA_PHY = 0x0010,
366 CSR_SLT_PANIC_DATA_MAC = 0x0011,
367 CSR_SLT_BOOT_LOADER_CONTROL = 0x0012,
368 CSR_SLT_SOFT_MAC = 0x0013
369} CSR_SYMBOL_ID;
370
371typedef struct CSR_TSF_TIME
372{
373 u8 x[8];
374} CSR_TSF_TIME;
375
376typedef u16 CSR_TIME_UNITS;
377
378typedef enum CSR_TRANSMISSION_CONTROL
379{
380 CSR_TRIGGERED = 0x0001,
381 CSR_END_OF_SERVICE = 0x0002,
382 CSR_NO_CONFIRM_REQUIRED = 0x0004,
383 CSR_ALLOW_BA = 0x0008
384} CSR_TRANSMISSION_CONTROL;
385
386typedef enum CSR_TRANSMISSION_STATUS
387{
388 CSR_TX_SUCCESSFUL = 0x0000,
389 CSR_TX_RETRY_LIMIT = 0x0001,
390 CSR_TX_LIFETIME = 0x0002,
391 CSR_TX_NO_BSS = 0x0003,
392 CSR_TX_EXCESSIVE_DATA_LENGTH = 0x0004,
393 CSR_TX_UNSUPPORTED_PRIORITY = 0x0006,
394 CSR_TX_UNAVAILABLE_PRIORITY = 0x0007,
395 CSR_TX_UNAVAILABLE_KEY_MAPPING = 0x000a,
396 CSR_TX_EDCA_TIMEOUT = 0x000b,
397 CSR_TX_BLOCK_ACK_TIMEOUT = 0x000c,
398 CSR_TX_FAIL_TRANSMISSION_VIF_INTERRUPTED = 0x000d,
399 CSR_TX_REJECTED_PEER_STATION_SLEEPING = 0x000e,
400 CSR_TX_REJECTED_DTIM_ENDED = 0x000f,
401 CSR_TX_REJECTED_DTIM_STARTED = 0x0010
402} CSR_TRANSMISSION_STATUS;
403
404typedef u16 CSR_TRIGGER_ID;
405
406typedef u16 CSR_TRIGGERED_ID;
407
408typedef enum CSR_HIP_VERSIONS
409{
410 CSR_HIP_ENG_VERSION = 0x0001,
411 CSR_HIP_VERSION = 0x0900
412} CSR_HIP_VERSIONS;
413
414typedef u16 CSR_BUFFER_HANDLE;
415
416typedef u16 CSR_CHANNEL_NUMBER;
417
418typedef struct CSR_DATA_REFERENCE
419{
420 u16 SlotNumber;
421 u16 DataLength;
422} CSR_DATAREF;
423
424typedef u16 CSR_DIALOG_TOKEN;
425
426typedef struct CSR_GENERIC_POINTER
427{
428 u32 MemoryOffset;
429 CSR_MEMORY_SPACE MemorySpace;
430} CSR_GENERIC_POINTER;
431
432typedef struct CSR_MLME_CONFIG_QUEUE_CONFIRM
433{
434 CSR_DATAREF Dummydataref1;
435 CSR_DATAREF Dummydataref2;
436 CSR_RESULT_CODE ResultCode;
437} CSR_MLME_CONFIG_QUEUE_CONFIRM;
438
439typedef struct CSR_MLME_CONFIG_QUEUE_REQUEST
440{
441 CSR_DATAREF Dummydataref1;
442 CSR_DATAREF Dummydataref2;
443 CSR_NATURAL16 QueueIndex;
444 CSR_NATURAL16 Aifs;
445 CSR_NATURAL16 Cwmin;
446 CSR_NATURAL16 Cwmax;
447 CSR_NATURAL16 TxopLimit;
448} CSR_MLME_CONFIG_QUEUE_REQUEST;
449
450typedef struct CSR_MLME_GET_CONFIRM
451{
452 CSR_DATAREF MibAttributeValue;
453 CSR_DATAREF Dummydataref2;
454 CSR_MIB_STATUS Status;
455 CSR_NATURAL16 ErrorIndex;
456} CSR_MLME_GET_CONFIRM;
457
458typedef struct CSR_MLME_GET_REQUEST
459{
460 CSR_DATAREF MibAttribute;
461 CSR_DATAREF Dummydataref2;
462} CSR_MLME_GET_REQUEST;
463
464typedef struct CSR_MLME_GET_NEXT_CONFIRM
465{
466 CSR_DATAREF MibAttributeValue;
467 CSR_DATAREF Dummydataref2;
468 CSR_MIB_STATUS Status;
469 CSR_NATURAL16 ErrorIndex;
470} CSR_MLME_GET_NEXT_CONFIRM;
471
472typedef struct CSR_MLME_GET_NEXT_REQUEST
473{
474 CSR_DATAREF MibAttribute;
475 CSR_DATAREF Dummydataref2;
476} CSR_MLME_GET_NEXT_REQUEST;
477
478typedef struct CSR_MLME_HL_SYNC_CONFIRM
479{
480 CSR_DATAREF Dummydataref1;
481 CSR_DATAREF Dummydataref2;
482 CSR_MACADDRESS GroupAddress;
483 CSR_RESULT_CODE ResultCode;
484} CSR_MLME_HL_SYNC_CONFIRM;
485
486typedef struct CSR_MLME_HL_SYNC_REQUEST
487{
488 CSR_DATAREF Dummydataref1;
489 CSR_DATAREF Dummydataref2;
490 CSR_MACADDRESS GroupAddress;
491} CSR_MLME_HL_SYNC_REQUEST;
492
493typedef struct CSR_MLME_HL_SYNC_CANCEL_CONFIRM
494{
495 CSR_DATAREF Dummydataref1;
496 CSR_DATAREF Dummydataref2;
497 CSR_RESULT_CODE ResultCode;
498} CSR_MLME_HL_SYNC_CANCEL_CONFIRM;
499
500typedef struct CSR_MLME_HL_SYNC_CANCEL_REQUEST
501{
502 CSR_DATAREF Dummydataref1;
503 CSR_DATAREF Dummydataref2;
504 CSR_MACADDRESS GroupAddress;
505} CSR_MLME_HL_SYNC_CANCEL_REQUEST;
506
507typedef struct CSR_MLME_MEASURE_CONFIRM
508{
509 CSR_DATAREF Dummydataref1;
510 CSR_DATAREF Dummydataref2;
511 CSR_RESULT_CODE ResultCode;
512 CSR_DIALOG_TOKEN DialogToken;
513} CSR_MLME_MEASURE_CONFIRM;
514
515typedef struct CSR_MLME_MEASURE_INDICATION
516{
517 CSR_DATAREF MeasurementReportSet;
518 CSR_DATAREF Dummydataref2;
519 CSR_DIALOG_TOKEN DialogToken;
520} CSR_MLME_MEASURE_INDICATION;
521
522typedef struct CSR_MLME_MEASURE_REQUEST
523{
524 CSR_DATAREF MeasurementRequestSet;
525 CSR_DATAREF Dummydataref2;
526 CSR_DIALOG_TOKEN DialogToken;
527} CSR_MLME_MEASURE_REQUEST;
528
529typedef struct CSR_MLME_RESET_CONFIRM
530{
531 CSR_DATAREF Dummydataref1;
532 CSR_DATAREF Dummydataref2;
533 CSR_RESULT_CODE ResultCode;
534} CSR_MLME_RESET_CONFIRM;
535
536typedef struct CSR_MLME_RESET_REQUEST
537{
538 CSR_DATAREF Dummydataref1;
539 CSR_DATAREF Dummydataref2;
540 CSR_MACADDRESS StaAddress;
541 s16 SetDefaultMib;
542} CSR_MLME_RESET_REQUEST;
543
544typedef struct CSR_MLME_SET_CONFIRM
545{
546 CSR_DATAREF MibAttributeValue;
547 CSR_DATAREF Dummydataref2;
548 CSR_MIB_STATUS Status;
549 CSR_NATURAL16 ErrorIndex;
550} CSR_MLME_SET_CONFIRM;
551
552typedef struct CSR_MLME_SET_REQUEST
553{
554 CSR_DATAREF MibAttributeValue;
555 CSR_DATAREF Dummydataref2;
556} CSR_MLME_SET_REQUEST;
557
558typedef struct CSR_MLME_STOP_MEASURE_CONFIRM
559{
560 CSR_DATAREF Dummydataref1;
561 CSR_DATAREF Dummydataref2;
562 CSR_RESULT_CODE ResultCode;
563 CSR_DIALOG_TOKEN DialogToken;
564} CSR_MLME_STOP_MEASURE_CONFIRM;
565
566typedef struct CSR_MLME_STOP_MEASURE_REQUEST
567{
568 CSR_DATAREF Dummydataref1;
569 CSR_DATAREF Dummydataref2;
570 CSR_DIALOG_TOKEN DialogToken;
571} CSR_MLME_STOP_MEASURE_REQUEST;
572
573typedef u16 CSR_PROCESS_ID;
574
575typedef u16 CSR_RATE;
576
577typedef u16 CSR_SEQUENCE_NUMBER;
578
579typedef struct CSR_SIGNAL_PRIMITIVE_HEADER
580{
581 s16 SignalId;
582 CSR_PROCESS_ID ReceiverProcessId;
583 CSR_PROCESS_ID SenderProcessId;
584} CSR_SIGNAL_PRIMITIVE_HEADER;
585
586typedef u16 CSR_TRAFFIC_WINDOW;
587
588typedef u16 CSR_VIF_IDENTIFIER;
589
590typedef struct CSR_DEBUG_GENERIC_CONFIRM
591{
592 CSR_DATAREF DebugVariable;
593 CSR_DATAREF Dummydataref2;
594 CSR_NATURAL16 DebugWords[8];
595} CSR_DEBUG_GENERIC_CONFIRM;
596
597typedef struct CSR_DEBUG_GENERIC_INDICATION
598{
599 CSR_DATAREF DebugVariable;
600 CSR_DATAREF Dummydataref2;
601 CSR_NATURAL16 DebugWords[8];
602} CSR_DEBUG_GENERIC_INDICATION;
603
604typedef struct CSR_DEBUG_GENERIC_REQUEST
605{
606 CSR_DATAREF DebugVariable;
607 CSR_DATAREF Dummydataref2;
608 CSR_NATURAL16 DebugWords[8];
609} CSR_DEBUG_GENERIC_REQUEST;
610
611typedef struct CSR_DEBUG_STRING_INDICATION
612{
613 CSR_DATAREF DebugMessage;
614 CSR_DATAREF Dummydataref2;
615} CSR_DEBUG_STRING_INDICATION;
616
617typedef struct CSR_DEBUG_WORD16_INDICATION
618{
619 CSR_DATAREF Dummydataref1;
620 CSR_DATAREF Dummydataref2;
621 CSR_NATURAL16 DebugWords[16];
622} CSR_DEBUG_WORD16_INDICATION;
623
624typedef struct CSR_MA_PACKET_CONFIRM
625{
626 CSR_DATAREF Dummydataref1;
627 CSR_DATAREF Dummydataref2;
628 CSR_VIF_IDENTIFIER VirtualInterfaceIdentifier;
629 CSR_TRANSMISSION_STATUS TransmissionStatus;
630 CSR_NATURAL16 RetryCount;
631 CSR_RATE Rate;
632 CSR_CLIENT_TAG HostTag;
633} CSR_MA_PACKET_CONFIRM;
634
635typedef struct CSR_MA_PACKET_INDICATION
636{
637 CSR_DATAREF Data;
638 CSR_DATAREF Dummydataref2;
639 CSR_VIF_IDENTIFIER VirtualInterfaceIdentifier;
640 CSR_TSF_TIME LocalTime;
641 CSR_IFINTERFACE Ifindex;
642 CSR_CHANNEL_NUMBER Channel;
643 CSR_RECEPTION_STATUS ReceptionStatus;
644 CSR_DECIBELS Rssi;
645 CSR_DECIBELS Snr;
646 CSR_RATE ReceivedRate;
647} CSR_MA_PACKET_INDICATION;
648
649typedef struct CSR_MA_PACKET_REQUEST
650{
651 CSR_DATAREF Data;
652 CSR_DATAREF Dummydataref2;
653 CSR_VIF_IDENTIFIER VirtualInterfaceIdentifier;
654 CSR_RATE TransmitRate;
655 CSR_CLIENT_TAG HostTag;
656 CSR_PRIORITY Priority;
657 CSR_MACADDRESS Ra;
658 CSR_TRANSMISSION_CONTROL TransmissionControl;
659} CSR_MA_PACKET_REQUEST;
660
661typedef struct CSR_MA_PACKET_CANCEL_REQUEST
662{
663 CSR_DATAREF Dummydataref1;
664 CSR_DATAREF Dummydataref2;
665 CSR_VIF_IDENTIFIER VirtualInterfaceIdentifier;
666 CSR_CLIENT_TAG HostTag;
667} CSR_MA_PACKET_CANCEL_REQUEST;
668
669typedef struct CSR_MA_PACKET_ERROR_INDICATION
670{
671 CSR_DATAREF Dummydataref1;
672 CSR_DATAREF Dummydataref2;
673 CSR_VIF_IDENTIFIER VirtualInterfaceIdentifier;
674 CSR_MACADDRESS PeerQstaAddress;
675 CSR_PRIORITY UserPriority;
676 CSR_SEQUENCE_NUMBER SequenceNumber;
677} CSR_MA_PACKET_ERROR_INDICATION;
678
679typedef struct CSR_MA_VIF_AVAILABILITY_INDICATION
680{
681 CSR_DATAREF Dummydataref1;
682 CSR_DATAREF Dummydataref2;
683 CSR_VIF_IDENTIFIER VirtualInterfaceIdentifier;
684 s16 Multicast;
685} CSR_MA_VIF_AVAILABILITY_INDICATION;
686
687typedef struct CSR_MA_VIF_AVAILABILITY_RESPONSE
688{
689 CSR_DATAREF Dummydataref1;
690 CSR_DATAREF Dummydataref2;
691 CSR_VIF_IDENTIFIER VirtualInterfaceIdentifier;
692 CSR_RESULT_CODE ResultCode;
693} CSR_MA_VIF_AVAILABILITY_RESPONSE;
694
695typedef struct CSR_MLME_ADD_AUTONOMOUS_SCAN_CONFIRM
696{
697 CSR_DATAREF Dummydataref1;
698 CSR_DATAREF Dummydataref2;
699 CSR_VIF_IDENTIFIER VirtualInterfaceIdentifier;
700 CSR_RESULT_CODE ResultCode;
701 CSR_AUTONOMOUS_SCAN_ID AutonomousScanId;
702} CSR_MLME_ADD_AUTONOMOUS_SCAN_CONFIRM;
703
704typedef struct CSR_MLME_ADD_AUTONOMOUS_SCAN_REQUEST
705{
706 CSR_DATAREF ChannelList;
707 CSR_DATAREF InformationElements;
708 CSR_VIF_IDENTIFIER VirtualInterfaceIdentifier;
709 CSR_AUTONOMOUS_SCAN_ID AutonomousScanId;
710 CSR_IFINTERFACE Ifindex;
711 CSR_CHANNEL_STARTING_FACTOR ChannelStartingFactor;
712 CSR_SCAN_TYPE ScanType;
713 CSR_MICROSECONDS32 ProbeDelay;
714 CSR_TIME_UNITS MinChannelTime;
715 CSR_TIME_UNITS MaxChannelTime;
716} CSR_MLME_ADD_AUTONOMOUS_SCAN_REQUEST;
717
718typedef struct CSR_MLME_ADD_BLACKOUT_CONFIRM
719{
720 CSR_DATAREF Dummydataref1;
721 CSR_DATAREF Dummydataref2;
722 CSR_VIF_IDENTIFIER VirtualInterfaceIdentifier;
723 CSR_BLACKOUT_ID BlackoutId;
724 CSR_RESULT_CODE ResultCode;
725} CSR_MLME_ADD_BLACKOUT_CONFIRM;
726
727typedef struct CSR_MLME_ADD_BLACKOUT_REQUEST
728{
729 CSR_DATAREF Dummydataref1;
730 CSR_DATAREF Dummydataref2;
731 CSR_VIF_IDENTIFIER VirtualInterfaceIdentifier;
732 CSR_BLACKOUT_ID BlackoutId;
733 CSR_BLACKOUT_TYPE BlackoutType;
734 CSR_BLACKOUT_SOURCE BlackoutSource;
735 CSR_MICROSECONDS32 BlackoutStartReference;
736 CSR_MICROSECONDS32 BlackoutPeriod;
737 CSR_MICROSECONDS32 BlackoutDuration;
738 CSR_MACADDRESS PeerStaAddress;
739 CSR_NATURAL16 BlackoutCount;
740} CSR_MLME_ADD_BLACKOUT_REQUEST;
741
742typedef struct CSR_MLME_ADD_MULTICAST_ADDRESS_CONFIRM
743{
744 CSR_DATAREF Dummydataref1;
745 CSR_DATAREF Dummydataref2;
746 CSR_VIF_IDENTIFIER VirtualInterfaceIdentifier;
747 CSR_RESULT_CODE ResultCode;
748} CSR_MLME_ADD_MULTICAST_ADDRESS_CONFIRM;
749
750typedef struct CSR_MLME_ADD_MULTICAST_ADDRESS_REQUEST
751{
752 CSR_DATAREF Data;
753 CSR_DATAREF Dummydataref2;
754 CSR_VIF_IDENTIFIER VirtualInterfaceIdentifier;
755 CSR_NATURAL16 NumberOfMulticastGroupAddresses;
756} CSR_MLME_ADD_MULTICAST_ADDRESS_REQUEST;
757
758typedef struct CSR_MLME_ADD_PERIODIC_CONFIRM
759{
760 CSR_DATAREF Dummydataref1;
761 CSR_DATAREF Dummydataref2;
762 CSR_VIF_IDENTIFIER VirtualInterfaceIdentifier;
763 CSR_PERIODIC_ID PeriodicId;
764 CSR_RESULT_CODE ResultCode;
765} CSR_MLME_ADD_PERIODIC_CONFIRM;
766
767typedef struct CSR_MLME_ADD_PERIODIC_REQUEST
768{
769 CSR_DATAREF Dummydataref1;
770 CSR_DATAREF Dummydataref2;
771 CSR_VIF_IDENTIFIER VirtualInterfaceIdentifier;
772 CSR_PERIODIC_ID PeriodicId;
773 CSR_MICROSECONDS32 MaximumLatency;
774 CSR_PERIODIC_SCHEDULING_MODE PeriodicSchedulingMode;
775 s16 WakeHost;
776 CSR_PRIORITY UserPriority;
777} CSR_MLME_ADD_PERIODIC_REQUEST;
778
779typedef struct CSR_MLME_ADD_RX_TRIGGER_CONFIRM
780{
781 CSR_DATAREF Dummydataref1;
782 CSR_DATAREF Dummydataref2;
783 CSR_VIF_IDENTIFIER VirtualInterfaceIdentifier;
784 CSR_TRIGGER_ID TriggerId;
785 CSR_RESULT_CODE ResultCode;
786} CSR_MLME_ADD_RX_TRIGGER_CONFIRM;
787
788typedef struct CSR_MLME_ADD_RX_TRIGGER_REQUEST
789{
790 CSR_DATAREF InformationElements;
791 CSR_DATAREF Dummydataref2;
792 CSR_VIF_IDENTIFIER VirtualInterfaceIdentifier;
793 CSR_TRIGGER_ID TriggerId;
794 CSR_PRIORITY Priority;
795} CSR_MLME_ADD_RX_TRIGGER_REQUEST;
796
797typedef struct CSR_MLME_ADD_TEMPLATE_CONFIRM
798{
799 CSR_DATAREF Dummydataref1;
800 CSR_DATAREF Dummydataref2;
801 CSR_VIF_IDENTIFIER VirtualInterfaceIdentifier;
802 CSR_FRAME_TYPE FrameType;
803 CSR_RESULT_CODE ResultCode;
804} CSR_MLME_ADD_TEMPLATE_CONFIRM;
805
806typedef struct CSR_MLME_ADD_TEMPLATE_REQUEST
807{
808 CSR_DATAREF Data1;
809 CSR_DATAREF Data2;
810 CSR_VIF_IDENTIFIER VirtualInterfaceIdentifier;
811 CSR_FRAME_TYPE FrameType;
812 CSR_RATE MinTransmitRate;
813} CSR_MLME_ADD_TEMPLATE_REQUEST;
814
815typedef struct CSR_MLME_ADD_TRIGGERED_GET_CONFIRM
816{
817 CSR_DATAREF Dummydataref1;
818 CSR_DATAREF Dummydataref2;
819 CSR_VIF_IDENTIFIER VirtualInterfaceIdentifier;
820 CSR_RESULT_CODE ResultCode;
821 CSR_TRIGGERED_ID TriggeredId;
822} CSR_MLME_ADD_TRIGGERED_GET_CONFIRM;
823
824typedef struct CSR_MLME_ADD_TRIGGERED_GET_REQUEST
825{
826 CSR_DATAREF MibAttribute;
827 CSR_DATAREF Dummydataref2;
828 CSR_VIF_IDENTIFIER VirtualInterfaceIdentifier;
829 CSR_TRIGGERED_ID TriggeredId;
830} CSR_MLME_ADD_TRIGGERED_GET_REQUEST;
831
832typedef struct CSR_MLME_ADD_TSPEC_CONFIRM
833{
834 CSR_DATAREF Dummydataref1;
835 CSR_DATAREF Dummydataref2;
836 CSR_VIF_IDENTIFIER VirtualInterfaceIdentifier;
837 CSR_PRIORITY UserPriority;
838 CSR_RESULT_CODE ResultCode;
839} CSR_MLME_ADD_TSPEC_CONFIRM;
840
841typedef struct CSR_MLME_ADD_TSPEC_REQUEST
842{
843 CSR_DATAREF Dummydataref1;
844 CSR_DATAREF Dummydataref2;
845 CSR_VIF_IDENTIFIER VirtualInterfaceIdentifier;
846 CSR_PRIORITY UserPriority;
847 CSR_DIRECTION Direction;
848 CSR_PS_SCHEME PsScheme;
849 CSR_NATURAL16 MediumTime;
850 CSR_MICROSECONDS32 ServiceStartTime;
851 CSR_MICROSECONDS32 ServiceInterval;
852 CSR_RATE MinimumDataRate;
853} CSR_MLME_ADD_TSPEC_REQUEST;
854
855typedef struct CSR_MLME_AUTONOMOUS_SCAN_DONE_INDICATION
856{
857 CSR_DATAREF Dummydataref1;
858 CSR_DATAREF Dummydataref2;
859 CSR_VIF_IDENTIFIER VirtualInterfaceIdentifier;
860 CSR_RESULT_CODE ResultCode;
861 CSR_AUTONOMOUS_SCAN_ID AutonomousScanId;
862} CSR_MLME_AUTONOMOUS_SCAN_DONE_INDICATION;
863
864typedef struct CSR_MLME_AUTONOMOUS_SCAN_LOSS_INDICATION
865{
866 CSR_DATAREF Dummydataref1;
867 CSR_DATAREF Dummydataref2;
868 CSR_VIF_IDENTIFIER VirtualInterfaceIdentifier;
869 CSR_MACADDRESS Bssid;
870} CSR_MLME_AUTONOMOUS_SCAN_LOSS_INDICATION;
871
872typedef struct CSR_MLME_BLACKOUT_ENDED_INDICATION
873{
874 CSR_DATAREF Dummydataref1;
875 CSR_DATAREF Dummydataref2;
876 CSR_VIF_IDENTIFIER VirtualInterfaceIdentifier;
877 CSR_BLACKOUT_ID BlackoutId;
878} CSR_MLME_BLACKOUT_ENDED_INDICATION;
879
880typedef struct CSR_MLME_BLOCKACK_ERROR_INDICATION
881{
882 CSR_DATAREF Dummydataref1;
883 CSR_DATAREF Dummydataref2;
884 CSR_VIF_IDENTIFIER VirtualInterfaceIdentifier;
885 CSR_REASON_CODE ResultCode;
886 CSR_MACADDRESS PeerQstaAddress;
887} CSR_MLME_BLOCKACK_ERROR_INDICATION;
888
889typedef struct CSR_MLME_CONNECTED_INDICATION
890{
891 CSR_DATAREF Dummydataref1;
892 CSR_DATAREF Dummydataref2;
893 CSR_VIF_IDENTIFIER VirtualInterfaceIdentifier;
894 CSR_CONNECTION_STATUS ConnectionStatus;
895 CSR_MACADDRESS PeerMacAddress;
896} CSR_MLME_CONNECTED_INDICATION;
897
898typedef struct CSR_MLME_CONNECT_STATUS_CONFIRM
899{
900 CSR_DATAREF Dummydataref1;
901 CSR_DATAREF Dummydataref2;
902 CSR_VIF_IDENTIFIER VirtualInterfaceIdentifier;
903 CSR_RESULT_CODE ResultCode;
904} CSR_MLME_CONNECT_STATUS_CONFIRM;
905
906typedef struct CSR_MLME_CONNECT_STATUS_REQUEST
907{
908 CSR_DATAREF InformationElements;
909 CSR_DATAREF Dummydataref2;
910 CSR_VIF_IDENTIFIER VirtualInterfaceIdentifier;
911 CSR_CONNECTION_STATUS ConnectionStatus;
912 CSR_MACADDRESS StaAddress;
913 CSR_ASSOCIATION_ID AssociationId;
914 CSR_CAPABILITY_INFORMATION AssociationCapabilityInformation;
915} CSR_MLME_CONNECT_STATUS_REQUEST;
916
917typedef struct CSR_MLME_DELETEKEYS_CONFIRM
918{
919 CSR_DATAREF Dummydataref1;
920 CSR_DATAREF Dummydataref2;
921 CSR_VIF_IDENTIFIER VirtualInterfaceIdentifier;
922 CSR_RESULT_CODE ResultCode;
923} CSR_MLME_DELETEKEYS_CONFIRM;
924
/*
 * MLME_DELETEKEYS request body: asks firmware to delete the key
 * identified by (KeyId, KeyType, Address).
 */
typedef struct CSR_MLME_DELETEKEYS_REQUEST
{
    CSR_DATAREF Dummydataref1;   /* unused bulk-data slot */
    CSR_DATAREF Dummydataref2;   /* unused bulk-data slot */
    CSR_VIF_IDENTIFIER VirtualInterfaceIdentifier;
    CSR_NATURAL16 KeyId;
    CSR_KEY_TYPE KeyType;
    CSR_MACADDRESS Address;      /* peer address the key belongs to */
} CSR_MLME_DELETEKEYS_REQUEST;
934
/*
 * MLME_DEL_AUTONOMOUS_SCAN confirm body: result of deleting the
 * autonomous scan identified by AutonomousScanId.
 */
typedef struct CSR_MLME_DEL_AUTONOMOUS_SCAN_CONFIRM
{
    CSR_DATAREF Dummydataref1;   /* unused bulk-data slot */
    CSR_DATAREF Dummydataref2;   /* unused bulk-data slot */
    CSR_VIF_IDENTIFIER VirtualInterfaceIdentifier;
    CSR_RESULT_CODE ResultCode;
    CSR_AUTONOMOUS_SCAN_ID AutonomousScanId;
} CSR_MLME_DEL_AUTONOMOUS_SCAN_CONFIRM;
943
/*
 * MLME_DEL_AUTONOMOUS_SCAN request body: asks firmware to delete the
 * autonomous scan identified by AutonomousScanId.
 */
typedef struct CSR_MLME_DEL_AUTONOMOUS_SCAN_REQUEST
{
    CSR_DATAREF Dummydataref1;   /* unused bulk-data slot */
    CSR_DATAREF Dummydataref2;   /* unused bulk-data slot */
    CSR_VIF_IDENTIFIER VirtualInterfaceIdentifier;
    CSR_AUTONOMOUS_SCAN_ID AutonomousScanId;
} CSR_MLME_DEL_AUTONOMOUS_SCAN_REQUEST;
951
/*
 * MLME_DEL_BLACKOUT confirm body: result of deleting the blackout
 * identified by BlackoutId.
 */
typedef struct CSR_MLME_DEL_BLACKOUT_CONFIRM
{
    CSR_DATAREF Dummydataref1;   /* unused bulk-data slot */
    CSR_DATAREF Dummydataref2;   /* unused bulk-data slot */
    CSR_VIF_IDENTIFIER VirtualInterfaceIdentifier;
    CSR_BLACKOUT_ID BlackoutId;
    CSR_RESULT_CODE ResultCode;
} CSR_MLME_DEL_BLACKOUT_CONFIRM;
960
/*
 * MLME_DEL_BLACKOUT request body: asks firmware to delete the blackout
 * identified by BlackoutId.
 */
typedef struct CSR_MLME_DEL_BLACKOUT_REQUEST
{
    CSR_DATAREF Dummydataref1;   /* unused bulk-data slot */
    CSR_DATAREF Dummydataref2;   /* unused bulk-data slot */
    CSR_VIF_IDENTIFIER VirtualInterfaceIdentifier;
    CSR_BLACKOUT_ID BlackoutId;
} CSR_MLME_DEL_BLACKOUT_REQUEST;
968
/*
 * MLME_DEL_PERIODIC confirm body: result of deleting the periodic
 * schedule identified by PeriodicId.
 */
typedef struct CSR_MLME_DEL_PERIODIC_CONFIRM
{
    CSR_DATAREF Dummydataref1;   /* unused bulk-data slot */
    CSR_DATAREF Dummydataref2;   /* unused bulk-data slot */
    CSR_VIF_IDENTIFIER VirtualInterfaceIdentifier;
    CSR_PERIODIC_ID PeriodicId;
    CSR_RESULT_CODE ResultCode;
} CSR_MLME_DEL_PERIODIC_CONFIRM;
977
/*
 * MLME_DEL_PERIODIC request body: asks firmware to delete the periodic
 * schedule identified by PeriodicId.
 */
typedef struct CSR_MLME_DEL_PERIODIC_REQUEST
{
    CSR_DATAREF Dummydataref1;   /* unused bulk-data slot */
    CSR_DATAREF Dummydataref2;   /* unused bulk-data slot */
    CSR_VIF_IDENTIFIER VirtualInterfaceIdentifier;
    CSR_PERIODIC_ID PeriodicId;
} CSR_MLME_DEL_PERIODIC_REQUEST;
985
/*
 * MLME_DEL_RX_TRIGGER confirm body: result of deleting the receive
 * trigger identified by TriggerId.
 */
typedef struct CSR_MLME_DEL_RX_TRIGGER_CONFIRM
{
    CSR_DATAREF Dummydataref1;   /* unused bulk-data slot */
    CSR_DATAREF Dummydataref2;   /* unused bulk-data slot */
    CSR_VIF_IDENTIFIER VirtualInterfaceIdentifier;
    CSR_TRIGGER_ID TriggerId;
    CSR_RESULT_CODE ResultCode;
} CSR_MLME_DEL_RX_TRIGGER_CONFIRM;
994
/*
 * MLME_DEL_RX_TRIGGER request body: asks firmware to delete the receive
 * trigger identified by TriggerId.
 */
typedef struct CSR_MLME_DEL_RX_TRIGGER_REQUEST
{
    CSR_DATAREF Dummydataref1;   /* unused bulk-data slot */
    CSR_DATAREF Dummydataref2;   /* unused bulk-data slot */
    CSR_VIF_IDENTIFIER VirtualInterfaceIdentifier;
    CSR_TRIGGER_ID TriggerId;
} CSR_MLME_DEL_RX_TRIGGER_REQUEST;
1002
/*
 * MLME_DEL_TRIGGERED_GET confirm body: result of deleting the triggered
 * MIB get identified by TriggeredId.  (Field order here — ResultCode
 * before the id — differs from the other DEL_* confirms; it is the wire
 * order and must be preserved.)
 */
typedef struct CSR_MLME_DEL_TRIGGERED_GET_CONFIRM
{
    CSR_DATAREF Dummydataref1;   /* unused bulk-data slot */
    CSR_DATAREF Dummydataref2;   /* unused bulk-data slot */
    CSR_VIF_IDENTIFIER VirtualInterfaceIdentifier;
    CSR_RESULT_CODE ResultCode;
    CSR_TRIGGERED_ID TriggeredId;
} CSR_MLME_DEL_TRIGGERED_GET_CONFIRM;
1011
/*
 * MLME_DEL_TRIGGERED_GET request body: asks firmware to delete the
 * triggered MIB get identified by TriggeredId.
 */
typedef struct CSR_MLME_DEL_TRIGGERED_GET_REQUEST
{
    CSR_DATAREF Dummydataref1;   /* unused bulk-data slot */
    CSR_DATAREF Dummydataref2;   /* unused bulk-data slot */
    CSR_VIF_IDENTIFIER VirtualInterfaceIdentifier;
    CSR_TRIGGERED_ID TriggeredId;
} CSR_MLME_DEL_TRIGGERED_GET_REQUEST;
1019
/*
 * MLME_DEL_TSPEC confirm body: result of deleting the traffic spec for
 * the given user priority.
 */
typedef struct CSR_MLME_DEL_TSPEC_CONFIRM
{
    CSR_DATAREF Dummydataref1;   /* unused bulk-data slot */
    CSR_DATAREF Dummydataref2;   /* unused bulk-data slot */
    CSR_VIF_IDENTIFIER VirtualInterfaceIdentifier;
    CSR_PRIORITY UserPriority;
    CSR_RESULT_CODE ResultCode;
} CSR_MLME_DEL_TSPEC_CONFIRM;
1028
/*
 * MLME_DEL_TSPEC request body: asks firmware to delete the traffic spec
 * selected by (UserPriority, Direction).
 */
typedef struct CSR_MLME_DEL_TSPEC_REQUEST
{
    CSR_DATAREF Dummydataref1;   /* unused bulk-data slot */
    CSR_DATAREF Dummydataref2;   /* unused bulk-data slot */
    CSR_VIF_IDENTIFIER VirtualInterfaceIdentifier;
    CSR_PRIORITY UserPriority;
    CSR_DIRECTION Direction;
} CSR_MLME_DEL_TSPEC_REQUEST;
1037
/*
 * MLME_GET_KEY_SEQUENCE confirm body: returns the key's sequence number
 * as eight 16-bit words (same 8-element layout as the SequenceNumber
 * field in CSR_MLME_SETKEYS_REQUEST).
 */
typedef struct CSR_MLME_GET_KEY_SEQUENCE_CONFIRM
{
    CSR_DATAREF Dummydataref1;   /* unused bulk-data slot */
    CSR_DATAREF Dummydataref2;   /* unused bulk-data slot */
    CSR_VIF_IDENTIFIER VirtualInterfaceIdentifier;
    CSR_RESULT_CODE ResultCode;
    CSR_NATURAL16 SequenceNumber[8];   /* sequence counter, 8 x 16-bit words */
} CSR_MLME_GET_KEY_SEQUENCE_CONFIRM;
1046
/*
 * MLME_GET_KEY_SEQUENCE request body: asks firmware for the sequence
 * number of the key identified by (KeyId, KeyType, Address).
 */
typedef struct CSR_MLME_GET_KEY_SEQUENCE_REQUEST
{
    CSR_DATAREF Dummydataref1;   /* unused bulk-data slot */
    CSR_DATAREF Dummydataref2;   /* unused bulk-data slot */
    CSR_VIF_IDENTIFIER VirtualInterfaceIdentifier;
    CSR_NATURAL16 KeyId;
    CSR_KEY_TYPE KeyType;
    CSR_MACADDRESS Address;      /* peer address the key belongs to */
} CSR_MLME_GET_KEY_SEQUENCE_REQUEST;
1056
/*
 * MLME_LEAVE confirm body: firmware's result for a preceding
 * MLME_LEAVE request.
 */
typedef struct CSR_MLME_LEAVE_CONFIRM
{
    CSR_DATAREF Dummydataref1;   /* unused bulk-data slot */
    CSR_DATAREF Dummydataref2;   /* unused bulk-data slot */
    CSR_VIF_IDENTIFIER VirtualInterfaceIdentifier;
    CSR_RESULT_CODE ResultCode;
} CSR_MLME_LEAVE_CONFIRM;
1064
/*
 * MLME_LEAVE request body: no parameters beyond the virtual interface —
 * the VIF itself identifies what to leave.
 */
typedef struct CSR_MLME_LEAVE_REQUEST
{
    CSR_DATAREF Dummydataref1;   /* unused bulk-data slot */
    CSR_DATAREF Dummydataref2;   /* unused bulk-data slot */
    CSR_VIF_IDENTIFIER VirtualInterfaceIdentifier;
} CSR_MLME_LEAVE_REQUEST;
1071
/*
 * MLME_MODIFY_BSS_PARAMETER confirm body: firmware's result for a
 * preceding MLME_MODIFY_BSS_PARAMETER request.
 */
typedef struct CSR_MLME_MODIFY_BSS_PARAMETER_CONFIRM
{
    CSR_DATAREF Dummydataref1;   /* unused bulk-data slot */
    CSR_DATAREF Dummydataref2;   /* unused bulk-data slot */
    CSR_VIF_IDENTIFIER VirtualInterfaceIdentifier;
    CSR_RESULT_CODE ResultCode;
} CSR_MLME_MODIFY_BSS_PARAMETER_CONFIRM;
1079
/*
 * MLME_MODIFY_BSS_PARAMETER request body: updates BSS operating
 * parameters (beacon/DTIM timing, capabilities, RTS threshold) for the
 * BSS identified by Bssid.  The first data-reference slot carries
 * associated bulk data.
 */
typedef struct CSR_MLME_MODIFY_BSS_PARAMETER_REQUEST
{
    CSR_DATAREF Data;            /* bulk data for the request — contents per firmware spec */
    CSR_DATAREF Dummydataref2;   /* unused bulk-data slot */
    CSR_VIF_IDENTIFIER VirtualInterfaceIdentifier;
    CSR_TIME_UNITS BeaconPeriod;
    CSR_BEACON_PERIODS DtimPeriod;
    CSR_CAPABILITY_INFORMATION CapabilityInformation;
    CSR_MACADDRESS Bssid;
    CSR_NATURAL16 RtsThreshold;
} CSR_MLME_MODIFY_BSS_PARAMETER_REQUEST;
1091
/*
 * MLME_PAUSE_AUTONOMOUS_SCAN confirm body: result of pausing/resuming
 * the autonomous scan identified by AutonomousScanId.
 */
typedef struct CSR_MLME_PAUSE_AUTONOMOUS_SCAN_CONFIRM
{
    CSR_DATAREF Dummydataref1;   /* unused bulk-data slot */
    CSR_DATAREF Dummydataref2;   /* unused bulk-data slot */
    CSR_VIF_IDENTIFIER VirtualInterfaceIdentifier;
    CSR_RESULT_CODE ResultCode;
    CSR_AUTONOMOUS_SCAN_ID AutonomousScanId;
} CSR_MLME_PAUSE_AUTONOMOUS_SCAN_CONFIRM;
1100
/*
 * MLME_PAUSE_AUTONOMOUS_SCAN request body: pauses or resumes the
 * autonomous scan identified by AutonomousScanId.
 */
typedef struct CSR_MLME_PAUSE_AUTONOMOUS_SCAN_REQUEST
{
    CSR_DATAREF Dummydataref1;   /* unused bulk-data slot */
    CSR_DATAREF Dummydataref2;   /* unused bulk-data slot */
    CSR_VIF_IDENTIFIER VirtualInterfaceIdentifier;
    CSR_AUTONOMOUS_SCAN_ID AutonomousScanId;
    s16 Pause;                   /* presumably boolean pause/resume flag — confirm against spec */
} CSR_MLME_PAUSE_AUTONOMOUS_SCAN_REQUEST;
1109
/*
 * MLME_POWERMGT confirm body: firmware's result for a preceding
 * MLME_POWERMGT request.
 */
typedef struct CSR_MLME_POWERMGT_CONFIRM
{
    CSR_DATAREF Dummydataref1;   /* unused bulk-data slot */
    CSR_DATAREF Dummydataref2;   /* unused bulk-data slot */
    CSR_VIF_IDENTIFIER VirtualInterfaceIdentifier;
    CSR_RESULT_CODE ResultCode;
} CSR_MLME_POWERMGT_CONFIRM;
1117
/*
 * MLME_POWERMGT request body: configures the power-save behaviour of
 * the virtual interface (power-management mode, DTIM reception, listen
 * interval, traffic window).
 */
typedef struct CSR_MLME_POWERMGT_REQUEST
{
    CSR_DATAREF Dummydataref1;   /* unused bulk-data slot */
    CSR_DATAREF Dummydataref2;   /* unused bulk-data slot */
    CSR_VIF_IDENTIFIER VirtualInterfaceIdentifier;
    CSR_POWER_MANAGEMENT_MODE PowerManagementMode;
    s16 ReceiveDtims;            /* presumably boolean: wake for DTIM beacons — confirm against spec */
    CSR_BEACON_PERIODS ListenInterval;
    CSR_TRAFFIC_WINDOW TrafficWindow;
} CSR_MLME_POWERMGT_REQUEST;
1128
/*
 * MLME_SCAN confirm body: firmware's result for a preceding
 * MLME_SCAN request.
 */
typedef struct CSR_MLME_SCAN_CONFIRM
{
    CSR_DATAREF Dummydataref1;   /* unused bulk-data slot */
    CSR_DATAREF Dummydataref2;   /* unused bulk-data slot */
    CSR_VIF_IDENTIFIER VirtualInterfaceIdentifier;
    CSR_RESULT_CODE ResultCode;
} CSR_MLME_SCAN_CONFIRM;
1136
/*
 * MLME_SCAN request body: starts a scan.  Both data-reference slots are
 * used here: the channel list and the probe-request information
 * elements travel as bulk data.
 */
typedef struct CSR_MLME_SCAN_REQUEST
{
    CSR_DATAREF ChannelList;           /* bulk data: channels to scan */
    CSR_DATAREF InformationElements;   /* bulk data: IEs for the scan */
    CSR_VIF_IDENTIFIER VirtualInterfaceIdentifier;
    CSR_IFINTERFACE Ifindex;
    CSR_SCAN_TYPE ScanType;
    CSR_MICROSECONDS32 ProbeDelay;     /* microseconds, per the type name */
    CSR_TIME_UNITS MinChannelTime;     /* per-channel dwell bounds (TU) */
    CSR_TIME_UNITS MaxChannelTime;
} CSR_MLME_SCAN_REQUEST;
1148
/*
 * MLME_SCAN_CANCEL request body: cancels the scan in progress on the
 * given virtual interface; carries no other parameters.
 */
typedef struct CSR_MLME_SCAN_CANCEL_REQUEST
{
    CSR_DATAREF Dummydataref1;   /* unused bulk-data slot */
    CSR_DATAREF Dummydataref2;   /* unused bulk-data slot */
    CSR_VIF_IDENTIFIER VirtualInterfaceIdentifier;
} CSR_MLME_SCAN_CANCEL_REQUEST;
1155
/*
 * MLME_SETKEYS confirm body: firmware's result for a preceding
 * MLME_SETKEYS request.
 */
typedef struct CSR_MLME_SETKEYS_CONFIRM
{
    CSR_DATAREF Dummydataref1;   /* unused bulk-data slot */
    CSR_DATAREF Dummydataref2;   /* unused bulk-data slot */
    CSR_VIF_IDENTIFIER VirtualInterfaceIdentifier;
    CSR_RESULT_CODE ResultCode;
} CSR_MLME_SETKEYS_CONFIRM;
1163
/*
 * MLME_SETKEYS request body: installs a cipher key.  The key material
 * itself travels in the first data-reference slot; Length presumably
 * gives the key length — confirm its units (bits vs bytes) against the
 * firmware spec.
 */
typedef struct CSR_MLME_SETKEYS_REQUEST
{
    CSR_DATAREF Key;             /* bulk data: key material */
    CSR_DATAREF Dummydataref2;   /* unused bulk-data slot */
    CSR_VIF_IDENTIFIER VirtualInterfaceIdentifier;
    CSR_NATURAL16 Length;        /* key length — units per firmware spec */
    CSR_NATURAL16 KeyId;
    CSR_KEY_TYPE KeyType;
    CSR_MACADDRESS Address;      /* peer address the key applies to */
    CSR_NATURAL16 SequenceNumber[8];   /* initial sequence counter, 8 x 16-bit words */
    CSR_CIPHER_SUITE_SELECTOR CipherSuiteSelector;
} CSR_MLME_SETKEYS_REQUEST;
1176
/*
 * MLME_SET_CHANNEL confirm body: firmware's result for a preceding
 * MLME_SET_CHANNEL request.
 */
typedef struct CSR_MLME_SET_CHANNEL_CONFIRM
{
    CSR_DATAREF Dummydataref1;   /* unused bulk-data slot */
    CSR_DATAREF Dummydataref2;   /* unused bulk-data slot */
    CSR_VIF_IDENTIFIER VirtualInterfaceIdentifier;
    CSR_RESULT_CODE ResultCode;
} CSR_MLME_SET_CHANNEL_CONFIRM;
1184
/*
 * MLME_SET_CHANNEL request body: tunes the interface to a channel with
 * an availability duration/interval (values in time units, per the
 * CSR_TIME_UNITS field type).
 */
typedef struct CSR_MLME_SET_CHANNEL_REQUEST
{
    CSR_DATAREF Dummydataref1;   /* unused bulk-data slot */
    CSR_DATAREF Dummydataref2;   /* unused bulk-data slot */
    CSR_VIF_IDENTIFIER VirtualInterfaceIdentifier;
    CSR_IFINTERFACE Ifindex;
    CSR_CHANNEL_NUMBER Channel;
    CSR_MACADDRESS Address;      /* role unclear from here — confirm against firmware spec */
    CSR_TIME_UNITS AvailabilityDuration;
    CSR_TIME_UNITS AvailabilityInterval;
} CSR_MLME_SET_CHANNEL_REQUEST;
1196
/*
 * MLME_SET_PACKET_FILTER confirm body: firmware's result for a
 * preceding MLME_SET_PACKET_FILTER request.
 */
typedef struct CSR_MLME_SET_PACKET_FILTER_CONFIRM
{
    CSR_DATAREF Dummydataref1;   /* unused bulk-data slot */
    CSR_DATAREF Dummydataref2;   /* unused bulk-data slot */
    CSR_VIF_IDENTIFIER VirtualInterfaceIdentifier;
    CSR_RESULT_CODE ResultCode;
} CSR_MLME_SET_PACKET_FILTER_CONFIRM;
1204
/*
 * MLME_SET_PACKET_FILTER request body: configures receive packet
 * filtering, including an IPv4 address used for ARP filtering.  Filter
 * details travel as bulk data in the first data-reference slot.
 */
typedef struct CSR_MLME_SET_PACKET_FILTER_REQUEST
{
    CSR_DATAREF InformationElements;   /* bulk data: filter specification */
    CSR_DATAREF Dummydataref2;         /* unused bulk-data slot */
    CSR_VIF_IDENTIFIER VirtualInterfaceIdentifier;
    CSR_PACKET_FILTER_MODE PacketFilterMode;
    CSR_IPV4_ADDRESS ArpFilterAddress; /* IPv4 address for ARP filtering */
} CSR_MLME_SET_PACKET_FILTER_REQUEST;
1213
/*
 * MLME_SET_TIM confirm body: firmware's result for a preceding
 * MLME_SET_TIM request.
 */
typedef struct CSR_MLME_SET_TIM_CONFIRM
{
    CSR_DATAREF Dummydataref1;   /* unused bulk-data slot */
    CSR_DATAREF Dummydataref2;   /* unused bulk-data slot */
    CSR_VIF_IDENTIFIER VirtualInterfaceIdentifier;
    CSR_RESULT_CODE ResultCode;
} CSR_MLME_SET_TIM_CONFIRM;
1221
/*
 * MLME_SET_TIM request body: sets the TIM (traffic indication map)
 * value for the station with the given association id.
 */
typedef struct CSR_MLME_SET_TIM_REQUEST
{
    CSR_DATAREF Dummydataref1;   /* unused bulk-data slot */
    CSR_DATAREF Dummydataref2;   /* unused bulk-data slot */
    CSR_VIF_IDENTIFIER VirtualInterfaceIdentifier;
    CSR_ASSOCIATION_ID AssociationId;
    s16 TimValue;                /* presumably boolean set/clear TIM bit — confirm against spec */
} CSR_MLME_SET_TIM_REQUEST;
1230
/*
 * MLME_SM_START confirm body: firmware's result for a preceding
 * MLME_SM_START request.
 */
typedef struct CSR_MLME_SM_START_CONFIRM
{
    CSR_DATAREF Dummydataref1;   /* unused bulk-data slot */
    CSR_DATAREF Dummydataref2;   /* unused bulk-data slot */
    CSR_VIF_IDENTIFIER VirtualInterfaceIdentifier;
    CSR_RESULT_CODE ResultCode;
} CSR_MLME_SM_START_CONFIRM;
1238
/*
 * MLME_SM_START request body: starts a BSS.  Both data-reference slots
 * are used: the beacon template and the BSS parameters travel as bulk
 * data.
 */
typedef struct CSR_MLME_SM_START_REQUEST
{
    CSR_DATAREF Beacon;          /* bulk data: beacon template */
    CSR_DATAREF BssParameters;   /* bulk data: BSS parameter set */
    CSR_VIF_IDENTIFIER VirtualInterfaceIdentifier;
    CSR_IFINTERFACE Ifindex;
    CSR_CHANNEL_NUMBER Channel;
    CSR_MACADDRESS InterfaceAddress;   /* local MAC address for the interface */
    CSR_MACADDRESS Bssid;              /* BSSID of the BSS being started */
    CSR_TIME_UNITS BeaconPeriod;
    CSR_BEACON_PERIODS DtimPeriod;
    CSR_CAPABILITY_INFORMATION CapabilityInformation;
} CSR_MLME_SM_START_REQUEST;
1252
/*
 * MLME_START_AGGREGATION confirm body: result of starting a block-ack
 * aggregation session, echoing the (peer, priority, direction) triple
 * and returning the session's sequence number.
 */
typedef struct CSR_MLME_START_AGGREGATION_CONFIRM
{
    CSR_DATAREF Dummydataref1;   /* unused bulk-data slot */
    CSR_DATAREF Dummydataref2;   /* unused bulk-data slot */
    CSR_VIF_IDENTIFIER VirtualInterfaceIdentifier;
    CSR_MACADDRESS PeerQstaAddress;
    CSR_PRIORITY UserPriority;
    CSR_DIRECTION Direction;
    CSR_RESULT_CODE ResultCode;
    CSR_SEQUENCE_NUMBER SequenceNumber;
} CSR_MLME_START_AGGREGATION_CONFIRM;
1264
/*
 * MLME_START_AGGREGATION request body: starts a block-ack aggregation
 * session with a peer QSTA for one (priority, direction) stream, with
 * reorder-buffer size and block-ack timeout parameters.
 */
typedef struct CSR_MLME_START_AGGREGATION_REQUEST
{
    CSR_DATAREF Dummydataref1;   /* unused bulk-data slot */
    CSR_DATAREF Dummydataref2;   /* unused bulk-data slot */
    CSR_VIF_IDENTIFIER VirtualInterfaceIdentifier;
    CSR_MACADDRESS PeerQstaAddress;
    CSR_PRIORITY UserPriority;
    CSR_DIRECTION Direction;
    CSR_STARTING_SEQUENCE_NUMBER StartingSequenceNumber;
    CSR_NATURAL16 BufferSize;
    CSR_TIME_UNITS BlockAckTimeout;   /* timeout in time units, per the field type */
} CSR_MLME_START_AGGREGATION_REQUEST;
1277
/*
 * MLME_STOP_AGGREGATION confirm body: result of stopping the block-ack
 * aggregation session identified by (peer, priority, direction).
 */
typedef struct CSR_MLME_STOP_AGGREGATION_CONFIRM
{
    CSR_DATAREF Dummydataref1;   /* unused bulk-data slot */
    CSR_DATAREF Dummydataref2;   /* unused bulk-data slot */
    CSR_VIF_IDENTIFIER VirtualInterfaceIdentifier;
    CSR_MACADDRESS PeerQstaAddress;
    CSR_PRIORITY UserPriority;
    CSR_DIRECTION Direction;
    CSR_RESULT_CODE ResultCode;
} CSR_MLME_STOP_AGGREGATION_CONFIRM;
1288
/*
 * MLME_STOP_AGGREGATION request body: stops the block-ack aggregation
 * session identified by (peer, priority, direction).
 */
typedef struct CSR_MLME_STOP_AGGREGATION_REQUEST
{
    CSR_DATAREF Dummydataref1;   /* unused bulk-data slot */
    CSR_DATAREF Dummydataref2;   /* unused bulk-data slot */
    CSR_VIF_IDENTIFIER VirtualInterfaceIdentifier;
    CSR_MACADDRESS PeerQstaAddress;
    CSR_PRIORITY UserPriority;
    CSR_DIRECTION Direction;
} CSR_MLME_STOP_AGGREGATION_REQUEST;
1298
/*
 * MLME_TRIGGERED_GET indication body: delivers the result of a
 * previously installed triggered MIB get.  The MIB attribute value
 * travels as bulk data in the first data-reference slot; Status and
 * ErrorIndex report MIB-access errors.
 */
typedef struct CSR_MLME_TRIGGERED_GET_INDICATION
{
    CSR_DATAREF MibAttributeValue;   /* bulk data: the retrieved MIB attribute value */
    CSR_DATAREF Dummydataref2;       /* unused bulk-data slot */
    CSR_VIF_IDENTIFIER VirtualInterfaceIdentifier;
    CSR_MIB_STATUS Status;
    CSR_NATURAL16 ErrorIndex;
    CSR_TRIGGERED_ID TriggeredId;    /* which triggered get produced this */
} CSR_MLME_TRIGGERED_GET_INDICATION;
1308
1309typedef struct CSR_SIGNAL_PRIMITIVE
1310{
1311 CSR_SIGNAL_PRIMITIVE_HEADER SignalPrimitiveHeader;
1312 union
1313 {
1314 CSR_MA_PACKET_REQUEST MaPacketRequest;
1315 CSR_MA_PACKET_CONFIRM MaPacketConfirm;
1316 CSR_MA_PACKET_INDICATION MaPacketIndication;
1317 CSR_MA_PACKET_CANCEL_REQUEST MaPacketCancelRequest;
1318 CSR_MA_VIF_AVAILABILITY_RESPONSE MaVifAvailabilityResponse;
1319 CSR_MA_VIF_AVAILABILITY_INDICATION MaVifAvailabilityIndication;
1320 CSR_MA_PACKET_ERROR_INDICATION MaPacketErrorIndication;
1321 CSR_MLME_RESET_REQUEST MlmeResetRequest;
1322 CSR_MLME_RESET_CONFIRM MlmeResetConfirm;
1323 CSR_MLME_GET_REQUEST MlmeGetRequest;
1324 CSR_MLME_GET_CONFIRM MlmeGetConfirm;
1325 CSR_MLME_SET_REQUEST MlmeSetRequest;
1326 CSR_MLME_SET_CONFIRM MlmeSetConfirm;
1327 CSR_MLME_GET_NEXT_REQUEST MlmeGetNextRequest;
1328 CSR_MLME_GET_NEXT_CONFIRM MlmeGetNextConfirm;
1329 CSR_MLME_POWERMGT_REQUEST MlmePowermgtRequest;
1330 CSR_MLME_POWERMGT_CONFIRM MlmePowermgtConfirm;
1331 CSR_MLME_SCAN_REQUEST MlmeScanRequest;
1332 CSR_MLME_SCAN_CONFIRM MlmeScanConfirm;
1333 CSR_MLME_HL_SYNC_REQUEST MlmeHlSyncRequest;
1334 CSR_MLME_HL_SYNC_CONFIRM MlmeHlSyncConfirm;
1335 CSR_MLME_MEASURE_REQUEST MlmeMeasureRequest;
1336 CSR_MLME_MEASURE_CONFIRM MlmeMeasureConfirm;
1337 CSR_MLME_MEASURE_INDICATION MlmeMeasureIndication;
1338 CSR_MLME_SETKEYS_REQUEST MlmeSetkeysRequest;
1339 CSR_MLME_SETKEYS_CONFIRM MlmeSetkeysConfirm;
1340 CSR_MLME_DELETEKEYS_REQUEST MlmeDeletekeysRequest;
1341 CSR_MLME_DELETEKEYS_CONFIRM MlmeDeletekeysConfirm;
1342 CSR_MLME_AUTONOMOUS_SCAN_LOSS_INDICATION MlmeAutonomousScanLossIndication;
1343 CSR_MLME_CONNECTED_INDICATION MlmeConnectedIndication;
1344 CSR_MLME_SCAN_CANCEL_REQUEST MlmeScanCancelRequest;
1345 CSR_MLME_HL_SYNC_CANCEL_REQUEST MlmeHlSyncCancelRequest;
1346 CSR_MLME_HL_SYNC_CANCEL_CONFIRM MlmeHlSyncCancelConfirm;
1347 CSR_MLME_ADD_PERIODIC_REQUEST MlmeAddPeriodicRequest;
1348 CSR_MLME_ADD_PERIODIC_CONFIRM MlmeAddPeriodicConfirm;
1349 CSR_MLME_DEL_PERIODIC_REQUEST MlmeDelPeriodicRequest;
1350 CSR_MLME_DEL_PERIODIC_CONFIRM MlmeDelPeriodicConfirm;
1351 CSR_MLME_ADD_AUTONOMOUS_SCAN_REQUEST MlmeAddAutonomousScanRequest;
1352 CSR_MLME_ADD_AUTONOMOUS_SCAN_CONFIRM MlmeAddAutonomousScanConfirm;
1353 CSR_MLME_DEL_AUTONOMOUS_SCAN_REQUEST MlmeDelAutonomousScanRequest;
1354 CSR_MLME_DEL_AUTONOMOUS_SCAN_CONFIRM MlmeDelAutonomousScanConfirm;