-rw-r--r--  Documentation/DocBook/Makefile | 2
-rw-r--r--  Documentation/DocBook/device-drivers.tmpl | 418
-rw-r--r--  Documentation/DocBook/kernel-api.tmpl | 377
-rw-r--r--  Documentation/kernel-parameters.txt | 13
-rw-r--r--  Makefile | 2
-rw-r--r--  arch/arm/mach-rpc/riscpc.c | 6
-rw-r--r--  arch/x86/boot/compressed/head_32.S | 8
-rw-r--r--  arch/x86/boot/compressed/head_64.S | 10
-rw-r--r--  arch/x86/boot/copy.S | 40
-rw-r--r--  arch/x86/boot/header.S | 2
-rw-r--r--  arch/x86/boot/pmjump.S | 16
-rw-r--r--  arch/x86/configs/i386_defconfig | 6
-rw-r--r--  arch/x86/configs/x86_64_defconfig | 6
-rw-r--r--  arch/x86/ia32/ia32_signal.c | 70
-rw-r--r--  arch/x86/include/asm/io.h | 11
-rw-r--r--  arch/x86/include/asm/iomap.h | 6
-rw-r--r--  arch/x86/include/asm/irq_vectors.h | 2
-rw-r--r--  arch/x86/include/asm/linkage.h | 64
-rw-r--r--  arch/x86/include/asm/page_32_types.h | 2
-rw-r--r--  arch/x86/include/asm/page_64_types.h | 2
-rw-r--r--  arch/x86/include/asm/page_types.h | 6
-rw-r--r--  arch/x86/include/asm/pat.h | 3
-rw-r--r--  arch/x86/include/asm/pgtable-2level_types.h | 2
-rw-r--r--  arch/x86/include/asm/pgtable-3level_types.h | 2
-rw-r--r--  arch/x86/include/asm/pgtable_64_types.h | 1
-rw-r--r--  arch/x86/include/asm/pgtable_types.h | 6
-rw-r--r--  arch/x86/include/asm/processor.h | 10
-rw-r--r--  arch/x86/include/asm/syscalls.h | 2
-rw-r--r--  arch/x86/include/asm/uaccess_32.h | 4
-rw-r--r--  arch/x86/include/asm/uaccess_64.h | 21
-rw-r--r--  arch/x86/kernel/acpi/realmode/wakeup.S | 4
-rw-r--r--  arch/x86/kernel/acpi/wakeup_32.S | 2
-rw-r--r--  arch/x86/kernel/acpi/wakeup_64.S | 4
-rw-r--r--  arch/x86/kernel/alternative.c | 6
-rw-r--r--  arch/x86/kernel/cpu/cpufreq/e_powersaver.c | 6
-rw-r--r--  arch/x86/kernel/cpu/cpufreq/speedstep-centrino.c | 6
-rw-r--r--  arch/x86/kernel/cpu/intel.c | 4
-rw-r--r--  arch/x86/kernel/cpu/mcheck/mce_intel_64.c | 6
-rw-r--r--  arch/x86/kernel/cpu/mcheck/p4.c | 4
-rw-r--r--  arch/x86/kernel/e820.c | 3
-rw-r--r--  arch/x86/kernel/efi_stub_32.S | 3
-rw-r--r--  arch/x86/kernel/efi_stub_64.S | 7
-rw-r--r--  arch/x86/kernel/entry_32.S | 4
-rw-r--r--  arch/x86/kernel/entry_64.S | 25
-rw-r--r--  arch/x86/kernel/head_32.S | 4
-rw-r--r--  arch/x86/kernel/head_64.S | 4
-rw-r--r--  arch/x86/kernel/machine_kexec_32.c | 2
-rw-r--r--  arch/x86/kernel/ptrace.c | 2
-rw-r--r--  arch/x86/kernel/relocate_kernel_32.S | 2
-rw-r--r--  arch/x86/kernel/relocate_kernel_64.S | 4
-rw-r--r--  arch/x86/kernel/trampoline_32.S | 2
-rw-r--r--  arch/x86/kernel/trampoline_64.S | 4
-rw-r--r--  arch/x86/kernel/traps.c | 2
-rw-r--r--  arch/x86/kernel/vmiclock_32.c | 3
-rw-r--r--  arch/x86/kernel/vmlinux_32.lds.S | 2
-rw-r--r--  arch/x86/kernel/vmlinux_64.lds.S | 2
-rw-r--r--  arch/x86/lib/getuser.S | 2
-rw-r--r--  arch/x86/mm/fault.c | 1078
-rw-r--r--  arch/x86/mm/iomap_32.c | 58
-rw-r--r--  arch/x86/mm/memtest.c | 156
-rw-r--r--  arch/x86/mm/numa_32.c | 2
-rw-r--r--  arch/x86/mm/pageattr.c | 7
-rw-r--r--  arch/x86/mm/pat.c | 46
-rw-r--r--  arch/x86/power/hibernate_asm_32.S | 2
-rw-r--r--  arch/x86/power/hibernate_asm_64.S | 2
-rw-r--r--  arch/x86/vdso/vma.c | 4
-rw-r--r--  arch/x86/xen/enlighten.c | 3
-rw-r--r--  arch/x86/xen/xen-head.S | 2
-rw-r--r--  crypto/ahash.c | 2
-rw-r--r--  drivers/acpi/osl.c | 4
-rw-r--r--  drivers/atm/lanai.c | 2
-rw-r--r--  drivers/base/sys.c | 7
-rw-r--r--  drivers/gpu/drm/i915/i915_dma.c | 11
-rw-r--r--  drivers/gpu/drm/i915/i915_drv.c | 27
-rw-r--r--  drivers/gpu/drm/i915/i915_drv.h | 1
-rw-r--r--  drivers/gpu/drm/i915/i915_gem.c | 72
-rw-r--r--  drivers/gpu/drm/i915/intel_display.c | 1
-rw-r--r--  drivers/i2c/busses/i2c-acorn.c | 5
-rw-r--r--  drivers/i2c/busses/i2c-amd8111.c | 4
-rw-r--r--  drivers/i2c/busses/i2c-ixp2000.c | 2
-rw-r--r--  drivers/i2c/busses/i2c-pxa.c | 2
-rw-r--r--  drivers/i2c/busses/scx200_i2c.c | 2
-rw-r--r--  drivers/i2c/i2c-core.c | 3
-rw-r--r--  drivers/i2c/i2c-dev.c | 6
-rw-r--r--  drivers/ieee1394/dma.h | 1
-rw-r--r--  drivers/ieee1394/ieee1394_core.c | 1
-rw-r--r--  drivers/ieee1394/ieee1394_transactions.c | 31
-rw-r--r--  drivers/ieee1394/ieee1394_transactions.h | 2
-rw-r--r--  drivers/ieee1394/iso.h | 1
-rw-r--r--  drivers/ieee1394/nodemgr.c | 10
-rw-r--r--  drivers/ieee1394/nodemgr.h | 18
-rw-r--r--  drivers/isdn/sc/shmem.c | 2
-rw-r--r--  drivers/media/dvb/Kconfig | 4
-rw-r--r--  drivers/media/dvb/Makefile | 2
-rw-r--r--  drivers/media/dvb/firewire/Kconfig | 22
-rw-r--r--  drivers/media/dvb/firewire/Makefile | 8
-rw-r--r--  drivers/media/dvb/firewire/firedtv-1394.c | 285
-rw-r--r--  drivers/media/dvb/firewire/firedtv-avc.c | 1315
-rw-r--r--  drivers/media/dvb/firewire/firedtv-ci.c | 260
-rw-r--r--  drivers/media/dvb/firewire/firedtv-dvb.c | 364
-rw-r--r--  drivers/media/dvb/firewire/firedtv-fe.c | 247
-rw-r--r--  drivers/media/dvb/firewire/firedtv-rc.c | 190
-rw-r--r--  drivers/media/dvb/firewire/firedtv.h | 182
-rw-r--r--  drivers/net/Kconfig | 11
-rw-r--r--  drivers/net/Makefile | 1
-rw-r--r--  drivers/net/atl1c/Makefile | 2
-rw-r--r--  drivers/net/atl1c/atl1c.h | 606
-rw-r--r--  drivers/net/atl1c/atl1c_ethtool.c | 317
-rw-r--r--  drivers/net/atl1c/atl1c_hw.c | 527
-rw-r--r--  drivers/net/atl1c/atl1c_hw.h | 859
-rw-r--r--  drivers/net/atl1c/atl1c_main.c | 2797
-rw-r--r--  drivers/net/cxgb3/cxgb3_main.c | 1
-rw-r--r--  drivers/net/cxgb3/t3_hw.c | 7
-rw-r--r--  drivers/net/forcedeth.c | 13
-rw-r--r--  drivers/net/mv643xx_eth.c | 9
-rw-r--r--  drivers/net/smsc911x.c | 2
-rw-r--r--  drivers/net/smsc9420.c | 6
-rw-r--r--  drivers/net/smsc9420.h | 1
-rw-r--r--  drivers/net/sundance.c | 2
-rw-r--r--  drivers/net/sungem.c | 2
-rw-r--r--  drivers/net/sunlance.c | 4
-rw-r--r--  drivers/net/tg3.c | 4
-rw-r--r--  drivers/net/veth.c | 9
-rw-r--r--  drivers/net/wimax/i2400m/i2400m.h | 2
-rw-r--r--  fs/ext4/ialloc.c | 7
-rw-r--r--  fs/ext4/inode.c | 9
-rw-r--r--  fs/proc/inode.c | 4
-rw-r--r--  fs/proc/page.c | 2
-rw-r--r--  include/linux/i2c-dev.h | 2
-rw-r--r--  include/linux/i2c.h | 2
-rw-r--r--  include/linux/if_vlan.h | 1
-rw-r--r--  include/linux/io-mapping.h | 48
-rw-r--r--  include/linux/kprobes.h | 22
-rw-r--r--  include/linux/mmiotrace.h | 78
-rw-r--r--  include/linux/skbuff.h | 9
-rw-r--r--  include/linux/uaccess.h | 4
-rw-r--r--  include/net/sock.h | 1
-rw-r--r--  mm/filemap.c | 10
-rw-r--r--  mm/filemap_xip.c | 2
-rw-r--r--  mm/vmalloc.c | 3
-rw-r--r--  net/core/net_namespace.c | 86
-rw-r--r--  net/core/skbuff.c | 8
-rw-r--r--  net/core/sock.c | 3
-rw-r--r--  net/ipv4/cipso_ipv4.c | 9
-rw-r--r--  net/ipv4/tcp_output.c | 1
-rw-r--r--  security/selinux/netlabel.c | 4
146 files changed, 9805 insertions, 1387 deletions
diff --git a/Documentation/DocBook/Makefile b/Documentation/DocBook/Makefile
index dc3154e49279..1462ed86d40a 100644
--- a/Documentation/DocBook/Makefile
+++ b/Documentation/DocBook/Makefile
@@ -6,7 +6,7 @@
 # To add a new book the only step required is to add the book to the
 # list of DOCBOOKS.
 
-DOCBOOKS := z8530book.xml mcabook.xml \
+DOCBOOKS := z8530book.xml mcabook.xml device-drivers.xml \
 	    kernel-hacking.xml kernel-locking.xml deviceiobook.xml \
 	    procfs-guide.xml writing_usb_driver.xml networking.xml \
 	    kernel-api.xml filesystems.xml lsm.xml usb.xml kgdb.xml \
diff --git a/Documentation/DocBook/device-drivers.tmpl b/Documentation/DocBook/device-drivers.tmpl
new file mode 100644
index 000000000000..94a20fe8fedf
--- /dev/null
+++ b/Documentation/DocBook/device-drivers.tmpl
@@ -0,0 +1,418 @@
1<?xml version="1.0" encoding="UTF-8"?>
2<!DOCTYPE book PUBLIC "-//OASIS//DTD DocBook XML V4.1.2//EN"
3 "http://www.oasis-open.org/docbook/xml/4.1.2/docbookx.dtd" []>
4
5<book id="LinuxDriversAPI">
6 <bookinfo>
7 <title>Linux Device Drivers</title>
8
9 <legalnotice>
10 <para>
11 This documentation is free software; you can redistribute
12 it and/or modify it under the terms of the GNU General Public
13 License as published by the Free Software Foundation; either
14 version 2 of the License, or (at your option) any later
15 version.
16 </para>
17
18 <para>
19 This program is distributed in the hope that it will be
20 useful, but WITHOUT ANY WARRANTY; without even the implied
21 warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
22 See the GNU General Public License for more details.
23 </para>
24
25 <para>
26 You should have received a copy of the GNU General Public
27 License along with this program; if not, write to the Free
28 Software Foundation, Inc., 59 Temple Place, Suite 330, Boston,
29 MA 02111-1307 USA
30 </para>
31
32 <para>
33 For more details see the file COPYING in the source
34 distribution of Linux.
35 </para>
36 </legalnotice>
37 </bookinfo>
38
39<toc></toc>
40
41 <chapter id="Basics">
42 <title>Driver Basics</title>
43 <sect1><title>Driver Entry and Exit points</title>
44!Iinclude/linux/init.h
45 </sect1>
46
47 <sect1><title>Atomic and pointer manipulation</title>
48!Iarch/x86/include/asm/atomic_32.h
49!Iarch/x86/include/asm/unaligned.h
50 </sect1>
51
52 <sect1><title>Delaying, scheduling, and timer routines</title>
53!Iinclude/linux/sched.h
54!Ekernel/sched.c
55!Ekernel/timer.c
56 </sect1>
57 <sect1><title>High-resolution timers</title>
58!Iinclude/linux/ktime.h
59!Iinclude/linux/hrtimer.h
60!Ekernel/hrtimer.c
61 </sect1>
62 <sect1><title>Workqueues and Kevents</title>
63!Ekernel/workqueue.c
64 </sect1>
65 <sect1><title>Internal Functions</title>
66!Ikernel/exit.c
67!Ikernel/signal.c
68!Iinclude/linux/kthread.h
69!Ekernel/kthread.c
70 </sect1>
71
72 <sect1><title>Kernel objects manipulation</title>
73<!--
74X!Iinclude/linux/kobject.h
75-->
76!Elib/kobject.c
77 </sect1>
78
79 <sect1><title>Kernel utility functions</title>
80!Iinclude/linux/kernel.h
81!Ekernel/printk.c
82!Ekernel/panic.c
83!Ekernel/sys.c
84!Ekernel/rcupdate.c
85 </sect1>
86
87 <sect1><title>Device Resource Management</title>
88!Edrivers/base/devres.c
89 </sect1>
90
91 </chapter>
92
93 <chapter id="devdrivers">
94 <title>Device drivers infrastructure</title>
95 <sect1><title>Device Drivers Base</title>
96<!--
97X!Iinclude/linux/device.h
98-->
99!Edrivers/base/driver.c
100!Edrivers/base/core.c
101!Edrivers/base/class.c
102!Edrivers/base/firmware_class.c
103!Edrivers/base/transport_class.c
104<!-- Cannot be included, because
105 attribute_container_add_class_device_adapter
106 and attribute_container_classdev_to_container
107 exceed allowed 44 characters maximum
108X!Edrivers/base/attribute_container.c
109-->
110!Edrivers/base/sys.c
111<!--
112X!Edrivers/base/interface.c
113-->
114!Edrivers/base/platform.c
115!Edrivers/base/bus.c
116 </sect1>
117 <sect1><title>Device Drivers Power Management</title>
118!Edrivers/base/power/main.c
119 </sect1>
120 <sect1><title>Device Drivers ACPI Support</title>
121<!-- Internal functions only
122X!Edrivers/acpi/sleep/main.c
123X!Edrivers/acpi/sleep/wakeup.c
124X!Edrivers/acpi/motherboard.c
125X!Edrivers/acpi/bus.c
126-->
127!Edrivers/acpi/scan.c
128!Idrivers/acpi/scan.c
129<!-- No correct structured comments
130X!Edrivers/acpi/pci_bind.c
131-->
132 </sect1>
133 <sect1><title>Device drivers PnP support</title>
134!Idrivers/pnp/core.c
135<!-- No correct structured comments
136X!Edrivers/pnp/system.c
137 -->
138!Edrivers/pnp/card.c
139!Idrivers/pnp/driver.c
140!Edrivers/pnp/manager.c
141!Edrivers/pnp/support.c
142 </sect1>
143 <sect1><title>Userspace IO devices</title>
144!Edrivers/uio/uio.c
145!Iinclude/linux/uio_driver.h
146 </sect1>
147 </chapter>
148
149 <chapter id="parportdev">
150 <title>Parallel Port Devices</title>
151!Iinclude/linux/parport.h
152!Edrivers/parport/ieee1284.c
153!Edrivers/parport/share.c
154!Idrivers/parport/daisy.c
155 </chapter>
156
157 <chapter id="message_devices">
158 <title>Message-based devices</title>
159 <sect1><title>Fusion message devices</title>
160!Edrivers/message/fusion/mptbase.c
161!Idrivers/message/fusion/mptbase.c
162!Edrivers/message/fusion/mptscsih.c
163!Idrivers/message/fusion/mptscsih.c
164!Idrivers/message/fusion/mptctl.c
165!Idrivers/message/fusion/mptspi.c
166!Idrivers/message/fusion/mptfc.c
167!Idrivers/message/fusion/mptlan.c
168 </sect1>
169 <sect1><title>I2O message devices</title>
170!Iinclude/linux/i2o.h
171!Idrivers/message/i2o/core.h
172!Edrivers/message/i2o/iop.c
173!Idrivers/message/i2o/iop.c
174!Idrivers/message/i2o/config-osm.c
175!Edrivers/message/i2o/exec-osm.c
176!Idrivers/message/i2o/exec-osm.c
177!Idrivers/message/i2o/bus-osm.c
178!Edrivers/message/i2o/device.c
179!Idrivers/message/i2o/device.c
180!Idrivers/message/i2o/driver.c
181!Idrivers/message/i2o/pci.c
182!Idrivers/message/i2o/i2o_block.c
183!Idrivers/message/i2o/i2o_scsi.c
184!Idrivers/message/i2o/i2o_proc.c
185 </sect1>
186 </chapter>
187
188 <chapter id="snddev">
189 <title>Sound Devices</title>
190!Iinclude/sound/core.h
191!Esound/sound_core.c
192!Iinclude/sound/pcm.h
193!Esound/core/pcm.c
194!Esound/core/device.c
195!Esound/core/info.c
196!Esound/core/rawmidi.c
197!Esound/core/sound.c
198!Esound/core/memory.c
199!Esound/core/pcm_memory.c
200!Esound/core/init.c
201!Esound/core/isadma.c
202!Esound/core/control.c
203!Esound/core/pcm_lib.c
204!Esound/core/hwdep.c
205!Esound/core/pcm_native.c
206!Esound/core/memalloc.c
207<!-- FIXME: Removed for now since no structured comments in source
208X!Isound/sound_firmware.c
209-->
210 </chapter>
211
212 <chapter id="uart16x50">
213 <title>16x50 UART Driver</title>
214!Iinclude/linux/serial_core.h
215!Edrivers/serial/serial_core.c
216!Edrivers/serial/8250.c
217 </chapter>
218
219 <chapter id="fbdev">
220 <title>Frame Buffer Library</title>
221
222 <para>
223 The frame buffer drivers depend heavily on four data structures.
224 These structures are declared in include/linux/fb.h. They are
225 fb_info, fb_var_screeninfo, fb_fix_screeninfo and fb_monospecs.
226 The last three can be made available to and from userland.
227 </para>
228
229 <para>
230 fb_info defines the current state of a particular video card.
231 Inside fb_info, there exists a fb_ops structure which is a
232 collection of needed functions to make fbdev and fbcon work.
233 fb_info is only visible to the kernel.
234 </para>
235
236 <para>
237 fb_var_screeninfo is used to describe the features of a video card
238 that are user defined. With fb_var_screeninfo, things such as
239 depth and the resolution may be defined.
240 </para>
241
242 <para>
243 The next structure is fb_fix_screeninfo. This defines the
244 properties of a card that are created when a mode is set and can't
245 be changed otherwise. A good example of this is the start of the
246 frame buffer memory. This "locks" the address of the frame buffer
247 memory, so that it cannot be changed or moved.
248 </para>
249
250 <para>
251 The last structure is fb_monospecs. In the old API, there was
252 little importance for fb_monospecs. This allowed for forbidden things
253 such as setting a mode of 800x600 on a fix frequency monitor. With
254 the new API, fb_monospecs prevents such things, and if used
255 correctly, can prevent a monitor from being cooked. fb_monospecs
256 will not be useful until kernels 2.5.x.
257 </para>
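For illustration only (this sketch is not part of the patch or of the DocBook text above; the device, resolution and helper usage are invented), a minimal fbdev probe ties the four structures together roughly like this:

/*
 * Hypothetical probe, assuming a platform device whose framebuffer
 * memory has already been mapped.  See include/linux/fb.h for the
 * full structure definitions.
 */
#include <linux/fb.h>
#include <linux/platform_device.h>

static struct fb_ops example_fb_ops = {
	.owner		= THIS_MODULE,
	.fb_fillrect	= cfb_fillrect,		/* generic software drawing */
	.fb_copyarea	= cfb_copyarea,
	.fb_imageblit	= cfb_imageblit,
};

static int example_fb_probe(struct platform_device *pdev)
{
	struct fb_info *info;

	info = framebuffer_alloc(0, &pdev->dev);	/* kernel-only state */
	if (!info)
		return -ENOMEM;

	/* fb_var_screeninfo: user-changeable parameters */
	info->var.xres = info->var.xres_virtual = 640;
	info->var.yres = info->var.yres_virtual = 480;
	info->var.bits_per_pixel = 16;

	/* fb_fix_screeninfo: properties fixed once the mode is set */
	info->fix.line_length = 640 * 2;

	info->fbops = &example_fb_ops;

	/*
	 * A real driver must also point info->screen_base and
	 * info->fix.smem_start/smem_len at the actual framebuffer
	 * memory before registering.
	 */
	return register_framebuffer(info);
}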
258
259 <sect1><title>Frame Buffer Memory</title>
260!Edrivers/video/fbmem.c
261 </sect1>
262<!--
263 <sect1><title>Frame Buffer Console</title>
264X!Edrivers/video/console/fbcon.c
265 </sect1>
266-->
267 <sect1><title>Frame Buffer Colormap</title>
268!Edrivers/video/fbcmap.c
269 </sect1>
270<!-- FIXME:
271 drivers/video/fbgen.c has no docs, which stuffs up the sgml. Comment
272 out until somebody adds docs. KAO
273 <sect1><title>Frame Buffer Generic Functions</title>
274X!Idrivers/video/fbgen.c
275 </sect1>
276KAO -->
277 <sect1><title>Frame Buffer Video Mode Database</title>
278!Idrivers/video/modedb.c
279!Edrivers/video/modedb.c
280 </sect1>
281 <sect1><title>Frame Buffer Macintosh Video Mode Database</title>
282!Edrivers/video/macmodes.c
283 </sect1>
284 <sect1><title>Frame Buffer Fonts</title>
285 <para>
286 Refer to the file drivers/video/console/fonts.c for more information.
287 </para>
288<!-- FIXME: Removed for now since no structured comments in source
289X!Idrivers/video/console/fonts.c
290-->
291 </sect1>
292 </chapter>
293
294 <chapter id="input_subsystem">
295 <title>Input Subsystem</title>
296!Iinclude/linux/input.h
297!Edrivers/input/input.c
298!Edrivers/input/ff-core.c
299!Edrivers/input/ff-memless.c
300 </chapter>
301
302 <chapter id="spi">
303 <title>Serial Peripheral Interface (SPI)</title>
304 <para>
305 SPI is the "Serial Peripheral Interface", widely used with
306 embedded systems because it is a simple and efficient
307 interface: basically a multiplexed shift register.
308 Its three signal wires hold a clock (SCK, often in the range
309 of 1-20 MHz), a "Master Out, Slave In" (MOSI) data line, and
310 a "Master In, Slave Out" (MISO) data line.
311 SPI is a full duplex protocol; for each bit shifted out the
312 MOSI line (one per clock) another is shifted in on the MISO line.
313 Those bits are assembled into words of various sizes on the
314 way to and from system memory.
315 An additional chipselect line is usually active-low (nCS);
316 four signals are normally used for each peripheral, plus
317 sometimes an interrupt.
318 </para>
319 <para>
320 The SPI bus facilities listed here provide a generalized
321 interface to declare SPI busses and devices, manage them
322 according to the standard Linux driver model, and perform
323 input/output operations.
324 At this time, only "master" side interfaces are supported,
325 where Linux talks to SPI peripherals and does not implement
326 such a peripheral itself.
327 (Interfaces to support implementing SPI slaves would
328 necessarily look different.)
329 </para>
330 <para>
331 The programming interface is structured around two kinds of driver,
332 and two kinds of device.
333 A "Controller Driver" abstracts the controller hardware, which may
334 be as simple as a set of GPIO pins or as complex as a pair of FIFOs
335 connected to dual DMA engines on the other side of the SPI shift
336 register (maximizing throughput). Such drivers bridge between
337 whatever bus they sit on (often the platform bus) and SPI, and
338 expose the SPI side of their device as a
339 <structname>struct spi_master</structname>.
340 SPI devices are children of that master, represented as a
341 <structname>struct spi_device</structname> and manufactured from
342 <structname>struct spi_board_info</structname> descriptors which
343 are usually provided by board-specific initialization code.
344 A <structname>struct spi_driver</structname> is called a
345 "Protocol Driver", and is bound to a spi_device using normal
346 driver model calls.
347 </para>
348 <para>
349 The I/O model is a set of queued messages. Protocol drivers
350 submit one or more <structname>struct spi_message</structname>
351 objects, which are processed and completed asynchronously.
352 (There are synchronous wrappers, however.) Messages are
353 built from one or more <structname>struct spi_transfer</structname>
354 objects, each of which wraps a full duplex SPI transfer.
355 A variety of protocol tweaking options are needed, because
356 different chips adopt very different policies for how they
357 use the bits transferred with SPI.
358 </para>
359!Iinclude/linux/spi/spi.h
360!Fdrivers/spi/spi.c spi_register_board_info
361!Edrivers/spi/spi.c
362 </chapter>
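For illustration only (this sketch is not part of the patch; the "example-sensor" modalias and the command byte are invented), a minimal protocol driver under the model described above could look like this:

#include <linux/module.h>
#include <linux/spi/spi.h>

static int __devinit example_sensor_probe(struct spi_device *spi)
{
	u8 cmd = 0x80;			/* hypothetical "start conversion" command */

	spi->bits_per_word = 8;
	spi_setup(spi);			/* apply mode, speed and word size */

	/* one synchronous, full-duplex transfer queued on our behalf */
	return spi_write(spi, &cmd, sizeof(cmd));
}

static int __devexit example_sensor_remove(struct spi_device *spi)
{
	return 0;
}

static struct spi_driver example_sensor_driver = {
	.driver = {
		.name	= "example-sensor",
		.owner	= THIS_MODULE,
	},
	.probe	= example_sensor_probe,
	.remove	= __devexit_p(example_sensor_remove),
};

static int __init example_sensor_init(void)
{
	return spi_register_driver(&example_sensor_driver);
}
module_init(example_sensor_init);

static void __exit example_sensor_exit(void)
{
	spi_unregister_driver(&example_sensor_driver);
}
module_exit(example_sensor_exit);

The driver binds to any spi_device whose board_info modalias matches "example-sensor"; the controller driver behind that device takes care of the actual shifting.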
363
364 <chapter id="i2c">
365 <title>I<superscript>2</superscript>C and SMBus Subsystem</title>
366
367 <para>
368 I<superscript>2</superscript>C (or without fancy typography, "I2C")
369 is an acronym for the "Inter-IC" bus, a simple bus protocol which is
370 widely used where low data rate communications suffice.
371 Since it's also a licensed trademark, some vendors use another
372 name (such as "Two-Wire Interface", TWI) for the same bus.
373 I2C only needs two signals (SCL for clock, SDA for data), conserving
374 board real estate and minimizing signal quality issues.
375 Most I2C devices use seven bit addresses, and bus speeds of up
376 to 400 kHz; there's a high speed extension (3.4 MHz) that's not yet
377 found wide use.
378 I2C is a multi-master bus; open drain signaling is used to
379 arbitrate between masters, as well as to handshake and to
380 synchronize clocks from slower clients.
381 </para>
382
383 <para>
384 The Linux I2C programming interfaces support only the master
385 side of bus interactions, not the slave side.
386 The programming interface is structured around two kinds of driver,
387 and two kinds of device.
388 An I2C "Adapter Driver" abstracts the controller hardware; it binds
389 to a physical device (perhaps a PCI device or platform_device) and
390 exposes a <structname>struct i2c_adapter</structname> representing
391 each I2C bus segment it manages.
392 On each I2C bus segment will be I2C devices represented by a
393 <structname>struct i2c_client</structname>. Those devices will
394 be bound to a <structname>struct i2c_driver</structname>,
395 which should follow the standard Linux driver model.
396 (At this writing, a legacy model is more widely used.)
397 There are functions to perform various I2C protocol operations; at
398 this writing all such functions are usable only from task context.
399 </para>
400
401 <para>
402 The System Management Bus (SMBus) is a sibling protocol. Most SMBus
403 systems are also I2C conformant. The electrical constraints are
404 tighter for SMBus, and it standardizes particular protocol messages
405 and idioms. Controllers that support I2C can also support most
406 SMBus operations, but SMBus controllers don't support all the protocol
407 options that an I2C controller will.
408 There are functions to perform various SMBus protocol operations,
409 either using I2C primitives or by issuing SMBus commands to
410 i2c_adapter devices which don't support those I2C operations.
411 </para>
412
413!Iinclude/linux/i2c.h
414!Fdrivers/i2c/i2c-boardinfo.c i2c_register_board_info
415!Edrivers/i2c/i2c-core.c
416 </chapter>
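For illustration only (this sketch is not part of the patch; the "example-rtc" name and the register offset are invented), a minimal new-style chip driver looks like this; the pcf8583 board-info registration added to arch/arm/mach-rpc/riscpc.c later in this patch is a real in-tree counterpart of the board side of this model:

#include <linux/module.h>
#include <linux/i2c.h>

static int example_rtc_probe(struct i2c_client *client,
			     const struct i2c_device_id *id)
{
	/* read a hypothetical "seconds" register via SMBus primitives */
	s32 sec = i2c_smbus_read_byte_data(client, 0x02);

	if (sec < 0)
		return sec;

	dev_info(&client->dev, "seconds register: %02x\n", sec);
	return 0;
}

static int example_rtc_remove(struct i2c_client *client)
{
	return 0;
}

static const struct i2c_device_id example_rtc_id[] = {
	{ "example-rtc", 0 },
	{ }
};

static struct i2c_driver example_rtc_driver = {
	.driver		= { .name = "example-rtc" },
	.probe		= example_rtc_probe,
	.remove		= example_rtc_remove,
	.id_table	= example_rtc_id,
};

static int __init example_rtc_init(void)
{
	return i2c_add_driver(&example_rtc_driver);
}
module_init(example_rtc_init);

static void __exit example_rtc_exit(void)
{
	i2c_del_driver(&example_rtc_driver);
}
module_exit(example_rtc_exit);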
417
418</book>
diff --git a/Documentation/DocBook/kernel-api.tmpl b/Documentation/DocBook/kernel-api.tmpl
index 5818ff75786a..bc962cda6504 100644
--- a/Documentation/DocBook/kernel-api.tmpl
+++ b/Documentation/DocBook/kernel-api.tmpl
@@ -38,58 +38,6 @@
38
39<toc></toc>
40
41 <chapter id="Basics">
42 <title>Driver Basics</title>
43 <sect1><title>Driver Entry and Exit points</title>
44!Iinclude/linux/init.h
45 </sect1>
46
47 <sect1><title>Atomic and pointer manipulation</title>
48!Iarch/x86/include/asm/atomic_32.h
49!Iarch/x86/include/asm/unaligned.h
50 </sect1>
51
52 <sect1><title>Delaying, scheduling, and timer routines</title>
53!Iinclude/linux/sched.h
54!Ekernel/sched.c
55!Ekernel/timer.c
56 </sect1>
57 <sect1><title>High-resolution timers</title>
58!Iinclude/linux/ktime.h
59!Iinclude/linux/hrtimer.h
60!Ekernel/hrtimer.c
61 </sect1>
62 <sect1><title>Workqueues and Kevents</title>
63!Ekernel/workqueue.c
64 </sect1>
65 <sect1><title>Internal Functions</title>
66!Ikernel/exit.c
67!Ikernel/signal.c
68!Iinclude/linux/kthread.h
69!Ekernel/kthread.c
70 </sect1>
71
72 <sect1><title>Kernel objects manipulation</title>
73<!--
74X!Iinclude/linux/kobject.h
75-->
76!Elib/kobject.c
77 </sect1>
78
79 <sect1><title>Kernel utility functions</title>
80!Iinclude/linux/kernel.h
81!Ekernel/printk.c
82!Ekernel/panic.c
83!Ekernel/sys.c
84!Ekernel/rcupdate.c
85 </sect1>
86
87 <sect1><title>Device Resource Management</title>
88!Edrivers/base/devres.c
89 </sect1>
90
91 </chapter>
92
93 <chapter id="adt">
94 <title>Data Types</title>
95 <sect1><title>Doubly Linked Lists</title>
@@ -298,62 +246,6 @@ X!Earch/x86/kernel/mca_32.c
298!Ikernel/acct.c
299 </chapter>
300
301 <chapter id="devdrivers">
302 <title>Device drivers infrastructure</title>
303 <sect1><title>Device Drivers Base</title>
304<!--
305X!Iinclude/linux/device.h
306-->
307!Edrivers/base/driver.c
308!Edrivers/base/core.c
309!Edrivers/base/class.c
310!Edrivers/base/firmware_class.c
311!Edrivers/base/transport_class.c
312<!-- Cannot be included, because
313 attribute_container_add_class_device_adapter
314 and attribute_container_classdev_to_container
315 exceed allowed 44 characters maximum
316X!Edrivers/base/attribute_container.c
317-->
318!Edrivers/base/sys.c
319<!--
320X!Edrivers/base/interface.c
321-->
322!Edrivers/base/platform.c
323!Edrivers/base/bus.c
324 </sect1>
325 <sect1><title>Device Drivers Power Management</title>
326!Edrivers/base/power/main.c
327 </sect1>
328 <sect1><title>Device Drivers ACPI Support</title>
329<!-- Internal functions only
330X!Edrivers/acpi/sleep/main.c
331X!Edrivers/acpi/sleep/wakeup.c
332X!Edrivers/acpi/motherboard.c
333X!Edrivers/acpi/bus.c
334-->
335!Edrivers/acpi/scan.c
336!Idrivers/acpi/scan.c
337<!-- No correct structured comments
338X!Edrivers/acpi/pci_bind.c
339-->
340 </sect1>
341 <sect1><title>Device drivers PnP support</title>
342!Idrivers/pnp/core.c
343<!-- No correct structured comments
344X!Edrivers/pnp/system.c
345 -->
346!Edrivers/pnp/card.c
347!Idrivers/pnp/driver.c
348!Edrivers/pnp/manager.c
349!Edrivers/pnp/support.c
350 </sect1>
351 <sect1><title>Userspace IO devices</title>
352!Edrivers/uio/uio.c
353!Iinclude/linux/uio_driver.h
354 </sect1>
355 </chapter>
356
357 <chapter id="blkdev">
358 <title>Block Devices</title>
359!Eblock/blk-core.c
@@ -381,275 +273,6 @@ X!Edrivers/pnp/system.c
381!Edrivers/char/misc.c
382 </chapter>
383
384 <chapter id="parportdev">
385 <title>Parallel Port Devices</title>
386!Iinclude/linux/parport.h
387!Edrivers/parport/ieee1284.c
388!Edrivers/parport/share.c
389!Idrivers/parport/daisy.c
390 </chapter>
391
392 <chapter id="message_devices">
393 <title>Message-based devices</title>
394 <sect1><title>Fusion message devices</title>
395!Edrivers/message/fusion/mptbase.c
396!Idrivers/message/fusion/mptbase.c
397!Edrivers/message/fusion/mptscsih.c
398!Idrivers/message/fusion/mptscsih.c
399!Idrivers/message/fusion/mptctl.c
400!Idrivers/message/fusion/mptspi.c
401!Idrivers/message/fusion/mptfc.c
402!Idrivers/message/fusion/mptlan.c
403 </sect1>
404 <sect1><title>I2O message devices</title>
405!Iinclude/linux/i2o.h
406!Idrivers/message/i2o/core.h
407!Edrivers/message/i2o/iop.c
408!Idrivers/message/i2o/iop.c
409!Idrivers/message/i2o/config-osm.c
410!Edrivers/message/i2o/exec-osm.c
411!Idrivers/message/i2o/exec-osm.c
412!Idrivers/message/i2o/bus-osm.c
413!Edrivers/message/i2o/device.c
414!Idrivers/message/i2o/device.c
415!Idrivers/message/i2o/driver.c
416!Idrivers/message/i2o/pci.c
417!Idrivers/message/i2o/i2o_block.c
418!Idrivers/message/i2o/i2o_scsi.c
419!Idrivers/message/i2o/i2o_proc.c
420 </sect1>
421 </chapter>
422
423 <chapter id="snddev">
424 <title>Sound Devices</title>
425!Iinclude/sound/core.h
426!Esound/sound_core.c
427!Iinclude/sound/pcm.h
428!Esound/core/pcm.c
429!Esound/core/device.c
430!Esound/core/info.c
431!Esound/core/rawmidi.c
432!Esound/core/sound.c
433!Esound/core/memory.c
434!Esound/core/pcm_memory.c
435!Esound/core/init.c
436!Esound/core/isadma.c
437!Esound/core/control.c
438!Esound/core/pcm_lib.c
439!Esound/core/hwdep.c
440!Esound/core/pcm_native.c
441!Esound/core/memalloc.c
442<!-- FIXME: Removed for now since no structured comments in source
443X!Isound/sound_firmware.c
444-->
445 </chapter>
446
447 <chapter id="uart16x50">
448 <title>16x50 UART Driver</title>
449!Iinclude/linux/serial_core.h
450!Edrivers/serial/serial_core.c
451!Edrivers/serial/8250.c
452 </chapter>
453
454 <chapter id="fbdev">
455 <title>Frame Buffer Library</title>
456
457 <para>
458 The frame buffer drivers depend heavily on four data structures.
459 These structures are declared in include/linux/fb.h. They are
460 fb_info, fb_var_screeninfo, fb_fix_screeninfo and fb_monospecs.
461 The last three can be made available to and from userland.
462 </para>
463
464 <para>
465 fb_info defines the current state of a particular video card.
466 Inside fb_info, there exists a fb_ops structure which is a
467 collection of needed functions to make fbdev and fbcon work.
468 fb_info is only visible to the kernel.
469 </para>
470
471 <para>
472 fb_var_screeninfo is used to describe the features of a video card
473 that are user defined. With fb_var_screeninfo, things such as
474 depth and the resolution may be defined.
475 </para>
476
477 <para>
478 The next structure is fb_fix_screeninfo. This defines the
479 properties of a card that are created when a mode is set and can't
480 be changed otherwise. A good example of this is the start of the
481 frame buffer memory. This "locks" the address of the frame buffer
482 memory, so that it cannot be changed or moved.
483 </para>
484
485 <para>
486 The last structure is fb_monospecs. In the old API, there was
487 little importance for fb_monospecs. This allowed for forbidden things
488 such as setting a mode of 800x600 on a fix frequency monitor. With
489 the new API, fb_monospecs prevents such things, and if used
490 correctly, can prevent a monitor from being cooked. fb_monospecs
491 will not be useful until kernels 2.5.x.
492 </para>
493
494 <sect1><title>Frame Buffer Memory</title>
495!Edrivers/video/fbmem.c
496 </sect1>
497<!--
498 <sect1><title>Frame Buffer Console</title>
499X!Edrivers/video/console/fbcon.c
500 </sect1>
501-->
502 <sect1><title>Frame Buffer Colormap</title>
503!Edrivers/video/fbcmap.c
504 </sect1>
505<!-- FIXME:
506 drivers/video/fbgen.c has no docs, which stuffs up the sgml. Comment
507 out until somebody adds docs. KAO
508 <sect1><title>Frame Buffer Generic Functions</title>
509X!Idrivers/video/fbgen.c
510 </sect1>
511KAO -->
512 <sect1><title>Frame Buffer Video Mode Database</title>
513!Idrivers/video/modedb.c
514!Edrivers/video/modedb.c
515 </sect1>
516 <sect1><title>Frame Buffer Macintosh Video Mode Database</title>
517!Edrivers/video/macmodes.c
518 </sect1>
519 <sect1><title>Frame Buffer Fonts</title>
520 <para>
521 Refer to the file drivers/video/console/fonts.c for more information.
522 </para>
523<!-- FIXME: Removed for now since no structured comments in source
524X!Idrivers/video/console/fonts.c
525-->
526 </sect1>
527 </chapter>
528
529 <chapter id="input_subsystem">
530 <title>Input Subsystem</title>
531!Iinclude/linux/input.h
532!Edrivers/input/input.c
533!Edrivers/input/ff-core.c
534!Edrivers/input/ff-memless.c
535 </chapter>
536
537 <chapter id="spi">
538 <title>Serial Peripheral Interface (SPI)</title>
539 <para>
540 SPI is the "Serial Peripheral Interface", widely used with
541 embedded systems because it is a simple and efficient
542 interface: basically a multiplexed shift register.
543 Its three signal wires hold a clock (SCK, often in the range
544 of 1-20 MHz), a "Master Out, Slave In" (MOSI) data line, and
545 a "Master In, Slave Out" (MISO) data line.
546 SPI is a full duplex protocol; for each bit shifted out the
547 MOSI line (one per clock) another is shifted in on the MISO line.
548 Those bits are assembled into words of various sizes on the
549 way to and from system memory.
550 An additional chipselect line is usually active-low (nCS);
551 four signals are normally used for each peripheral, plus
552 sometimes an interrupt.
553 </para>
554 <para>
555 The SPI bus facilities listed here provide a generalized
556 interface to declare SPI busses and devices, manage them
557 according to the standard Linux driver model, and perform
558 input/output operations.
559 At this time, only "master" side interfaces are supported,
560 where Linux talks to SPI peripherals and does not implement
561 such a peripheral itself.
562 (Interfaces to support implementing SPI slaves would
563 necessarily look different.)
564 </para>
565 <para>
566 The programming interface is structured around two kinds of driver,
567 and two kinds of device.
568 A "Controller Driver" abstracts the controller hardware, which may
569 be as simple as a set of GPIO pins or as complex as a pair of FIFOs
570 connected to dual DMA engines on the other side of the SPI shift
571 register (maximizing throughput). Such drivers bridge between
572 whatever bus they sit on (often the platform bus) and SPI, and
573 expose the SPI side of their device as a
574 <structname>struct spi_master</structname>.
575 SPI devices are children of that master, represented as a
576 <structname>struct spi_device</structname> and manufactured from
577 <structname>struct spi_board_info</structname> descriptors which
578 are usually provided by board-specific initialization code.
579 A <structname>struct spi_driver</structname> is called a
580 "Protocol Driver", and is bound to a spi_device using normal
581 driver model calls.
582 </para>
583 <para>
584 The I/O model is a set of queued messages. Protocol drivers
585 submit one or more <structname>struct spi_message</structname>
586 objects, which are processed and completed asynchronously.
587 (There are synchronous wrappers, however.) Messages are
588 built from one or more <structname>struct spi_transfer</structname>
589 objects, each of which wraps a full duplex SPI transfer.
590 A variety of protocol tweaking options are needed, because
591 different chips adopt very different policies for how they
592 use the bits transferred with SPI.
593 </para>
594!Iinclude/linux/spi/spi.h
595!Fdrivers/spi/spi.c spi_register_board_info
596!Edrivers/spi/spi.c
597 </chapter>
598
599 <chapter id="i2c">
600 <title>I<superscript>2</superscript>C and SMBus Subsystem</title>
601
602 <para>
603 I<superscript>2</superscript>C (or without fancy typography, "I2C")
604 is an acronym for the "Inter-IC" bus, a simple bus protocol which is
605 widely used where low data rate communications suffice.
606 Since it's also a licensed trademark, some vendors use another
607 name (such as "Two-Wire Interface", TWI) for the same bus.
608 I2C only needs two signals (SCL for clock, SDA for data), conserving
609 board real estate and minimizing signal quality issues.
610 Most I2C devices use seven bit addresses, and bus speeds of up
611 to 400 kHz; there's a high speed extension (3.4 MHz) that's not yet
612 found wide use.
613 I2C is a multi-master bus; open drain signaling is used to
614 arbitrate between masters, as well as to handshake and to
615 synchronize clocks from slower clients.
616 </para>
617
618 <para>
619 The Linux I2C programming interfaces support only the master
620 side of bus interactions, not the slave side.
621 The programming interface is structured around two kinds of driver,
622 and two kinds of device.
623 An I2C "Adapter Driver" abstracts the controller hardware; it binds
624 to a physical device (perhaps a PCI device or platform_device) and
625 exposes a <structname>struct i2c_adapter</structname> representing
626 each I2C bus segment it manages.
627 On each I2C bus segment will be I2C devices represented by a
628 <structname>struct i2c_client</structname>. Those devices will
629 be bound to a <structname>struct i2c_driver</structname>,
630 which should follow the standard Linux driver model.
631 (At this writing, a legacy model is more widely used.)
632 There are functions to perform various I2C protocol operations; at
633 this writing all such functions are usable only from task context.
634 </para>
635
636 <para>
637 The System Management Bus (SMBus) is a sibling protocol. Most SMBus
638 systems are also I2C conformant. The electrical constraints are
639 tighter for SMBus, and it standardizes particular protocol messages
640 and idioms. Controllers that support I2C can also support most
641 SMBus operations, but SMBus controllers don't support all the protocol
642 options that an I2C controller will.
643 There are functions to perform various SMBus protocol operations,
644 either using I2C primitives or by issuing SMBus commands to
645 i2c_adapter devices which don't support those I2C operations.
646 </para>
647
648!Iinclude/linux/i2c.h
649!Fdrivers/i2c/i2c-boardinfo.c i2c_register_board_info
650!Edrivers/i2c/i2c-core.c
651 </chapter>
652
653 <chapter id="clk">
654 <title>Clock Framework</title>
655
diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt
index b182626739ea..10c4b8b75c96 100644
--- a/Documentation/kernel-parameters.txt
+++ b/Documentation/kernel-parameters.txt
@@ -114,7 +114,7 @@ In addition, the following text indicates that the option:
 Parameters denoted with BOOT are actually interpreted by the boot
 loader, and have no meaning to the kernel directly.
 Do not modify the syntax of boot loader parameters without extreme
-need or coordination with <Documentation/x86/i386/boot.txt>.
+need or coordination with <Documentation/x86/boot.txt>.
 
 There are also arch-specific kernel-parameters not documented here.
 See for example <Documentation/x86/x86_64/boot-options.txt>.
@@ -134,7 +134,7 @@ and is between 256 and 4096 characters. It is defined in the file
 
 	acpi=		[HW,ACPI,X86-64,i386]
 			Advanced Configuration and Power Interface
-			Format: { force | off | ht | strict | noirq }
+			Format: { force | off | ht | strict | noirq | rsdt }
 			force -- enable ACPI if default was off
 			off -- disable ACPI if default was on
 			noirq -- do not use ACPI for IRQ routing
@@ -1308,8 +1308,13 @@ and is between 256 and 4096 characters. It is defined in the file
 
 	memtest=	[KNL,X86] Enable memtest
 			Format: <integer>
-			range: 0,4 : pattern number
 			default : 0 <disable>
+			Specifies the number of memtest passes to be
+			performed. Each pass selects another test
+			pattern from a given set of patterns. Memtest
+			fills the memory with this pattern, validates
+			memory contents and reserves bad memory
+			regions that are detected.
 
 	meye.*=		[HW] Set MotionEye Camera parameters
 			See Documentation/video4linux/meye.txt.
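For illustration only (not part of the patch): under the memtest= semantics described in the hunk above, a boot command line such as

	memtest=4

requests four passes; each pass writes one test pattern over free memory, verifies the contents, and reserves any region that fails the check.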
@@ -2449,7 +2454,7 @@ and is between 256 and 4096 characters. It is defined in the file
 			See Documentation/fb/modedb.txt.
 
 	vga=		[BOOT,X86-32] Select a particular video mode
-			See Documentation/x86/i386/boot.txt and
+			See Documentation/x86/boot.txt and
 			Documentation/svga.txt.
 			Use vga=ask for menu.
 			This is actually a boot loader parameter; the value is
diff --git a/Makefile b/Makefile
index 96628d0b48d2..27fb890a2bff 100644
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
 VERSION = 2
 PATCHLEVEL = 6
 SUBLEVEL = 29
-EXTRAVERSION = -rc5
+EXTRAVERSION = -rc6
 NAME = Erotic Pickled Herring
 
 # *DOCUMENTATION*
diff --git a/arch/arm/mach-rpc/riscpc.c b/arch/arm/mach-rpc/riscpc.c
index e88d417736af..c7fc01e9d1f6 100644
--- a/arch/arm/mach-rpc/riscpc.c
+++ b/arch/arm/mach-rpc/riscpc.c
@@ -19,6 +19,7 @@
 #include <linux/serial_8250.h>
 #include <linux/ata_platform.h>
 #include <linux/io.h>
+#include <linux/i2c.h>
 
 #include <asm/elf.h>
 #include <asm/mach-types.h>
@@ -201,8 +202,13 @@ static struct platform_device *devs[] __initdata = {
 	&pata_device,
 };
 
+static struct i2c_board_info i2c_rtc = {
+	I2C_BOARD_INFO("pcf8583", 0x50)
+};
+
 static int __init rpc_init(void)
 {
+	i2c_register_board_info(0, &i2c_rtc, 1);
 	return platform_add_devices(devs, ARRAY_SIZE(devs));
 }
 
diff --git a/arch/x86/boot/compressed/head_32.S b/arch/x86/boot/compressed/head_32.S
index 29c5fbf08392..3a8a866fb2e2 100644
--- a/arch/x86/boot/compressed/head_32.S
+++ b/arch/x86/boot/compressed/head_32.S
@@ -25,14 +25,12 @@
 
 #include <linux/linkage.h>
 #include <asm/segment.h>
-#include <asm/page.h>
+#include <asm/page_types.h>
 #include <asm/boot.h>
 #include <asm/asm-offsets.h>
 
 .section ".text.head","ax",@progbits
-	.globl startup_32
-
-startup_32:
+ENTRY(startup_32)
 	cld
 	/* test KEEP_SEGMENTS flag to see if the bootloader is asking
 	 * us to not reload segments */
@@ -113,6 +111,8 @@ startup_32:
 	 */
 	leal	relocated(%ebx), %eax
 	jmp	*%eax
+ENDPROC(startup_32)
+
 .section ".text"
 relocated:
 
diff --git a/arch/x86/boot/compressed/head_64.S b/arch/x86/boot/compressed/head_64.S
index 1d5dff4123e1..ed4a82948002 100644
--- a/arch/x86/boot/compressed/head_64.S
+++ b/arch/x86/boot/compressed/head_64.S
@@ -26,8 +26,8 @@
 
 #include <linux/linkage.h>
 #include <asm/segment.h>
-#include <asm/pgtable.h>
-#include <asm/page.h>
+#include <asm/pgtable_types.h>
+#include <asm/page_types.h>
 #include <asm/boot.h>
 #include <asm/msr.h>
 #include <asm/processor-flags.h>
@@ -35,9 +35,7 @@
 
 .section ".text.head"
 	.code32
-	.globl startup_32
-
-startup_32:
+ENTRY(startup_32)
 	cld
 	/* test KEEP_SEGMENTS flag to see if the bootloader is asking
 	 * us to not reload segments */
@@ -176,6 +174,7 @@ startup_32:
 
 	/* Jump from 32bit compatibility mode into 64bit mode. */
 	lret
+ENDPROC(startup_32)
 
 no_longmode:
 	/* This isn't an x86-64 CPU so hang */
@@ -295,7 +294,6 @@ relocated:
 	call	decompress_kernel
 	popq	%rsi
 
-
 /*
  * Jump to the decompressed kernel.
  */
diff --git a/arch/x86/boot/copy.S b/arch/x86/boot/copy.S
index ef50c84e8b4b..11f272c6f5e9 100644
--- a/arch/x86/boot/copy.S
+++ b/arch/x86/boot/copy.S
@@ -8,6 +8,8 @@
8 * 8 *
9 * ----------------------------------------------------------------------- */ 9 * ----------------------------------------------------------------------- */
10 10
11#include <linux/linkage.h>
12
11/* 13/*
12 * Memory copy routines 14 * Memory copy routines
13 */ 15 */
@@ -15,9 +17,7 @@
15 .code16gcc 17 .code16gcc
16 .text 18 .text
17 19
18 .globl memcpy 20GLOBAL(memcpy)
19 .type memcpy, @function
20memcpy:
21 pushw %si 21 pushw %si
22 pushw %di 22 pushw %di
23 movw %ax, %di 23 movw %ax, %di
@@ -31,11 +31,9 @@ memcpy:
31 popw %di 31 popw %di
32 popw %si 32 popw %si
33 ret 33 ret
34 .size memcpy, .-memcpy 34ENDPROC(memcpy)
35 35
36 .globl memset 36GLOBAL(memset)
37 .type memset, @function
38memset:
39 pushw %di 37 pushw %di
40 movw %ax, %di 38 movw %ax, %di
41 movzbl %dl, %eax 39 movzbl %dl, %eax
@@ -48,52 +46,42 @@ memset:
48 rep; stosb 46 rep; stosb
49 popw %di 47 popw %di
50 ret 48 ret
51 .size memset, .-memset 49ENDPROC(memset)
52 50
53 .globl copy_from_fs 51GLOBAL(copy_from_fs)
54 .type copy_from_fs, @function
55copy_from_fs:
56 pushw %ds 52 pushw %ds
57 pushw %fs 53 pushw %fs
58 popw %ds 54 popw %ds
59 call memcpy 55 call memcpy
60 popw %ds 56 popw %ds
61 ret 57 ret
62 .size copy_from_fs, .-copy_from_fs 58ENDPROC(copy_from_fs)
63 59
64 .globl copy_to_fs 60GLOBAL(copy_to_fs)
65 .type copy_to_fs, @function
66copy_to_fs:
67 pushw %es 61 pushw %es
68 pushw %fs 62 pushw %fs
69 popw %es 63 popw %es
70 call memcpy 64 call memcpy
71 popw %es 65 popw %es
72 ret 66 ret
73 .size copy_to_fs, .-copy_to_fs 67ENDPROC(copy_to_fs)
74 68
75#if 0 /* Not currently used, but can be enabled as needed */ 69#if 0 /* Not currently used, but can be enabled as needed */
76 70GLOBAL(copy_from_gs)
77 .globl copy_from_gs
78 .type copy_from_gs, @function
79copy_from_gs:
80 pushw %ds 71 pushw %ds
81 pushw %gs 72 pushw %gs
82 popw %ds 73 popw %ds
83 call memcpy 74 call memcpy
84 popw %ds 75 popw %ds
85 ret 76 ret
86 .size copy_from_gs, .-copy_from_gs 77ENDPROC(copy_from_gs)
87 .globl copy_to_gs
88 78
89 .type copy_to_gs, @function 79GLOBAL(copy_to_gs)
90copy_to_gs:
91 pushw %es 80 pushw %es
92 pushw %gs 81 pushw %gs
93 popw %es 82 popw %es
94 call memcpy 83 call memcpy
95 popw %es 84 popw %es
96 ret 85 ret
97 .size copy_to_gs, .-copy_to_gs 86ENDPROC(copy_to_gs)
98
99#endif 87#endif
diff --git a/arch/x86/boot/header.S b/arch/x86/boot/header.S
index b993062e9a5f..7ccff4884a23 100644
--- a/arch/x86/boot/header.S
+++ b/arch/x86/boot/header.S
@@ -19,7 +19,7 @@
 #include <linux/utsrelease.h>
 #include <asm/boot.h>
 #include <asm/e820.h>
-#include <asm/page.h>
+#include <asm/page_types.h>
 #include <asm/setup.h>
 #include "boot.h"
 #include "offsets.h"
diff --git a/arch/x86/boot/pmjump.S b/arch/x86/boot/pmjump.S
index 141b6e20ed31..019c17a75851 100644
--- a/arch/x86/boot/pmjump.S
+++ b/arch/x86/boot/pmjump.S
@@ -15,18 +15,15 @@
 #include <asm/boot.h>
 #include <asm/processor-flags.h>
 #include <asm/segment.h>
+#include <linux/linkage.h>
 
 	.text
-
-	.globl	protected_mode_jump
-	.type	protected_mode_jump, @function
-
 	.code16
 
 /*
  * void protected_mode_jump(u32 entrypoint, u32 bootparams);
  */
-protected_mode_jump:
+GLOBAL(protected_mode_jump)
 	movl	%edx, %esi		# Pointer to boot_params table
 
 	xorl	%ebx, %ebx
@@ -47,12 +44,10 @@ protected_mode_jump:
 	.byte	0x66, 0xea		# ljmpl opcode
 2:	.long	in_pm32			# offset
 	.word	__BOOT_CS		# segment
-
-	.size	protected_mode_jump, .-protected_mode_jump
+ENDPROC(protected_mode_jump)
 
 	.code32
-	.type	in_pm32, @function
-in_pm32:
+GLOBAL(in_pm32)
 	# Set up data segments for flat 32-bit mode
 	movl	%ecx, %ds
 	movl	%ecx, %es
@@ -78,5 +73,4 @@ in_pm32:
 	lldt	%cx
 
 	jmpl	*%eax			# Jump to the 32-bit entrypoint
-
-	.size	in_pm32, .-in_pm32
+ENDPROC(in_pm32)
diff --git a/arch/x86/configs/i386_defconfig b/arch/x86/configs/i386_defconfig
index 5c023f6f652c..235b81d0f6f2 100644
--- a/arch/x86/configs/i386_defconfig
+++ b/arch/x86/configs/i386_defconfig
@@ -1,7 +1,7 @@
 #
 # Automatically generated make config: don't edit
 # Linux kernel version: 2.6.29-rc4
-# Thu Feb 12 12:57:57 2009
+# Tue Feb 24 15:50:58 2009
 #
 # CONFIG_64BIT is not set
 CONFIG_X86_32=y
@@ -266,7 +266,9 @@ CONFIG_PREEMPT_VOLUNTARY=y
 CONFIG_X86_LOCAL_APIC=y
 CONFIG_X86_IO_APIC=y
 CONFIG_X86_REROUTE_FOR_BROKEN_BOOT_IRQS=y
-# CONFIG_X86_MCE is not set
+CONFIG_X86_MCE=y
+CONFIG_X86_MCE_NONFATAL=y
+CONFIG_X86_MCE_P4THERMAL=y
 CONFIG_VM86=y
 # CONFIG_TOSHIBA is not set
 # CONFIG_I8K is not set
diff --git a/arch/x86/configs/x86_64_defconfig b/arch/x86/configs/x86_64_defconfig
index 4157cc4a2bde..9fe5d212ab4c 100644
--- a/arch/x86/configs/x86_64_defconfig
+++ b/arch/x86/configs/x86_64_defconfig
@@ -1,7 +1,7 @@
 #
 # Automatically generated make config: don't edit
 # Linux kernel version: 2.6.29-rc4
-# Thu Feb 12 12:57:29 2009
+# Tue Feb 24 15:44:16 2009
 #
 CONFIG_64BIT=y
 # CONFIG_X86_32 is not set
@@ -266,7 +266,9 @@ CONFIG_PREEMPT_VOLUNTARY=y
 CONFIG_X86_LOCAL_APIC=y
 CONFIG_X86_IO_APIC=y
 CONFIG_X86_REROUTE_FOR_BROKEN_BOOT_IRQS=y
-# CONFIG_X86_MCE is not set
+CONFIG_X86_MCE=y
+CONFIG_X86_MCE_INTEL=y
+CONFIG_X86_MCE_AMD=y
 # CONFIG_I8K is not set
 CONFIG_MICROCODE=y
 CONFIG_MICROCODE_INTEL=y
diff --git a/arch/x86/ia32/ia32_signal.c b/arch/x86/ia32/ia32_signal.c
index dd77ac0cac46..588a7aa937e1 100644
--- a/arch/x86/ia32/ia32_signal.c
+++ b/arch/x86/ia32/ia32_signal.c
@@ -33,8 +33,6 @@
33#include <asm/sigframe.h> 33#include <asm/sigframe.h>
34#include <asm/sys_ia32.h> 34#include <asm/sys_ia32.h>
35 35
36#define DEBUG_SIG 0
37
38#define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP))) 36#define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP)))
39 37
40#define FIX_EFLAGS (X86_EFLAGS_AC | X86_EFLAGS_OF | \ 38#define FIX_EFLAGS (X86_EFLAGS_AC | X86_EFLAGS_OF | \
@@ -190,42 +188,47 @@ asmlinkage long sys32_sigaltstack(const stack_ia32_t __user *uss_ptr,
190/* 188/*
191 * Do a signal return; undo the signal stack. 189 * Do a signal return; undo the signal stack.
192 */ 190 */
191#define loadsegment_gs(v) load_gs_index(v)
192#define loadsegment_fs(v) loadsegment(fs, v)
193#define loadsegment_ds(v) loadsegment(ds, v)
194#define loadsegment_es(v) loadsegment(es, v)
195
196#define get_user_seg(seg) ({ unsigned int v; savesegment(seg, v); v; })
197#define set_user_seg(seg, v) loadsegment_##seg(v)
198
193#define COPY(x) { \ 199#define COPY(x) { \
194 get_user_ex(regs->x, &sc->x); \ 200 get_user_ex(regs->x, &sc->x); \
195} 201}
196 202
197#define COPY_SEG_CPL3(seg) { \ 203#define GET_SEG(seg) ({ \
198 unsigned short tmp; \ 204 unsigned short tmp; \
199 get_user_ex(tmp, &sc->seg); \ 205 get_user_ex(tmp, &sc->seg); \
200 regs->seg = tmp | 3; \ 206 tmp; \
201} 207})
208
209#define COPY_SEG_CPL3(seg) do { \
210 regs->seg = GET_SEG(seg) | 3; \
211} while (0)
202 212
203#define RELOAD_SEG(seg) { \ 213#define RELOAD_SEG(seg) { \
204 unsigned int cur, pre; \ 214 unsigned int pre = GET_SEG(seg); \
205 get_user_ex(pre, &sc->seg); \ 215 unsigned int cur = get_user_seg(seg); \
206 savesegment(seg, cur); \
207 pre |= 3; \ 216 pre |= 3; \
208 if (pre != cur) \ 217 if (pre != cur) \
209 loadsegment(seg, pre); \ 218 set_user_seg(seg, pre); \
210} 219}
211 220
212static int ia32_restore_sigcontext(struct pt_regs *regs, 221static int ia32_restore_sigcontext(struct pt_regs *regs,
213 struct sigcontext_ia32 __user *sc, 222 struct sigcontext_ia32 __user *sc,
214 unsigned int *pax) 223 unsigned int *pax)
215{ 224{
216 unsigned int tmpflags, gs, oldgs, err = 0; 225 unsigned int tmpflags, err = 0;
217 void __user *buf; 226 void __user *buf;
218 u32 tmp; 227 u32 tmp;
219 228
220 /* Always make any pending restarted system calls return -EINTR */ 229 /* Always make any pending restarted system calls return -EINTR */
221 current_thread_info()->restart_block.fn = do_no_restart_syscall; 230 current_thread_info()->restart_block.fn = do_no_restart_syscall;
222 231
223#if DEBUG_SIG
224 printk(KERN_DEBUG "SIG restore_sigcontext: "
225 "sc=%p err(%x) eip(%x) cs(%x) flg(%x)\n",
226 sc, sc->err, sc->ip, sc->cs, sc->flags);
227#endif
228
229 get_user_try { 232 get_user_try {
230 /* 233 /*
231 * Reload fs and gs if they have changed in the signal 234 * Reload fs and gs if they have changed in the signal
@@ -233,12 +236,7 @@ static int ia32_restore_sigcontext(struct pt_regs *regs,
233 * the handler, but does not clobber them at least in the 236 * the handler, but does not clobber them at least in the
234 * normal case. 237 * normal case.
235 */ 238 */
236 get_user_ex(gs, &sc->gs); 239 RELOAD_SEG(gs);
237 gs |= 3;
238 savesegment(gs, oldgs);
239 if (gs != oldgs)
240 load_gs_index(gs);
241
242 RELOAD_SEG(fs); 240 RELOAD_SEG(fs);
243 RELOAD_SEG(ds); 241 RELOAD_SEG(ds);
244 RELOAD_SEG(es); 242 RELOAD_SEG(es);
@@ -337,17 +335,13 @@ static int ia32_setup_sigcontext(struct sigcontext_ia32 __user *sc,
337 void __user *fpstate, 335 void __user *fpstate,
338 struct pt_regs *regs, unsigned int mask) 336 struct pt_regs *regs, unsigned int mask)
339{ 337{
340 int tmp, err = 0; 338 int err = 0;
341 339
342 put_user_try { 340 put_user_try {
343 savesegment(gs, tmp); 341 put_user_ex(get_user_seg(gs), (unsigned int __user *)&sc->gs);
344 put_user_ex(tmp, (unsigned int __user *)&sc->gs); 342 put_user_ex(get_user_seg(fs), (unsigned int __user *)&sc->fs);
345 savesegment(fs, tmp); 343 put_user_ex(get_user_seg(ds), (unsigned int __user *)&sc->ds);
346 put_user_ex(tmp, (unsigned int __user *)&sc->fs); 344 put_user_ex(get_user_seg(es), (unsigned int __user *)&sc->es);
347 savesegment(ds, tmp);
348 put_user_ex(tmp, (unsigned int __user *)&sc->ds);
349 savesegment(es, tmp);
350 put_user_ex(tmp, (unsigned int __user *)&sc->es);
351 345
352 put_user_ex(regs->di, &sc->di); 346 put_user_ex(regs->di, &sc->di);
353 put_user_ex(regs->si, &sc->si); 347 put_user_ex(regs->si, &sc->si);
@@ -488,11 +482,6 @@ int ia32_setup_frame(int sig, struct k_sigaction *ka,
488 regs->cs = __USER32_CS; 482 regs->cs = __USER32_CS;
489 regs->ss = __USER32_DS; 483 regs->ss = __USER32_DS;
490 484
491#if DEBUG_SIG
492 printk(KERN_DEBUG "SIG deliver (%s:%d): sp=%p pc=%lx ra=%u\n",
493 current->comm, current->pid, frame, regs->ip, frame->pretcode);
494#endif
495
496 return 0; 485 return 0;
497} 486}
498 487
@@ -574,10 +563,5 @@ int ia32_setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
574 regs->cs = __USER32_CS; 563 regs->cs = __USER32_CS;
575 regs->ss = __USER32_DS; 564 regs->ss = __USER32_DS;
576 565
577#if DEBUG_SIG
578 printk(KERN_DEBUG "SIG deliver (%s:%d): sp=%p pc=%lx ra=%u\n",
579 current->comm, current->pid, frame, regs->ip, frame->pretcode);
580#endif
581
582 return 0; 566 return 0;
583} 567}
diff --git a/arch/x86/include/asm/io.h b/arch/x86/include/asm/io.h
index 4f8e820cf38f..683d0b4c00fc 100644
--- a/arch/x86/include/asm/io.h
+++ b/arch/x86/include/asm/io.h
@@ -124,10 +124,15 @@ static inline void *phys_to_virt(phys_addr_t address)
 
 /*
  * ISA I/O bus memory addresses are 1:1 with the physical address.
+ * However, we truncate the address to unsigned int to avoid undesirable
+ * promotions in legacy drivers.
  */
-#define isa_virt_to_bus (unsigned long)virt_to_phys
-#define isa_page_to_bus page_to_phys
-#define isa_bus_to_virt phys_to_virt
+static inline unsigned int isa_virt_to_bus(volatile void *address)
+{
+	return (unsigned int)virt_to_phys(address);
+}
+#define isa_page_to_bus(page)	((unsigned int)page_to_phys(page))
+#define isa_bus_to_virt		phys_to_virt
 
 /*
  * However PCI ones are not necessarily 1:1 and therefore these interfaces
diff --git a/arch/x86/include/asm/iomap.h b/arch/x86/include/asm/iomap.h
index c1f06289b14b..bd46495ff7de 100644
--- a/arch/x86/include/asm/iomap.h
+++ b/arch/x86/include/asm/iomap.h
@@ -23,6 +23,12 @@
 #include <asm/pgtable.h>
 #include <asm/tlbflush.h>
 
+int
+reserve_io_memtype_wc(u64 base, unsigned long size, pgprot_t *prot);
+
+void
+free_io_memtype(u64 base, unsigned long size);
+
 void *
 iomap_atomic_prot_pfn(unsigned long pfn, enum km_type type, pgprot_t prot);
 
diff --git a/arch/x86/include/asm/irq_vectors.h b/arch/x86/include/asm/irq_vectors.h
index b07278c55e9e..8a285f356f8a 100644
--- a/arch/x86/include/asm/irq_vectors.h
+++ b/arch/x86/include/asm/irq_vectors.h
@@ -128,7 +128,7 @@
 #ifndef __ASSEMBLY__
 static inline int invalid_vm86_irq(int irq)
 {
-	return irq < 3 || irq > 15;
+	return irq < FIRST_VM86_IRQ || irq > LAST_VM86_IRQ;
 }
 #endif
 
diff --git a/arch/x86/include/asm/linkage.h b/arch/x86/include/asm/linkage.h
index 5d98d0b68ffc..9320e2a8a26a 100644
--- a/arch/x86/include/asm/linkage.h
+++ b/arch/x86/include/asm/linkage.h
@@ -52,70 +52,14 @@
52 52
53#endif 53#endif
54 54
55#define GLOBAL(name) \
56 .globl name; \
57 name:
58
55#ifdef CONFIG_X86_ALIGNMENT_16 59#ifdef CONFIG_X86_ALIGNMENT_16
56#define __ALIGN .align 16,0x90 60#define __ALIGN .align 16,0x90
57#define __ALIGN_STR ".align 16,0x90" 61#define __ALIGN_STR ".align 16,0x90"
58#endif 62#endif
59 63
60/*
61 * to check ENTRY_X86/END_X86 and
62 * KPROBE_ENTRY_X86/KPROBE_END_X86
63 * unbalanced-missed-mixed appearance
64 */
65#define __set_entry_x86 .set ENTRY_X86_IN, 0
66#define __unset_entry_x86 .set ENTRY_X86_IN, 1
67#define __set_kprobe_x86 .set KPROBE_X86_IN, 0
68#define __unset_kprobe_x86 .set KPROBE_X86_IN, 1
69
70#define __macro_err_x86 .error "ENTRY_X86/KPROBE_X86 unbalanced,missed,mixed"
71
72#define __check_entry_x86 \
73 .ifdef ENTRY_X86_IN; \
74 .ifeq ENTRY_X86_IN; \
75 __macro_err_x86; \
76 .abort; \
77 .endif; \
78 .endif
79
80#define __check_kprobe_x86 \
81 .ifdef KPROBE_X86_IN; \
82 .ifeq KPROBE_X86_IN; \
83 __macro_err_x86; \
84 .abort; \
85 .endif; \
86 .endif
87
88#define __check_entry_kprobe_x86 \
89 __check_entry_x86; \
90 __check_kprobe_x86
91
92#define ENTRY_KPROBE_FINAL_X86 __check_entry_kprobe_x86
93
94#define ENTRY_X86(name) \
95 __check_entry_kprobe_x86; \
96 __set_entry_x86; \
97 .globl name; \
98 __ALIGN; \
99 name:
100
101#define END_X86(name) \
102 __unset_entry_x86; \
103 __check_entry_kprobe_x86; \
104 .size name, .-name
105
106#define KPROBE_ENTRY_X86(name) \
107 __check_entry_kprobe_x86; \
108 __set_kprobe_x86; \
109 .pushsection .kprobes.text, "ax"; \
110 .globl name; \
111 __ALIGN; \
112 name:
113
114#define KPROBE_END_X86(name) \
115 __unset_kprobe_x86; \
116 __check_entry_kprobe_x86; \
117 .size name, .-name; \
118 .popsection
119
120#endif /* _ASM_X86_LINKAGE_H */ 64#endif /* _ASM_X86_LINKAGE_H */
121 65
diff --git a/arch/x86/include/asm/page_32_types.h b/arch/x86/include/asm/page_32_types.h
index b5486aaf36ec..f1e4a79a6e41 100644
--- a/arch/x86/include/asm/page_32_types.h
+++ b/arch/x86/include/asm/page_32_types.h
@@ -33,12 +33,10 @@
33/* 44=32+12, the limit we can fit into an unsigned long pfn */ 33/* 44=32+12, the limit we can fit into an unsigned long pfn */
34#define __PHYSICAL_MASK_SHIFT 44 34#define __PHYSICAL_MASK_SHIFT 44
35#define __VIRTUAL_MASK_SHIFT 32 35#define __VIRTUAL_MASK_SHIFT 32
36#define PAGETABLE_LEVELS 3
37 36
38#else /* !CONFIG_X86_PAE */ 37#else /* !CONFIG_X86_PAE */
39#define __PHYSICAL_MASK_SHIFT 32 38#define __PHYSICAL_MASK_SHIFT 32
40#define __VIRTUAL_MASK_SHIFT 32 39#define __VIRTUAL_MASK_SHIFT 32
41#define PAGETABLE_LEVELS 2
42#endif /* CONFIG_X86_PAE */ 40#endif /* CONFIG_X86_PAE */
43 41
44#ifndef __ASSEMBLY__ 42#ifndef __ASSEMBLY__
diff --git a/arch/x86/include/asm/page_64_types.h b/arch/x86/include/asm/page_64_types.h
index bc73af3eda9c..d38c91b70248 100644
--- a/arch/x86/include/asm/page_64_types.h
+++ b/arch/x86/include/asm/page_64_types.h
@@ -1,8 +1,6 @@
1#ifndef _ASM_X86_PAGE_64_DEFS_H 1#ifndef _ASM_X86_PAGE_64_DEFS_H
2#define _ASM_X86_PAGE_64_DEFS_H 2#define _ASM_X86_PAGE_64_DEFS_H
3 3
4#define PAGETABLE_LEVELS 4
5
6#define THREAD_ORDER 1 4#define THREAD_ORDER 1
7#define THREAD_SIZE (PAGE_SIZE << THREAD_ORDER) 5#define THREAD_SIZE (PAGE_SIZE << THREAD_ORDER)
8#define CURRENT_MASK (~(THREAD_SIZE - 1)) 6#define CURRENT_MASK (~(THREAD_SIZE - 1))
diff --git a/arch/x86/include/asm/page_types.h b/arch/x86/include/asm/page_types.h
index 2c52ff767584..2d625da6603c 100644
--- a/arch/x86/include/asm/page_types.h
+++ b/arch/x86/include/asm/page_types.h
@@ -16,12 +16,6 @@
16 (ie, 32-bit PAE). */ 16 (ie, 32-bit PAE). */
17#define PHYSICAL_PAGE_MASK (((signed long)PAGE_MASK) & __PHYSICAL_MASK) 17#define PHYSICAL_PAGE_MASK (((signed long)PAGE_MASK) & __PHYSICAL_MASK)
18 18
19/* PTE_PFN_MASK extracts the PFN from a (pte|pmd|pud|pgd)val_t */
20#define PTE_PFN_MASK ((pteval_t)PHYSICAL_PAGE_MASK)
21
22/* PTE_FLAGS_MASK extracts the flags from a (pte|pmd|pud|pgd)val_t */
23#define PTE_FLAGS_MASK (~PTE_PFN_MASK)
24
25#define PMD_PAGE_SIZE (_AC(1, UL) << PMD_SHIFT) 19#define PMD_PAGE_SIZE (_AC(1, UL) << PMD_SHIFT)
26#define PMD_PAGE_MASK (~(PMD_PAGE_SIZE-1)) 20#define PMD_PAGE_MASK (~(PMD_PAGE_SIZE-1))
27 21
diff --git a/arch/x86/include/asm/pat.h b/arch/x86/include/asm/pat.h
index 9709fdff6615..b0e70056838e 100644
--- a/arch/x86/include/asm/pat.h
+++ b/arch/x86/include/asm/pat.h
@@ -15,4 +15,7 @@ extern int reserve_memtype(u64 start, u64 end,
15 unsigned long req_type, unsigned long *ret_type); 15 unsigned long req_type, unsigned long *ret_type);
16extern int free_memtype(u64 start, u64 end); 16extern int free_memtype(u64 start, u64 end);
17 17
18extern int kernel_map_sync_memtype(u64 base, unsigned long size,
19 unsigned long flag);
20
18#endif /* _ASM_X86_PAT_H */ 21#endif /* _ASM_X86_PAT_H */
diff --git a/arch/x86/include/asm/pgtable-2level_types.h b/arch/x86/include/asm/pgtable-2level_types.h
index 09ae67efcebd..daacc23e3fb9 100644
--- a/arch/x86/include/asm/pgtable-2level_types.h
+++ b/arch/x86/include/asm/pgtable-2level_types.h
@@ -17,6 +17,7 @@ typedef union {
17#endif /* !__ASSEMBLY__ */ 17#endif /* !__ASSEMBLY__ */
18 18
19#define SHARED_KERNEL_PMD 0 19#define SHARED_KERNEL_PMD 0
20#define PAGETABLE_LEVELS 2
20 21
21/* 22/*
22 * traditional i386 two-level paging structure: 23 * traditional i386 two-level paging structure:
@@ -25,6 +26,7 @@ typedef union {
25#define PGDIR_SHIFT 22 26#define PGDIR_SHIFT 22
26#define PTRS_PER_PGD 1024 27#define PTRS_PER_PGD 1024
27 28
29
28/* 30/*
29 * the i386 is two-level, so we don't really have any 31 * the i386 is two-level, so we don't really have any
30 * PMD directory physically. 32 * PMD directory physically.
diff --git a/arch/x86/include/asm/pgtable-3level_types.h b/arch/x86/include/asm/pgtable-3level_types.h
index bcc89625ebe5..1bd5876c8649 100644
--- a/arch/x86/include/asm/pgtable-3level_types.h
+++ b/arch/x86/include/asm/pgtable-3level_types.h
@@ -24,6 +24,8 @@ typedef union {
24#define SHARED_KERNEL_PMD 1 24#define SHARED_KERNEL_PMD 1
25#endif 25#endif
26 26
27#define PAGETABLE_LEVELS 3
28
27/* 29/*
28 * PGDIR_SHIFT determines what a top-level page table entry can map 30 * PGDIR_SHIFT determines what a top-level page table entry can map
29 */ 31 */
diff --git a/arch/x86/include/asm/pgtable_64_types.h b/arch/x86/include/asm/pgtable_64_types.h
index 2f59135c6f2a..fbf42b8e0383 100644
--- a/arch/x86/include/asm/pgtable_64_types.h
+++ b/arch/x86/include/asm/pgtable_64_types.h
@@ -18,6 +18,7 @@ typedef struct { pteval_t pte; } pte_t;
18#endif /* !__ASSEMBLY__ */ 18#endif /* !__ASSEMBLY__ */
19 19
20#define SHARED_KERNEL_PMD 0 20#define SHARED_KERNEL_PMD 0
21#define PAGETABLE_LEVELS 4
21 22
22/* 23/*
23 * PGDIR_SHIFT determines what a top-level page table entry can map 24 * PGDIR_SHIFT determines what a top-level page table entry can map
diff --git a/arch/x86/include/asm/pgtable_types.h b/arch/x86/include/asm/pgtable_types.h
index 9dafe87be2de..4d258ad76a0f 100644
--- a/arch/x86/include/asm/pgtable_types.h
+++ b/arch/x86/include/asm/pgtable_types.h
@@ -173,6 +173,12 @@
173 173
174#include <linux/types.h> 174#include <linux/types.h>
175 175
176/* PTE_PFN_MASK extracts the PFN from a (pte|pmd|pud|pgd)val_t */
177#define PTE_PFN_MASK ((pteval_t)PHYSICAL_PAGE_MASK)
178
179/* PTE_FLAGS_MASK extracts the flags from a (pte|pmd|pud|pgd)val_t */
180#define PTE_FLAGS_MASK (~PTE_PFN_MASK)
181
176typedef struct pgprot { pgprotval_t pgprot; } pgprot_t; 182typedef struct pgprot { pgprotval_t pgprot; } pgprot_t;
177 183
178typedef struct { pgdval_t pgd; } pgd_t; 184typedef struct { pgdval_t pgd; } pgd_t;
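
PTE_PFN_MASK and PTE_FLAGS_MASK move here from page_types.h so they sit next to the pteval_t/pgprot definitions they operate on. A minimal sketch of how the two masks partition a pte value (the helper names are invented for illustration; they mirror what the real pte_pfn()/pte_flags() accessors do):

	/* Illustration only: */
	static inline unsigned long sketch_pte_pfn(pte_t pte)
	{
		return (pte_val(pte) & PTE_PFN_MASK) >> PAGE_SHIFT;
	}

	static inline pteval_t sketch_pte_flags(pte_t pte)
	{
		return pte_val(pte) & PTE_FLAGS_MASK;
	}
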
diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h
index dabab1a19ddd..c7a98f738210 100644
--- a/arch/x86/include/asm/processor.h
+++ b/arch/x86/include/asm/processor.h
@@ -403,7 +403,6 @@ DECLARE_PER_CPU(unsigned long, stack_canary);
403#endif 403#endif
404#endif /* X86_64 */ 404#endif /* X86_64 */
405 405
406extern void print_cpu_info(struct cpuinfo_x86 *);
407extern unsigned int xstate_size; 406extern unsigned int xstate_size;
408extern void free_thread_xstate(struct task_struct *); 407extern void free_thread_xstate(struct task_struct *);
409extern struct kmem_cache *task_xstate_cachep; 408extern struct kmem_cache *task_xstate_cachep;
@@ -862,6 +861,7 @@ static inline void spin_lock_prefetch(const void *x)
862 * User space process size: 3GB (default). 861 * User space process size: 3GB (default).
863 */ 862 */
864#define TASK_SIZE PAGE_OFFSET 863#define TASK_SIZE PAGE_OFFSET
864#define TASK_SIZE_MAX TASK_SIZE
865#define STACK_TOP TASK_SIZE 865#define STACK_TOP TASK_SIZE
866#define STACK_TOP_MAX STACK_TOP 866#define STACK_TOP_MAX STACK_TOP
867 867
@@ -921,7 +921,7 @@ extern unsigned long thread_saved_pc(struct task_struct *tsk);
921/* 921/*
922 * User space process size. 47bits minus one guard page. 922 * User space process size. 47bits minus one guard page.
923 */ 923 */
924#define TASK_SIZE64 ((1UL << 47) - PAGE_SIZE) 924#define TASK_SIZE_MAX ((1UL << 47) - PAGE_SIZE)
925 925
926/* This decides where the kernel will search for a free chunk of vm 926/* This decides where the kernel will search for a free chunk of vm
927 * space during mmap's. 927 * space during mmap's.
@@ -930,12 +930,12 @@ extern unsigned long thread_saved_pc(struct task_struct *tsk);
930 0xc0000000 : 0xFFFFe000) 930 0xc0000000 : 0xFFFFe000)
931 931
932#define TASK_SIZE (test_thread_flag(TIF_IA32) ? \ 932#define TASK_SIZE (test_thread_flag(TIF_IA32) ? \
933 IA32_PAGE_OFFSET : TASK_SIZE64) 933 IA32_PAGE_OFFSET : TASK_SIZE_MAX)
934#define TASK_SIZE_OF(child) ((test_tsk_thread_flag(child, TIF_IA32)) ? \ 934#define TASK_SIZE_OF(child) ((test_tsk_thread_flag(child, TIF_IA32)) ? \
935 IA32_PAGE_OFFSET : TASK_SIZE64) 935 IA32_PAGE_OFFSET : TASK_SIZE_MAX)
936 936
937#define STACK_TOP TASK_SIZE 937#define STACK_TOP TASK_SIZE
938#define STACK_TOP_MAX TASK_SIZE64 938#define STACK_TOP_MAX TASK_SIZE_MAX
939 939
940#define INIT_THREAD { \ 940#define INIT_THREAD { \
941 .sp0 = (unsigned long)&init_stack + sizeof(init_stack) \ 941 .sp0 = (unsigned long)&init_stack + sizeof(init_stack) \
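
With TASK_SIZE64 renamed to TASK_SIZE_MAX and a 32-bit definition added alongside TASK_SIZE, address-limit checks can use one symbol on both architectures. A sketch of the kind of test this enables (the helper is hypothetical):

	/* Illustration only: "could this be a user-space address at all?"
	 * now reads the same on 32-bit and 64-bit. */
	static inline int sketch_addr_in_user_range(unsigned long addr)
	{
		return addr < TASK_SIZE_MAX;
	}
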
diff --git a/arch/x86/include/asm/syscalls.h b/arch/x86/include/asm/syscalls.h
index 258ef730aaa4..7043408f6904 100644
--- a/arch/x86/include/asm/syscalls.h
+++ b/arch/x86/include/asm/syscalls.h
@@ -82,7 +82,7 @@ asmlinkage long sys_iopl(unsigned int, struct pt_regs *);
82/* kernel/signal_64.c */ 82/* kernel/signal_64.c */
83asmlinkage long sys_sigaltstack(const stack_t __user *, stack_t __user *, 83asmlinkage long sys_sigaltstack(const stack_t __user *, stack_t __user *,
84 struct pt_regs *); 84 struct pt_regs *);
85asmlinkage long sys_rt_sigreturn(struct pt_regs *); 85long sys_rt_sigreturn(struct pt_regs *);
86 86
87/* kernel/sys_x86_64.c */ 87/* kernel/sys_x86_64.c */
88asmlinkage long sys_mmap(unsigned long, unsigned long, unsigned long, 88asmlinkage long sys_mmap(unsigned long, unsigned long, unsigned long,
diff --git a/arch/x86/include/asm/uaccess_32.h b/arch/x86/include/asm/uaccess_32.h
index 5e06259e90e5..a0ba61386972 100644
--- a/arch/x86/include/asm/uaccess_32.h
+++ b/arch/x86/include/asm/uaccess_32.h
@@ -157,7 +157,7 @@ __copy_from_user(void *to, const void __user *from, unsigned long n)
157} 157}
158 158
159static __always_inline unsigned long __copy_from_user_nocache(void *to, 159static __always_inline unsigned long __copy_from_user_nocache(void *to,
160 const void __user *from, unsigned long n) 160 const void __user *from, unsigned long n, unsigned long total)
161{ 161{
162 might_fault(); 162 might_fault();
163 if (__builtin_constant_p(n)) { 163 if (__builtin_constant_p(n)) {
@@ -180,7 +180,7 @@ static __always_inline unsigned long __copy_from_user_nocache(void *to,
180 180
181static __always_inline unsigned long 181static __always_inline unsigned long
182__copy_from_user_inatomic_nocache(void *to, const void __user *from, 182__copy_from_user_inatomic_nocache(void *to, const void __user *from,
183 unsigned long n) 183 unsigned long n, unsigned long total)
184{ 184{
185 return __copy_from_user_ll_nocache_nozero(to, from, n); 185 return __copy_from_user_ll_nocache_nozero(to, from, n);
186} 186}
diff --git a/arch/x86/include/asm/uaccess_64.h b/arch/x86/include/asm/uaccess_64.h
index 84210c479fca..dcaa0404cf7b 100644
--- a/arch/x86/include/asm/uaccess_64.h
+++ b/arch/x86/include/asm/uaccess_64.h
@@ -189,17 +189,28 @@ extern long __copy_user_nocache(void *dst, const void __user *src,
189 unsigned size, int zerorest); 189 unsigned size, int zerorest);
190 190
191static inline int __copy_from_user_nocache(void *dst, const void __user *src, 191static inline int __copy_from_user_nocache(void *dst, const void __user *src,
192 unsigned size) 192 unsigned size, unsigned long total)
193{ 193{
194 might_sleep(); 194 might_sleep();
195 return __copy_user_nocache(dst, src, size, 1); 195 /*
196 * In practice this limit means that large file write()s
197 * which get chunked to 4K copies get handled via
198 * non-temporal stores here. Smaller writes get handled
199 * via regular __copy_from_user():
200 */
201 if (likely(total >= PAGE_SIZE))
202 return __copy_user_nocache(dst, src, size, 1);
203 else
204 return __copy_from_user(dst, src, size);
196} 205}
197 206
198static inline int __copy_from_user_inatomic_nocache(void *dst, 207static inline int __copy_from_user_inatomic_nocache(void *dst,
199 const void __user *src, 208 const void __user *src, unsigned size, unsigned total)
200 unsigned size)
201{ 209{
202 return __copy_user_nocache(dst, src, size, 0); 210 if (likely(total >= PAGE_SIZE))
211 return __copy_user_nocache(dst, src, size, 0);
212 else
213 return __copy_from_user_inatomic(dst, src, size);
203} 214}
204 215
205unsigned long 216unsigned long
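
The new total argument lets the nocache helpers distinguish a large write that merely gets chunked into page-sized copies from a genuinely small one; only the former is worth the non-temporal store path. A hedged caller sketch (the function and parameter names are invented for illustration):

	/* Illustration only: a write path copying 4K at a time out of a
	 * larger user request.  'total_len' is the size of the whole
	 * request, 'chunk' the size of this piece. */
	static int sketch_copy_chunk(void *dst, const void __user *src,
				     unsigned chunk, unsigned long total_len)
	{
		/*
		 * A 1MB write chunked into 4K pieces still takes the
		 * non-temporal path (total_len >= PAGE_SIZE); a 128-byte
		 * write falls back to the ordinary cached copy.
		 */
		return __copy_from_user_nocache(dst, src, chunk, total_len);
	}
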
diff --git a/arch/x86/kernel/acpi/realmode/wakeup.S b/arch/x86/kernel/acpi/realmode/wakeup.S
index 3355973b12ac..580b4e296010 100644
--- a/arch/x86/kernel/acpi/realmode/wakeup.S
+++ b/arch/x86/kernel/acpi/realmode/wakeup.S
@@ -3,8 +3,8 @@
3 */ 3 */
4#include <asm/segment.h> 4#include <asm/segment.h>
5#include <asm/msr-index.h> 5#include <asm/msr-index.h>
6#include <asm/page.h> 6#include <asm/page_types.h>
7#include <asm/pgtable.h> 7#include <asm/pgtable_types.h>
8#include <asm/processor-flags.h> 8#include <asm/processor-flags.h>
9 9
10 .code16 10 .code16
diff --git a/arch/x86/kernel/acpi/wakeup_32.S b/arch/x86/kernel/acpi/wakeup_32.S
index a12e6a9fb659..8ded418b0593 100644
--- a/arch/x86/kernel/acpi/wakeup_32.S
+++ b/arch/x86/kernel/acpi/wakeup_32.S
@@ -1,7 +1,7 @@
1 .section .text.page_aligned 1 .section .text.page_aligned
2#include <linux/linkage.h> 2#include <linux/linkage.h>
3#include <asm/segment.h> 3#include <asm/segment.h>
4#include <asm/page.h> 4#include <asm/page_types.h>
5 5
6# Copyright 2003, 2008 Pavel Machek <pavel@suse.cz>, distribute under GPLv2 6# Copyright 2003, 2008 Pavel Machek <pavel@suse.cz>, distribute under GPLv2
7 7
diff --git a/arch/x86/kernel/acpi/wakeup_64.S b/arch/x86/kernel/acpi/wakeup_64.S
index 96258d9dc974..8ea5164cbd04 100644
--- a/arch/x86/kernel/acpi/wakeup_64.S
+++ b/arch/x86/kernel/acpi/wakeup_64.S
@@ -1,8 +1,8 @@
1.text 1.text
2#include <linux/linkage.h> 2#include <linux/linkage.h>
3#include <asm/segment.h> 3#include <asm/segment.h>
4#include <asm/pgtable.h> 4#include <asm/pgtable_types.h>
5#include <asm/page.h> 5#include <asm/page_types.h>
6#include <asm/msr.h> 6#include <asm/msr.h>
7#include <asm/asm-offsets.h> 7#include <asm/asm-offsets.h>
8 8
diff --git a/arch/x86/kernel/alternative.c b/arch/x86/kernel/alternative.c
index a84ac7b570e6..6907b8e85d52 100644
--- a/arch/x86/kernel/alternative.c
+++ b/arch/x86/kernel/alternative.c
@@ -498,12 +498,12 @@ void *text_poke_early(void *addr, const void *opcode, size_t len)
498 */ 498 */
499void *__kprobes text_poke(void *addr, const void *opcode, size_t len) 499void *__kprobes text_poke(void *addr, const void *opcode, size_t len)
500{ 500{
501 unsigned long flags;
502 char *vaddr; 501 char *vaddr;
503 int nr_pages = 2; 502 int nr_pages = 2;
504 struct page *pages[2]; 503 struct page *pages[2];
505 int i; 504 int i;
506 505
506 might_sleep();
507 if (!core_kernel_text((unsigned long)addr)) { 507 if (!core_kernel_text((unsigned long)addr)) {
508 pages[0] = vmalloc_to_page(addr); 508 pages[0] = vmalloc_to_page(addr);
509 pages[1] = vmalloc_to_page(addr + PAGE_SIZE); 509 pages[1] = vmalloc_to_page(addr + PAGE_SIZE);
@@ -517,9 +517,9 @@ void *__kprobes text_poke(void *addr, const void *opcode, size_t len)
517 nr_pages = 1; 517 nr_pages = 1;
518 vaddr = vmap(pages, nr_pages, VM_MAP, PAGE_KERNEL); 518 vaddr = vmap(pages, nr_pages, VM_MAP, PAGE_KERNEL);
519 BUG_ON(!vaddr); 519 BUG_ON(!vaddr);
520 local_irq_save(flags); 520 local_irq_disable();
521 memcpy(&vaddr[(unsigned long)addr & ~PAGE_MASK], opcode, len); 521 memcpy(&vaddr[(unsigned long)addr & ~PAGE_MASK], opcode, len);
522 local_irq_restore(flags); 522 local_irq_enable();
523 vunmap(vaddr); 523 vunmap(vaddr);
524 sync_core(); 524 sync_core();
525 /* Could also do a CLFLUSH here to speed up CPU recovery; but 525 /* Could also do a CLFLUSH here to speed up CPU recovery; but
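
Replacing local_irq_save/restore with might_sleep() plus a plain disable/enable makes the calling constraint explicit: text_poke() maps pages with vmap(), which can sleep, so it must not be called from atomic context in the first place. A sketch of a legitimate caller (the wrapper and the patch site are hypothetical):

	/* Illustration only: patching a 5-byte NOP over a call site from
	 * sleepable (e.g. module-init) context. */
	static void sketch_patch_nop5(void *ip)
	{
		static const unsigned char nop5[] = { 0x0f, 0x1f, 0x44, 0x00, 0x00 };

		text_poke(ip, nop5, sizeof(nop5));
	}
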
diff --git a/arch/x86/kernel/cpu/cpufreq/e_powersaver.c b/arch/x86/kernel/cpu/cpufreq/e_powersaver.c
index c2f930d86640..41ab3f064cb1 100644
--- a/arch/x86/kernel/cpu/cpufreq/e_powersaver.c
+++ b/arch/x86/kernel/cpu/cpufreq/e_powersaver.c
@@ -204,12 +204,12 @@ static int eps_cpu_init(struct cpufreq_policy *policy)
204 } 204 }
205 /* Enable Enhanced PowerSaver */ 205 /* Enable Enhanced PowerSaver */
206 rdmsrl(MSR_IA32_MISC_ENABLE, val); 206 rdmsrl(MSR_IA32_MISC_ENABLE, val);
207 if (!(val & 1 << 16)) { 207 if (!(val & MSR_IA32_MISC_ENABLE_ENHANCED_SPEEDSTEP)) {
208 val |= 1 << 16; 208 val |= MSR_IA32_MISC_ENABLE_ENHANCED_SPEEDSTEP;
209 wrmsrl(MSR_IA32_MISC_ENABLE, val); 209 wrmsrl(MSR_IA32_MISC_ENABLE, val);
210 /* Can be locked at 0 */ 210 /* Can be locked at 0 */
211 rdmsrl(MSR_IA32_MISC_ENABLE, val); 211 rdmsrl(MSR_IA32_MISC_ENABLE, val);
212 if (!(val & 1 << 16)) { 212 if (!(val & MSR_IA32_MISC_ENABLE_ENHANCED_SPEEDSTEP)) {
213 printk(KERN_INFO "eps: Can't enable Enhanced PowerSaver\n"); 213 printk(KERN_INFO "eps: Can't enable Enhanced PowerSaver\n");
214 return -ENODEV; 214 return -ENODEV;
215 } 215 }
diff --git a/arch/x86/kernel/cpu/cpufreq/speedstep-centrino.c b/arch/x86/kernel/cpu/cpufreq/speedstep-centrino.c
index f08998278a3a..c9f1fdc02830 100644
--- a/arch/x86/kernel/cpu/cpufreq/speedstep-centrino.c
+++ b/arch/x86/kernel/cpu/cpufreq/speedstep-centrino.c
@@ -390,14 +390,14 @@ static int centrino_cpu_init(struct cpufreq_policy *policy)
390 enable it if not. */ 390 enable it if not. */
391 rdmsr(MSR_IA32_MISC_ENABLE, l, h); 391 rdmsr(MSR_IA32_MISC_ENABLE, l, h);
392 392
393 if (!(l & (1<<16))) { 393 if (!(l & MSR_IA32_MISC_ENABLE_ENHANCED_SPEEDSTEP)) {
394 l |= (1<<16); 394 l |= MSR_IA32_MISC_ENABLE_ENHANCED_SPEEDSTEP;
395 dprintk("trying to enable Enhanced SpeedStep (%x)\n", l); 395 dprintk("trying to enable Enhanced SpeedStep (%x)\n", l);
396 wrmsr(MSR_IA32_MISC_ENABLE, l, h); 396 wrmsr(MSR_IA32_MISC_ENABLE, l, h);
397 397
398 /* check to see if it stuck */ 398 /* check to see if it stuck */
399 rdmsr(MSR_IA32_MISC_ENABLE, l, h); 399 rdmsr(MSR_IA32_MISC_ENABLE, l, h);
400 if (!(l & (1<<16))) { 400 if (!(l & MSR_IA32_MISC_ENABLE_ENHANCED_SPEEDSTEP)) {
401 printk(KERN_INFO PFX 401 printk(KERN_INFO PFX
402 "couldn't enable Enhanced SpeedStep\n"); 402 "couldn't enable Enhanced SpeedStep\n");
403 return -ENODEV; 403 return -ENODEV;
diff --git a/arch/x86/kernel/cpu/intel.c b/arch/x86/kernel/cpu/intel.c
index 7aeef1d327b1..25c559ba8d54 100644
--- a/arch/x86/kernel/cpu/intel.c
+++ b/arch/x86/kernel/cpu/intel.c
@@ -146,10 +146,10 @@ static void __cpuinit intel_workarounds(struct cpuinfo_x86 *c)
146 */ 146 */
147 if ((c->x86 == 15) && (c->x86_model == 1) && (c->x86_mask == 1)) { 147 if ((c->x86 == 15) && (c->x86_model == 1) && (c->x86_mask == 1)) {
148 rdmsr(MSR_IA32_MISC_ENABLE, lo, hi); 148 rdmsr(MSR_IA32_MISC_ENABLE, lo, hi);
149 if ((lo & (1<<9)) == 0) { 149 if ((lo & MSR_IA32_MISC_ENABLE_PREFETCH_DISABLE) == 0) {
150 printk (KERN_INFO "CPU: C0 stepping P4 Xeon detected.\n"); 150 printk (KERN_INFO "CPU: C0 stepping P4 Xeon detected.\n");
151 printk (KERN_INFO "CPU: Disabling hardware prefetching (Errata 037)\n"); 151 printk (KERN_INFO "CPU: Disabling hardware prefetching (Errata 037)\n");
152 lo |= (1<<9); /* Disable hw prefetching */ 152 lo |= MSR_IA32_MISC_ENABLE_PREFETCH_DISABLE;
153 wrmsr (MSR_IA32_MISC_ENABLE, lo, hi); 153 wrmsr (MSR_IA32_MISC_ENABLE, lo, hi);
154 } 154 }
155 } 155 }
diff --git a/arch/x86/kernel/cpu/mcheck/mce_intel_64.c b/arch/x86/kernel/cpu/mcheck/mce_intel_64.c
index 4b7d78cdc0a5..aa5e287c98e0 100644
--- a/arch/x86/kernel/cpu/mcheck/mce_intel_64.c
+++ b/arch/x86/kernel/cpu/mcheck/mce_intel_64.c
@@ -49,13 +49,13 @@ static void intel_init_thermal(struct cpuinfo_x86 *c)
49 */ 49 */
50 rdmsr(MSR_IA32_MISC_ENABLE, l, h); 50 rdmsr(MSR_IA32_MISC_ENABLE, l, h);
51 h = apic_read(APIC_LVTTHMR); 51 h = apic_read(APIC_LVTTHMR);
52 if ((l & (1 << 3)) && (h & APIC_DM_SMI)) { 52 if ((l & MSR_IA32_MISC_ENABLE_TM1) && (h & APIC_DM_SMI)) {
53 printk(KERN_DEBUG 53 printk(KERN_DEBUG
54 "CPU%d: Thermal monitoring handled by SMI\n", cpu); 54 "CPU%d: Thermal monitoring handled by SMI\n", cpu);
55 return; 55 return;
56 } 56 }
57 57
58 if (cpu_has(c, X86_FEATURE_TM2) && (l & (1 << 13))) 58 if (cpu_has(c, X86_FEATURE_TM2) && (l & MSR_IA32_MISC_ENABLE_TM2))
59 tm2 = 1; 59 tm2 = 1;
60 60
61 if (h & APIC_VECTOR_MASK) { 61 if (h & APIC_VECTOR_MASK) {
@@ -73,7 +73,7 @@ static void intel_init_thermal(struct cpuinfo_x86 *c)
73 wrmsr(MSR_IA32_THERM_INTERRUPT, l | 0x03, h); 73 wrmsr(MSR_IA32_THERM_INTERRUPT, l | 0x03, h);
74 74
75 rdmsr(MSR_IA32_MISC_ENABLE, l, h); 75 rdmsr(MSR_IA32_MISC_ENABLE, l, h);
76 wrmsr(MSR_IA32_MISC_ENABLE, l | (1 << 3), h); 76 wrmsr(MSR_IA32_MISC_ENABLE, l | MSR_IA32_MISC_ENABLE_TM1, h);
77 77
78 l = apic_read(APIC_LVTTHMR); 78 l = apic_read(APIC_LVTTHMR);
79 apic_write(APIC_LVTTHMR, l & ~APIC_LVT_MASKED); 79 apic_write(APIC_LVTTHMR, l & ~APIC_LVT_MASKED);
diff --git a/arch/x86/kernel/cpu/mcheck/p4.c b/arch/x86/kernel/cpu/mcheck/p4.c
index 9b60fce09f75..f53bdcbaf382 100644
--- a/arch/x86/kernel/cpu/mcheck/p4.c
+++ b/arch/x86/kernel/cpu/mcheck/p4.c
@@ -85,7 +85,7 @@ static void intel_init_thermal(struct cpuinfo_x86 *c)
85 */ 85 */
86 rdmsr(MSR_IA32_MISC_ENABLE, l, h); 86 rdmsr(MSR_IA32_MISC_ENABLE, l, h);
87 h = apic_read(APIC_LVTTHMR); 87 h = apic_read(APIC_LVTTHMR);
88 if ((l & (1<<3)) && (h & APIC_DM_SMI)) { 88 if ((l & MSR_IA32_MISC_ENABLE_TM1) && (h & APIC_DM_SMI)) {
89 printk(KERN_DEBUG "CPU%d: Thermal monitoring handled by SMI\n", 89 printk(KERN_DEBUG "CPU%d: Thermal monitoring handled by SMI\n",
90 cpu); 90 cpu);
91 return; /* -EBUSY */ 91 return; /* -EBUSY */
@@ -111,7 +111,7 @@ static void intel_init_thermal(struct cpuinfo_x86 *c)
111 vendor_thermal_interrupt = intel_thermal_interrupt; 111 vendor_thermal_interrupt = intel_thermal_interrupt;
112 112
113 rdmsr(MSR_IA32_MISC_ENABLE, l, h); 113 rdmsr(MSR_IA32_MISC_ENABLE, l, h);
114 wrmsr(MSR_IA32_MISC_ENABLE, l | (1<<3), h); 114 wrmsr(MSR_IA32_MISC_ENABLE, l | MSR_IA32_MISC_ENABLE_TM1, h);
115 115
116 l = apic_read(APIC_LVTTHMR); 116 l = apic_read(APIC_LVTTHMR);
117 apic_write(APIC_LVTTHMR, l & ~APIC_LVT_MASKED); 117 apic_write(APIC_LVTTHMR, l & ~APIC_LVT_MASKED);
diff --git a/arch/x86/kernel/e820.c b/arch/x86/kernel/e820.c
index e85826829cf2..508bec1cee27 100644
--- a/arch/x86/kernel/e820.c
+++ b/arch/x86/kernel/e820.c
@@ -858,6 +858,9 @@ void __init reserve_early_overlap_ok(u64 start, u64 end, char *name)
858 */ 858 */
859void __init reserve_early(u64 start, u64 end, char *name) 859void __init reserve_early(u64 start, u64 end, char *name)
860{ 860{
861 if (start >= end)
862 return;
863
861 drop_overlaps_that_are_ok(start, end); 864 drop_overlaps_that_are_ok(start, end);
862 __reserve_early(start, end, name, 0); 865 __reserve_early(start, end, name, 0);
863} 866}
diff --git a/arch/x86/kernel/efi_stub_32.S b/arch/x86/kernel/efi_stub_32.S
index ef00bb77d7e4..fbe66e626c09 100644
--- a/arch/x86/kernel/efi_stub_32.S
+++ b/arch/x86/kernel/efi_stub_32.S
@@ -6,7 +6,7 @@
6 */ 6 */
7 7
8#include <linux/linkage.h> 8#include <linux/linkage.h>
9#include <asm/page.h> 9#include <asm/page_types.h>
10 10
11/* 11/*
12 * efi_call_phys(void *, ...) is a function with variable parameters. 12 * efi_call_phys(void *, ...) is a function with variable parameters.
@@ -113,6 +113,7 @@ ENTRY(efi_call_phys)
113 movl (%edx), %ecx 113 movl (%edx), %ecx
114 pushl %ecx 114 pushl %ecx
115 ret 115 ret
116ENDPROC(efi_call_phys)
116.previous 117.previous
117 118
118.data 119.data
diff --git a/arch/x86/kernel/efi_stub_64.S b/arch/x86/kernel/efi_stub_64.S
index 99b47d48c9f4..4c07ccab8146 100644
--- a/arch/x86/kernel/efi_stub_64.S
+++ b/arch/x86/kernel/efi_stub_64.S
@@ -41,6 +41,7 @@ ENTRY(efi_call0)
41 addq $32, %rsp 41 addq $32, %rsp
42 RESTORE_XMM 42 RESTORE_XMM
43 ret 43 ret
44ENDPROC(efi_call0)
44 45
45ENTRY(efi_call1) 46ENTRY(efi_call1)
46 SAVE_XMM 47 SAVE_XMM
@@ -50,6 +51,7 @@ ENTRY(efi_call1)
50 addq $32, %rsp 51 addq $32, %rsp
51 RESTORE_XMM 52 RESTORE_XMM
52 ret 53 ret
54ENDPROC(efi_call1)
53 55
54ENTRY(efi_call2) 56ENTRY(efi_call2)
55 SAVE_XMM 57 SAVE_XMM
@@ -59,6 +61,7 @@ ENTRY(efi_call2)
59 addq $32, %rsp 61 addq $32, %rsp
60 RESTORE_XMM 62 RESTORE_XMM
61 ret 63 ret
64ENDPROC(efi_call2)
62 65
63ENTRY(efi_call3) 66ENTRY(efi_call3)
64 SAVE_XMM 67 SAVE_XMM
@@ -69,6 +72,7 @@ ENTRY(efi_call3)
69 addq $32, %rsp 72 addq $32, %rsp
70 RESTORE_XMM 73 RESTORE_XMM
71 ret 74 ret
75ENDPROC(efi_call3)
72 76
73ENTRY(efi_call4) 77ENTRY(efi_call4)
74 SAVE_XMM 78 SAVE_XMM
@@ -80,6 +84,7 @@ ENTRY(efi_call4)
80 addq $32, %rsp 84 addq $32, %rsp
81 RESTORE_XMM 85 RESTORE_XMM
82 ret 86 ret
87ENDPROC(efi_call4)
83 88
84ENTRY(efi_call5) 89ENTRY(efi_call5)
85 SAVE_XMM 90 SAVE_XMM
@@ -92,6 +97,7 @@ ENTRY(efi_call5)
92 addq $48, %rsp 97 addq $48, %rsp
93 RESTORE_XMM 98 RESTORE_XMM
94 ret 99 ret
100ENDPROC(efi_call5)
95 101
96ENTRY(efi_call6) 102ENTRY(efi_call6)
97 SAVE_XMM 103 SAVE_XMM
@@ -107,3 +113,4 @@ ENTRY(efi_call6)
107 addq $48, %rsp 113 addq $48, %rsp
108 RESTORE_XMM 114 RESTORE_XMM
109 ret 115 ret
116ENDPROC(efi_call6)
diff --git a/arch/x86/kernel/entry_32.S b/arch/x86/kernel/entry_32.S
index e99206831459..899e8938e79f 100644
--- a/arch/x86/kernel/entry_32.S
+++ b/arch/x86/kernel/entry_32.S
@@ -47,7 +47,7 @@
47#include <asm/errno.h> 47#include <asm/errno.h>
48#include <asm/segment.h> 48#include <asm/segment.h>
49#include <asm/smp.h> 49#include <asm/smp.h>
50#include <asm/page.h> 50#include <asm/page_types.h>
51#include <asm/desc.h> 51#include <asm/desc.h>
52#include <asm/percpu.h> 52#include <asm/percpu.h>
53#include <asm/dwarf2.h> 53#include <asm/dwarf2.h>
@@ -1359,7 +1359,7 @@ nmi_espfix_stack:
1359 CFI_ADJUST_CFA_OFFSET 4 1359 CFI_ADJUST_CFA_OFFSET 4
1360 pushl %esp 1360 pushl %esp
1361 CFI_ADJUST_CFA_OFFSET 4 1361 CFI_ADJUST_CFA_OFFSET 4
1362 addw $4, (%esp) 1362 addl $4, (%esp)
1363 /* copy the iret frame of 12 bytes */ 1363 /* copy the iret frame of 12 bytes */
1364 .rept 3 1364 .rept 3
1365 pushl 16(%esp) 1365 pushl 16(%esp)
diff --git a/arch/x86/kernel/entry_64.S b/arch/x86/kernel/entry_64.S
index fbcf96b295ff..83d1836b9467 100644
--- a/arch/x86/kernel/entry_64.S
+++ b/arch/x86/kernel/entry_64.S
@@ -48,7 +48,7 @@
48#include <asm/unistd.h> 48#include <asm/unistd.h>
49#include <asm/thread_info.h> 49#include <asm/thread_info.h>
50#include <asm/hw_irq.h> 50#include <asm/hw_irq.h>
51#include <asm/page.h> 51#include <asm/page_types.h>
52#include <asm/irqflags.h> 52#include <asm/irqflags.h>
53#include <asm/paravirt.h> 53#include <asm/paravirt.h>
54#include <asm/ftrace.h> 54#include <asm/ftrace.h>
@@ -77,20 +77,17 @@ ENTRY(ftrace_caller)
77 movq 8(%rbp), %rsi 77 movq 8(%rbp), %rsi
78 subq $MCOUNT_INSN_SIZE, %rdi 78 subq $MCOUNT_INSN_SIZE, %rdi
79 79
80.globl ftrace_call 80GLOBAL(ftrace_call)
81ftrace_call:
82 call ftrace_stub 81 call ftrace_stub
83 82
84 MCOUNT_RESTORE_FRAME 83 MCOUNT_RESTORE_FRAME
85 84
86#ifdef CONFIG_FUNCTION_GRAPH_TRACER 85#ifdef CONFIG_FUNCTION_GRAPH_TRACER
87.globl ftrace_graph_call 86GLOBAL(ftrace_graph_call)
88ftrace_graph_call:
89 jmp ftrace_stub 87 jmp ftrace_stub
90#endif 88#endif
91 89
92.globl ftrace_stub 90GLOBAL(ftrace_stub)
93ftrace_stub:
94 retq 91 retq
95END(ftrace_caller) 92END(ftrace_caller)
96 93
@@ -110,8 +107,7 @@ ENTRY(mcount)
110 jnz ftrace_graph_caller 107 jnz ftrace_graph_caller
111#endif 108#endif
112 109
113.globl ftrace_stub 110GLOBAL(ftrace_stub)
114ftrace_stub:
115 retq 111 retq
116 112
117trace: 113trace:
@@ -148,9 +144,7 @@ ENTRY(ftrace_graph_caller)
148 retq 144 retq
149END(ftrace_graph_caller) 145END(ftrace_graph_caller)
150 146
151 147GLOBAL(return_to_handler)
152.globl return_to_handler
153return_to_handler:
154 subq $80, %rsp 148 subq $80, %rsp
155 149
156 movq %rax, (%rsp) 150 movq %rax, (%rsp)
@@ -188,6 +182,7 @@ return_to_handler:
188ENTRY(native_usergs_sysret64) 182ENTRY(native_usergs_sysret64)
189 swapgs 183 swapgs
190 sysretq 184 sysretq
185ENDPROC(native_usergs_sysret64)
191#endif /* CONFIG_PARAVIRT */ 186#endif /* CONFIG_PARAVIRT */
192 187
193 188
@@ -633,16 +628,14 @@ tracesys:
633 * Syscall return path ending with IRET. 628 * Syscall return path ending with IRET.
634 * Has correct top of stack, but partial stack frame. 629 * Has correct top of stack, but partial stack frame.
635 */ 630 */
636 .globl int_ret_from_sys_call 631GLOBAL(int_ret_from_sys_call)
637 .globl int_with_check
638int_ret_from_sys_call:
639 DISABLE_INTERRUPTS(CLBR_NONE) 632 DISABLE_INTERRUPTS(CLBR_NONE)
640 TRACE_IRQS_OFF 633 TRACE_IRQS_OFF
641 testl $3,CS-ARGOFFSET(%rsp) 634 testl $3,CS-ARGOFFSET(%rsp)
642 je retint_restore_args 635 je retint_restore_args
643 movl $_TIF_ALLWORK_MASK,%edi 636 movl $_TIF_ALLWORK_MASK,%edi
644 /* edi: mask to check */ 637 /* edi: mask to check */
645int_with_check: 638GLOBAL(int_with_check)
646 LOCKDEP_SYS_EXIT_IRQ 639 LOCKDEP_SYS_EXIT_IRQ
647 GET_THREAD_INFO(%rcx) 640 GET_THREAD_INFO(%rcx)
648 movl TI_flags(%rcx),%edx 641 movl TI_flags(%rcx),%edx
diff --git a/arch/x86/kernel/head_32.S b/arch/x86/kernel/head_32.S
index 2a0aad7718d5..c32ca19d591a 100644
--- a/arch/x86/kernel/head_32.S
+++ b/arch/x86/kernel/head_32.S
@@ -11,8 +11,8 @@
11#include <linux/init.h> 11#include <linux/init.h>
12#include <linux/linkage.h> 12#include <linux/linkage.h>
13#include <asm/segment.h> 13#include <asm/segment.h>
14#include <asm/page.h> 14#include <asm/page_types.h>
15#include <asm/pgtable.h> 15#include <asm/pgtable_types.h>
16#include <asm/desc.h> 16#include <asm/desc.h>
17#include <asm/cache.h> 17#include <asm/cache.h>
18#include <asm/thread_info.h> 18#include <asm/thread_info.h>
diff --git a/arch/x86/kernel/head_64.S b/arch/x86/kernel/head_64.S
index 2e648e3a5ea4..54b29bb24e71 100644
--- a/arch/x86/kernel/head_64.S
+++ b/arch/x86/kernel/head_64.S
@@ -329,8 +329,6 @@ early_idt_ripmsg:
329#endif /* CONFIG_EARLY_PRINTK */ 329#endif /* CONFIG_EARLY_PRINTK */
330 .previous 330 .previous
331 331
332.balign PAGE_SIZE
333
334#define NEXT_PAGE(name) \ 332#define NEXT_PAGE(name) \
335 .balign PAGE_SIZE; \ 333 .balign PAGE_SIZE; \
336ENTRY(name) 334ENTRY(name)
@@ -419,7 +417,7 @@ ENTRY(phys_base)
419 .section .bss, "aw", @nobits 417 .section .bss, "aw", @nobits
420 .align L1_CACHE_BYTES 418 .align L1_CACHE_BYTES
421ENTRY(idt_table) 419ENTRY(idt_table)
422 .skip 256 * 16 420 .skip IDT_ENTRIES * 16
423 421
424 .section .bss.page_aligned, "aw", @nobits 422 .section .bss.page_aligned, "aw", @nobits
425 .align PAGE_SIZE 423 .align PAGE_SIZE
diff --git a/arch/x86/kernel/machine_kexec_32.c b/arch/x86/kernel/machine_kexec_32.c
index 37f420018a41..f5fc8c781a62 100644
--- a/arch/x86/kernel/machine_kexec_32.c
+++ b/arch/x86/kernel/machine_kexec_32.c
@@ -121,7 +121,7 @@ static void machine_kexec_page_table_set_one(
121static void machine_kexec_prepare_page_tables(struct kimage *image) 121static void machine_kexec_prepare_page_tables(struct kimage *image)
122{ 122{
123 void *control_page; 123 void *control_page;
124 pmd_t *pmd = 0; 124 pmd_t *pmd = NULL;
125 125
126 control_page = page_address(image->control_code_page); 126 control_page = page_address(image->control_code_page);
127#ifdef CONFIG_X86_PAE 127#ifdef CONFIG_X86_PAE
diff --git a/arch/x86/kernel/ptrace.c b/arch/x86/kernel/ptrace.c
index d2f7cd5b2c83..fb2159a5c817 100644
--- a/arch/x86/kernel/ptrace.c
+++ b/arch/x86/kernel/ptrace.c
@@ -268,7 +268,7 @@ static unsigned long debugreg_addr_limit(struct task_struct *task)
268 if (test_tsk_thread_flag(task, TIF_IA32)) 268 if (test_tsk_thread_flag(task, TIF_IA32))
269 return IA32_PAGE_OFFSET - 3; 269 return IA32_PAGE_OFFSET - 3;
270#endif 270#endif
271 return TASK_SIZE64 - 7; 271 return TASK_SIZE_MAX - 7;
272} 272}
273 273
274#endif /* CONFIG_X86_32 */ 274#endif /* CONFIG_X86_32 */
diff --git a/arch/x86/kernel/relocate_kernel_32.S b/arch/x86/kernel/relocate_kernel_32.S
index a160f3119725..2064d0aa8d28 100644
--- a/arch/x86/kernel/relocate_kernel_32.S
+++ b/arch/x86/kernel/relocate_kernel_32.S
@@ -7,7 +7,7 @@
7 */ 7 */
8 8
9#include <linux/linkage.h> 9#include <linux/linkage.h>
10#include <asm/page.h> 10#include <asm/page_types.h>
11#include <asm/kexec.h> 11#include <asm/kexec.h>
12#include <asm/processor-flags.h> 12#include <asm/processor-flags.h>
13 13
diff --git a/arch/x86/kernel/relocate_kernel_64.S b/arch/x86/kernel/relocate_kernel_64.S
index b0bbdd4829c9..d32cfb27a479 100644
--- a/arch/x86/kernel/relocate_kernel_64.S
+++ b/arch/x86/kernel/relocate_kernel_64.S
@@ -7,10 +7,10 @@
7 */ 7 */
8 8
9#include <linux/linkage.h> 9#include <linux/linkage.h>
10#include <asm/page.h> 10#include <asm/page_types.h>
11#include <asm/kexec.h> 11#include <asm/kexec.h>
12#include <asm/processor-flags.h> 12#include <asm/processor-flags.h>
13#include <asm/pgtable.h> 13#include <asm/pgtable_types.h>
14 14
15/* 15/*
16 * Must be relocatable PIC code callable as a C function 16 * Must be relocatable PIC code callable as a C function
diff --git a/arch/x86/kernel/trampoline_32.S b/arch/x86/kernel/trampoline_32.S
index d8ccc3c6552f..66d874e5404c 100644
--- a/arch/x86/kernel/trampoline_32.S
+++ b/arch/x86/kernel/trampoline_32.S
@@ -29,7 +29,7 @@
29 29
30#include <linux/linkage.h> 30#include <linux/linkage.h>
31#include <asm/segment.h> 31#include <asm/segment.h>
32#include <asm/page.h> 32#include <asm/page_types.h>
33 33
34/* We can free up trampoline after bootup if cpu hotplug is not supported. */ 34/* We can free up trampoline after bootup if cpu hotplug is not supported. */
35#ifndef CONFIG_HOTPLUG_CPU 35#ifndef CONFIG_HOTPLUG_CPU
diff --git a/arch/x86/kernel/trampoline_64.S b/arch/x86/kernel/trampoline_64.S
index 95a012a4664e..cddfb8d386b9 100644
--- a/arch/x86/kernel/trampoline_64.S
+++ b/arch/x86/kernel/trampoline_64.S
@@ -25,8 +25,8 @@
25 */ 25 */
26 26
27#include <linux/linkage.h> 27#include <linux/linkage.h>
28#include <asm/pgtable.h> 28#include <asm/pgtable_types.h>
29#include <asm/page.h> 29#include <asm/page_types.h>
30#include <asm/msr.h> 30#include <asm/msr.h>
31#include <asm/segment.h> 31#include <asm/segment.h>
32#include <asm/processor-flags.h> 32#include <asm/processor-flags.h>
diff --git a/arch/x86/kernel/traps.c b/arch/x86/kernel/traps.c
index c8c0a7e530be..c05430ac1b44 100644
--- a/arch/x86/kernel/traps.c
+++ b/arch/x86/kernel/traps.c
@@ -942,7 +942,7 @@ dotraplinkage void do_iret_error(struct pt_regs *regs, long error_code)
942 info.si_signo = SIGILL; 942 info.si_signo = SIGILL;
943 info.si_errno = 0; 943 info.si_errno = 0;
944 info.si_code = ILL_BADSTK; 944 info.si_code = ILL_BADSTK;
945 info.si_addr = 0; 945 info.si_addr = NULL;
946 if (notify_die(DIE_TRAP, "iret exception", 946 if (notify_die(DIE_TRAP, "iret exception",
947 regs, error_code, 32, SIGILL) == NOTIFY_STOP) 947 regs, error_code, 32, SIGILL) == NOTIFY_STOP)
948 return; 948 return;
diff --git a/arch/x86/kernel/vmiclock_32.c b/arch/x86/kernel/vmiclock_32.c
index 49b4cd6707f9..33a788d5879c 100644
--- a/arch/x86/kernel/vmiclock_32.c
+++ b/arch/x86/kernel/vmiclock_32.c
@@ -287,8 +287,7 @@ static struct clocksource clocksource_vmi;
287static cycle_t read_real_cycles(void) 287static cycle_t read_real_cycles(void)
288{ 288{
289 cycle_t ret = (cycle_t)vmi_timer_ops.get_cycle_counter(VMI_CYCLES_REAL); 289 cycle_t ret = (cycle_t)vmi_timer_ops.get_cycle_counter(VMI_CYCLES_REAL);
290 return ret >= clocksource_vmi.cycle_last ? 290 return max(ret, clocksource_vmi.cycle_last);
291 ret : clocksource_vmi.cycle_last;
292} 291}
293 292
294static struct clocksource clocksource_vmi = { 293static struct clocksource clocksource_vmi = {
diff --git a/arch/x86/kernel/vmlinux_32.lds.S b/arch/x86/kernel/vmlinux_32.lds.S
index 3eba7f7bac05..0d860963f268 100644
--- a/arch/x86/kernel/vmlinux_32.lds.S
+++ b/arch/x86/kernel/vmlinux_32.lds.S
@@ -12,7 +12,7 @@
12 12
13#include <asm-generic/vmlinux.lds.h> 13#include <asm-generic/vmlinux.lds.h>
14#include <asm/thread_info.h> 14#include <asm/thread_info.h>
15#include <asm/page.h> 15#include <asm/page_types.h>
16#include <asm/cache.h> 16#include <asm/cache.h>
17#include <asm/boot.h> 17#include <asm/boot.h>
18 18
diff --git a/arch/x86/kernel/vmlinux_64.lds.S b/arch/x86/kernel/vmlinux_64.lds.S
index 087a7f2c639b..fbfced6f6800 100644
--- a/arch/x86/kernel/vmlinux_64.lds.S
+++ b/arch/x86/kernel/vmlinux_64.lds.S
@@ -6,7 +6,7 @@
6 6
7#include <asm-generic/vmlinux.lds.h> 7#include <asm-generic/vmlinux.lds.h>
8#include <asm/asm-offsets.h> 8#include <asm/asm-offsets.h>
9#include <asm/page.h> 9#include <asm/page_types.h>
10 10
11#undef i386 /* in case the preprocessor is a 32bit one */ 11#undef i386 /* in case the preprocessor is a 32bit one */
12 12
diff --git a/arch/x86/lib/getuser.S b/arch/x86/lib/getuser.S
index ad374003742f..51f1504cddd9 100644
--- a/arch/x86/lib/getuser.S
+++ b/arch/x86/lib/getuser.S
@@ -28,7 +28,7 @@
28 28
29#include <linux/linkage.h> 29#include <linux/linkage.h>
30#include <asm/dwarf2.h> 30#include <asm/dwarf2.h>
31#include <asm/page.h> 31#include <asm/page_types.h>
32#include <asm/errno.h> 32#include <asm/errno.h>
33#include <asm/asm-offsets.h> 33#include <asm/asm-offsets.h>
34#include <asm/thread_info.h> 34#include <asm/thread_info.h>
diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c
index 29644175490f..a03b7279efa0 100644
--- a/arch/x86/mm/fault.c
+++ b/arch/x86/mm/fault.c
@@ -1,74 +1,79 @@
1/* 1/*
2 * Copyright (C) 1995 Linus Torvalds 2 * Copyright (C) 1995 Linus Torvalds
3 * Copyright (C) 2001,2002 Andi Kleen, SuSE Labs. 3 * Copyright (C) 2001, 2002 Andi Kleen, SuSE Labs.
4 * Copyright (C) 2008-2009, Red Hat Inc., Ingo Molnar
4 */ 5 */
5
6#include <linux/signal.h>
7#include <linux/sched.h>
8#include <linux/kernel.h>
9#include <linux/errno.h>
10#include <linux/string.h>
11#include <linux/types.h>
12#include <linux/ptrace.h>
13#include <linux/mmiotrace.h>
14#include <linux/mman.h>
15#include <linux/mm.h>
16#include <linux/smp.h>
17#include <linux/interrupt.h> 6#include <linux/interrupt.h>
18#include <linux/init.h> 7#include <linux/mmiotrace.h>
19#include <linux/tty.h> 8#include <linux/bootmem.h>
20#include <linux/vt_kern.h> /* For unblank_screen() */
21#include <linux/compiler.h> 9#include <linux/compiler.h>
22#include <linux/highmem.h> 10#include <linux/highmem.h>
23#include <linux/bootmem.h> /* for max_low_pfn */
24#include <linux/vmalloc.h>
25#include <linux/module.h>
26#include <linux/kprobes.h> 11#include <linux/kprobes.h>
27#include <linux/uaccess.h> 12#include <linux/uaccess.h>
13#include <linux/vmalloc.h>
14#include <linux/vt_kern.h>
15#include <linux/signal.h>
16#include <linux/kernel.h>
17#include <linux/ptrace.h>
18#include <linux/string.h>
19#include <linux/module.h>
28#include <linux/kdebug.h> 20#include <linux/kdebug.h>
21#include <linux/errno.h>
29#include <linux/magic.h> 22#include <linux/magic.h>
23#include <linux/sched.h>
24#include <linux/types.h>
25#include <linux/init.h>
26#include <linux/mman.h>
27#include <linux/tty.h>
28#include <linux/smp.h>
29#include <linux/mm.h>
30
31#include <asm-generic/sections.h>
30 32
31#include <asm/system.h>
32#include <asm/desc.h>
33#include <asm/segment.h>
34#include <asm/pgalloc.h>
35#include <asm/smp.h>
36#include <asm/tlbflush.h> 33#include <asm/tlbflush.h>
34#include <asm/pgalloc.h>
35#include <asm/segment.h>
36#include <asm/system.h>
37#include <asm/proto.h> 37#include <asm/proto.h>
38#include <asm-generic/sections.h>
39#include <asm/traps.h> 38#include <asm/traps.h>
39#include <asm/desc.h>
40 40
41/* 41/*
42 * Page fault error code bits 42 * Page fault error code bits:
43 * bit 0 == 0 means no page found, 1 means protection fault 43 *
44 * bit 1 == 0 means read, 1 means write 44 * bit 0 == 0: no page found 1: protection fault
45 * bit 2 == 0 means kernel, 1 means user-mode 45 * bit 1 == 0: read access 1: write access
46 * bit 3 == 1 means use of reserved bit detected 46 * bit 2 == 0: kernel-mode access 1: user-mode access
47 * bit 4 == 1 means fault was an instruction fetch 47 * bit 3 == 1: use of reserved bit detected
48 * bit 4 == 1: fault was an instruction fetch
48 */ 49 */
49#define PF_PROT (1<<0) 50enum x86_pf_error_code {
50#define PF_WRITE (1<<1)
51#define PF_USER (1<<2)
52#define PF_RSVD (1<<3)
53#define PF_INSTR (1<<4)
54 51
52 PF_PROT = 1 << 0,
53 PF_WRITE = 1 << 1,
54 PF_USER = 1 << 2,
55 PF_RSVD = 1 << 3,
56 PF_INSTR = 1 << 4,
57};
58
59/*
60 * Returns 0 if mmiotrace is disabled, or if the fault is not
61 * handled by mmiotrace:
62 */
55static inline int kmmio_fault(struct pt_regs *regs, unsigned long addr) 63static inline int kmmio_fault(struct pt_regs *regs, unsigned long addr)
56{ 64{
57#ifdef CONFIG_MMIOTRACE
58 if (unlikely(is_kmmio_active())) 65 if (unlikely(is_kmmio_active()))
59 if (kmmio_handler(regs, addr) == 1) 66 if (kmmio_handler(regs, addr) == 1)
60 return -1; 67 return -1;
61#endif
62 return 0; 68 return 0;
63} 69}
64 70
65static inline int notify_page_fault(struct pt_regs *regs) 71static inline int notify_page_fault(struct pt_regs *regs)
66{ 72{
67#ifdef CONFIG_KPROBES
68 int ret = 0; 73 int ret = 0;
69 74
70 /* kprobe_running() needs smp_processor_id() */ 75 /* kprobe_running() needs smp_processor_id() */
71 if (!user_mode_vm(regs)) { 76 if (kprobes_built_in() && !user_mode_vm(regs)) {
72 preempt_disable(); 77 preempt_disable();
73 if (kprobe_running() && kprobe_fault_handler(regs, 14)) 78 if (kprobe_running() && kprobe_fault_handler(regs, 14))
74 ret = 1; 79 ret = 1;
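
The error-code bits are now an enum but keep their hardware meaning, so a value can still be decoded by masking. Two worked examples (illustration only): error_code 0x6 is PF_WRITE|PF_USER, a user-mode write to a not-present page; 0x7 adds PF_PROT, meaning the page was present but the write violated its protections. A small decoding sketch with an invented helper name:

	/* Illustration only -- not part of the fault handler: */
	static const char *sketch_pf_reason(unsigned long error_code)
	{
		if (!(error_code & PF_PROT))
			return "page not present";
		if (error_code & PF_RSVD)
			return "reserved page-table bit set";
		return "protection violation";
	}
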
@@ -76,29 +81,76 @@ static inline int notify_page_fault(struct pt_regs *regs)
76 } 81 }
77 82
78 return ret; 83 return ret;
79#else
80 return 0;
81#endif
82} 84}
83 85
84/* 86/*
85 * X86_32 87 * Prefetch quirks:
86 * Sometimes AMD Athlon/Opteron CPUs report invalid exceptions on prefetch.
87 * Check that here and ignore it.
88 * 88 *
89 * X86_64 89 * 32-bit mode:
90 * Sometimes the CPU reports invalid exceptions on prefetch.
91 * Check that here and ignore it.
92 * 90 *
93 * Opcode checker based on code by Richard Brunner 91 * Sometimes AMD Athlon/Opteron CPUs report invalid exceptions on prefetch.
92 * Check that here and ignore it.
93 *
94 * 64-bit mode:
95 *
96 * Sometimes the CPU reports invalid exceptions on prefetch.
97 * Check that here and ignore it.
98 *
99 * Opcode checker based on code by Richard Brunner.
94 */ 100 */
95static int is_prefetch(struct pt_regs *regs, unsigned long error_code, 101static inline int
96 unsigned long addr) 102check_prefetch_opcode(struct pt_regs *regs, unsigned char *instr,
103 unsigned char opcode, int *prefetch)
97{ 104{
105 unsigned char instr_hi = opcode & 0xf0;
106 unsigned char instr_lo = opcode & 0x0f;
107
108 switch (instr_hi) {
109 case 0x20:
110 case 0x30:
111 /*
112 * Values 0x26,0x2E,0x36,0x3E are valid x86 prefixes.
113 * In X86_64 long mode, the CPU will signal invalid
114 * opcode if some of these prefixes are present so
115 * X86_64 will never get here anyway
116 */
117 return ((instr_lo & 7) == 0x6);
118#ifdef CONFIG_X86_64
119 case 0x40:
120 /*
121 * In AMD64 long mode 0x40..0x4F are valid REX prefixes
122 * Need to figure out under what instruction mode the
123 * instruction was issued. Could check the LDT for lm,
124 * but for now it's good enough to assume that long
125 * mode only uses well known segments or kernel.
126 */
127 return (!user_mode(regs)) || (regs->cs == __USER_CS);
128#endif
129 case 0x60:
130 /* 0x64 thru 0x67 are valid prefixes in all modes. */
131 return (instr_lo & 0xC) == 0x4;
132 case 0xF0:
133 /* 0xF0, 0xF2, 0xF3 are valid prefixes in all modes. */
134 return !instr_lo || (instr_lo>>1) == 1;
135 case 0x00:
136 /* Prefetch instruction is 0x0F0D or 0x0F18 */
137 if (probe_kernel_address(instr, opcode))
138 return 0;
139
140 *prefetch = (instr_lo == 0xF) &&
141 (opcode == 0x0D || opcode == 0x18);
142 return 0;
143 default:
144 return 0;
145 }
146}
147
148static int
149is_prefetch(struct pt_regs *regs, unsigned long error_code, unsigned long addr)
150{
151 unsigned char *max_instr;
98 unsigned char *instr; 152 unsigned char *instr;
99 int scan_more = 1;
100 int prefetch = 0; 153 int prefetch = 0;
101 unsigned char *max_instr;
102 154
103 /* 155 /*
 104 * If it was an exec (instruction fetch) fault on NX page, then	 156
@@ -107,106 +159,170 @@ static int is_prefetch(struct pt_regs *regs, unsigned long error_code,
107 if (error_code & PF_INSTR) 159 if (error_code & PF_INSTR)
108 return 0; 160 return 0;
109 161
110 instr = (unsigned char *)convert_ip_to_linear(current, regs); 162 instr = (void *)convert_ip_to_linear(current, regs);
111 max_instr = instr + 15; 163 max_instr = instr + 15;
112 164
113 if (user_mode(regs) && instr >= (unsigned char *)TASK_SIZE) 165 if (user_mode(regs) && instr >= (unsigned char *)TASK_SIZE)
114 return 0; 166 return 0;
115 167
116 while (scan_more && instr < max_instr) { 168 while (instr < max_instr) {
117 unsigned char opcode; 169 unsigned char opcode;
118 unsigned char instr_hi;
119 unsigned char instr_lo;
120 170
121 if (probe_kernel_address(instr, opcode)) 171 if (probe_kernel_address(instr, opcode))
122 break; 172 break;
123 173
124 instr_hi = opcode & 0xf0;
125 instr_lo = opcode & 0x0f;
126 instr++; 174 instr++;
127 175
128 switch (instr_hi) { 176 if (!check_prefetch_opcode(regs, instr, opcode, &prefetch))
129 case 0x20:
130 case 0x30:
131 /*
132 * Values 0x26,0x2E,0x36,0x3E are valid x86 prefixes.
133 * In X86_64 long mode, the CPU will signal invalid
134 * opcode if some of these prefixes are present so
135 * X86_64 will never get here anyway
136 */
137 scan_more = ((instr_lo & 7) == 0x6);
138 break;
139#ifdef CONFIG_X86_64
140 case 0x40:
141 /*
142 * In AMD64 long mode 0x40..0x4F are valid REX prefixes
143 * Need to figure out under what instruction mode the
144 * instruction was issued. Could check the LDT for lm,
145 * but for now it's good enough to assume that long
146 * mode only uses well known segments or kernel.
147 */
148 scan_more = (!user_mode(regs)) || (regs->cs == __USER_CS);
149 break;
150#endif
151 case 0x60:
152 /* 0x64 thru 0x67 are valid prefixes in all modes. */
153 scan_more = (instr_lo & 0xC) == 0x4;
154 break;
155 case 0xF0:
156 /* 0xF0, 0xF2, 0xF3 are valid prefixes in all modes. */
157 scan_more = !instr_lo || (instr_lo>>1) == 1;
158 break;
159 case 0x00:
160 /* Prefetch instruction is 0x0F0D or 0x0F18 */
161 scan_more = 0;
162
163 if (probe_kernel_address(instr, opcode))
164 break;
165 prefetch = (instr_lo == 0xF) &&
166 (opcode == 0x0D || opcode == 0x18);
167 break; 177 break;
168 default:
169 scan_more = 0;
170 break;
171 }
172 } 178 }
173 return prefetch; 179 return prefetch;
174} 180}
175 181
176static void force_sig_info_fault(int si_signo, int si_code, 182static void
177 unsigned long address, struct task_struct *tsk) 183force_sig_info_fault(int si_signo, int si_code, unsigned long address,
184 struct task_struct *tsk)
178{ 185{
179 siginfo_t info; 186 siginfo_t info;
180 187
181 info.si_signo = si_signo; 188 info.si_signo = si_signo;
182 info.si_errno = 0; 189 info.si_errno = 0;
183 info.si_code = si_code; 190 info.si_code = si_code;
184 info.si_addr = (void __user *)address; 191 info.si_addr = (void __user *)address;
192
185 force_sig_info(si_signo, &info, tsk); 193 force_sig_info(si_signo, &info, tsk);
186} 194}
187 195
188#ifdef CONFIG_X86_64 196DEFINE_SPINLOCK(pgd_lock);
189static int bad_address(void *p) 197LIST_HEAD(pgd_list);
198
199#ifdef CONFIG_X86_32
200static inline pmd_t *vmalloc_sync_one(pgd_t *pgd, unsigned long address)
190{ 201{
191 unsigned long dummy; 202 unsigned index = pgd_index(address);
192 return probe_kernel_address((unsigned long *)p, dummy); 203 pgd_t *pgd_k;
204 pud_t *pud, *pud_k;
205 pmd_t *pmd, *pmd_k;
206
207 pgd += index;
208 pgd_k = init_mm.pgd + index;
209
210 if (!pgd_present(*pgd_k))
211 return NULL;
212
213 /*
214 * set_pgd(pgd, *pgd_k); here would be useless on PAE
215 * and redundant with the set_pmd() on non-PAE. As would
216 * set_pud.
217 */
218 pud = pud_offset(pgd, address);
219 pud_k = pud_offset(pgd_k, address);
220 if (!pud_present(*pud_k))
221 return NULL;
222
223 pmd = pmd_offset(pud, address);
224 pmd_k = pmd_offset(pud_k, address);
225 if (!pmd_present(*pmd_k))
226 return NULL;
227
228 if (!pmd_present(*pmd)) {
229 set_pmd(pmd, *pmd_k);
230 arch_flush_lazy_mmu_mode();
231 } else {
232 BUG_ON(pmd_page(*pmd) != pmd_page(*pmd_k));
233 }
234
235 return pmd_k;
236}
237
238void vmalloc_sync_all(void)
239{
240 unsigned long address;
241
242 if (SHARED_KERNEL_PMD)
243 return;
244
245 for (address = VMALLOC_START & PMD_MASK;
246 address >= TASK_SIZE && address < FIXADDR_TOP;
247 address += PMD_SIZE) {
248
249 unsigned long flags;
250 struct page *page;
251
252 spin_lock_irqsave(&pgd_lock, flags);
253 list_for_each_entry(page, &pgd_list, lru) {
254 if (!vmalloc_sync_one(page_address(page), address))
255 break;
256 }
257 spin_unlock_irqrestore(&pgd_lock, flags);
258 }
259}
260
261/*
262 * 32-bit:
263 *
264 * Handle a fault on the vmalloc or module mapping area
265 */
266static noinline int vmalloc_fault(unsigned long address)
267{
268 unsigned long pgd_paddr;
269 pmd_t *pmd_k;
270 pte_t *pte_k;
271
272 /* Make sure we are in vmalloc area: */
273 if (!(address >= VMALLOC_START && address < VMALLOC_END))
274 return -1;
275
276 /*
277 * Synchronize this task's top level page-table
278 * with the 'reference' page table.
279 *
280 * Do _not_ use "current" here. We might be inside
281 * an interrupt in the middle of a task switch..
282 */
283 pgd_paddr = read_cr3();
284 pmd_k = vmalloc_sync_one(__va(pgd_paddr), address);
285 if (!pmd_k)
286 return -1;
287
288 pte_k = pte_offset_kernel(pmd_k, address);
289 if (!pte_present(*pte_k))
290 return -1;
291
292 return 0;
293}
294
295/*
296 * Did it hit the DOS screen memory VA from vm86 mode?
297 */
298static inline void
299check_v8086_mode(struct pt_regs *regs, unsigned long address,
300 struct task_struct *tsk)
301{
302 unsigned long bit;
303
304 if (!v8086_mode(regs))
305 return;
306
307 bit = (address - 0xA0000) >> PAGE_SHIFT;
308 if (bit < 32)
309 tsk->thread.screen_bitmap |= 1 << bit;
193} 310}
194#endif
195 311
196static void dump_pagetable(unsigned long address) 312static void dump_pagetable(unsigned long address)
197{ 313{
198#ifdef CONFIG_X86_32
199 __typeof__(pte_val(__pte(0))) page; 314 __typeof__(pte_val(__pte(0))) page;
200 315
201 page = read_cr3(); 316 page = read_cr3();
202 page = ((__typeof__(page) *) __va(page))[address >> PGDIR_SHIFT]; 317 page = ((__typeof__(page) *) __va(page))[address >> PGDIR_SHIFT];
318
203#ifdef CONFIG_X86_PAE 319#ifdef CONFIG_X86_PAE
204 printk("*pdpt = %016Lx ", page); 320 printk("*pdpt = %016Lx ", page);
205 if ((page >> PAGE_SHIFT) < max_low_pfn 321 if ((page >> PAGE_SHIFT) < max_low_pfn
206 && page & _PAGE_PRESENT) { 322 && page & _PAGE_PRESENT) {
207 page &= PAGE_MASK; 323 page &= PAGE_MASK;
208 page = ((__typeof__(page) *) __va(page))[(address >> PMD_SHIFT) 324 page = ((__typeof__(page) *) __va(page))[(address >> PMD_SHIFT)
209 & (PTRS_PER_PMD - 1)]; 325 & (PTRS_PER_PMD - 1)];
210 printk(KERN_CONT "*pde = %016Lx ", page); 326 printk(KERN_CONT "*pde = %016Lx ", page);
211 page &= ~_PAGE_NX; 327 page &= ~_PAGE_NX;
212 } 328 }
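
The new check_v8086_mode() above records which DOS screen pages a vm86 task has touched. A worked example of its arithmetic (values chosen for illustration):

	/* Illustration only: */
	unsigned long address = 0xB8000;		/* text-mode VGA page  */
	unsigned long bit = (address - 0xA0000) >> 12;	/* PAGE_SHIFT == 12    */
	/* bit == 24, which is < 32, so screen_bitmap |= 1 << 24 */
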
@@ -218,19 +334,145 @@ static void dump_pagetable(unsigned long address)
218 * We must not directly access the pte in the highpte 334 * We must not directly access the pte in the highpte
219 * case if the page table is located in highmem. 335 * case if the page table is located in highmem.
220 * And let's rather not kmap-atomic the pte, just in case 336 * And let's rather not kmap-atomic the pte, just in case
221 * it's allocated already. 337 * it's allocated already:
222 */ 338 */
223 if ((page >> PAGE_SHIFT) < max_low_pfn 339 if ((page >> PAGE_SHIFT) < max_low_pfn
224 && (page & _PAGE_PRESENT) 340 && (page & _PAGE_PRESENT)
225 && !(page & _PAGE_PSE)) { 341 && !(page & _PAGE_PSE)) {
342
226 page &= PAGE_MASK; 343 page &= PAGE_MASK;
227 page = ((__typeof__(page) *) __va(page))[(address >> PAGE_SHIFT) 344 page = ((__typeof__(page) *) __va(page))[(address >> PAGE_SHIFT)
228 & (PTRS_PER_PTE - 1)]; 345 & (PTRS_PER_PTE - 1)];
229 printk("*pte = %0*Lx ", sizeof(page)*2, (u64)page); 346 printk("*pte = %0*Lx ", sizeof(page)*2, (u64)page);
230 } 347 }
231 348
232 printk("\n"); 349 printk("\n");
233#else /* CONFIG_X86_64 */ 350}
351
352#else /* CONFIG_X86_64: */
353
354void vmalloc_sync_all(void)
355{
356 unsigned long address;
357
358 for (address = VMALLOC_START & PGDIR_MASK; address <= VMALLOC_END;
359 address += PGDIR_SIZE) {
360
361 const pgd_t *pgd_ref = pgd_offset_k(address);
362 unsigned long flags;
363 struct page *page;
364
365 if (pgd_none(*pgd_ref))
366 continue;
367
368 spin_lock_irqsave(&pgd_lock, flags);
369 list_for_each_entry(page, &pgd_list, lru) {
370 pgd_t *pgd;
371 pgd = (pgd_t *)page_address(page) + pgd_index(address);
372 if (pgd_none(*pgd))
373 set_pgd(pgd, *pgd_ref);
374 else
375 BUG_ON(pgd_page_vaddr(*pgd) != pgd_page_vaddr(*pgd_ref));
376 }
377 spin_unlock_irqrestore(&pgd_lock, flags);
378 }
379}
380
381/*
382 * 64-bit:
383 *
384 * Handle a fault on the vmalloc area
385 *
386 * This assumes no large pages in there.
387 */
388static noinline int vmalloc_fault(unsigned long address)
389{
390 pgd_t *pgd, *pgd_ref;
391 pud_t *pud, *pud_ref;
392 pmd_t *pmd, *pmd_ref;
393 pte_t *pte, *pte_ref;
394
395 /* Make sure we are in vmalloc area: */
396 if (!(address >= VMALLOC_START && address < VMALLOC_END))
397 return -1;
398
399 /*
400 * Copy kernel mappings over when needed. This can also
 401	 * happen within a race in page table update. In the latter
402 * case just flush:
403 */
404 pgd = pgd_offset(current->active_mm, address);
405 pgd_ref = pgd_offset_k(address);
406 if (pgd_none(*pgd_ref))
407 return -1;
408
409 if (pgd_none(*pgd))
410 set_pgd(pgd, *pgd_ref);
411 else
412 BUG_ON(pgd_page_vaddr(*pgd) != pgd_page_vaddr(*pgd_ref));
413
414 /*
415 * Below here mismatches are bugs because these lower tables
416 * are shared:
417 */
418
419 pud = pud_offset(pgd, address);
420 pud_ref = pud_offset(pgd_ref, address);
421 if (pud_none(*pud_ref))
422 return -1;
423
424 if (pud_none(*pud) || pud_page_vaddr(*pud) != pud_page_vaddr(*pud_ref))
425 BUG();
426
427 pmd = pmd_offset(pud, address);
428 pmd_ref = pmd_offset(pud_ref, address);
429 if (pmd_none(*pmd_ref))
430 return -1;
431
432 if (pmd_none(*pmd) || pmd_page(*pmd) != pmd_page(*pmd_ref))
433 BUG();
434
435 pte_ref = pte_offset_kernel(pmd_ref, address);
436 if (!pte_present(*pte_ref))
437 return -1;
438
439 pte = pte_offset_kernel(pmd, address);
440
441 /*
442 * Don't use pte_page here, because the mappings can point
443 * outside mem_map, and the NUMA hash lookup cannot handle
444 * that:
445 */
446 if (!pte_present(*pte) || pte_pfn(*pte) != pte_pfn(*pte_ref))
447 BUG();
448
449 return 0;
450}
451
452static const char errata93_warning[] =
453KERN_ERR "******* Your BIOS seems to not contain a fix for K8 errata #93\n"
454KERN_ERR "******* Working around it, but it may cause SEGVs or burn power.\n"
455KERN_ERR "******* Please consider a BIOS update.\n"
456KERN_ERR "******* Disabling USB legacy in the BIOS may also help.\n";
457
458/*
459 * No vm86 mode in 64-bit mode:
460 */
461static inline void
462check_v8086_mode(struct pt_regs *regs, unsigned long address,
463 struct task_struct *tsk)
464{
465}
466
467static int bad_address(void *p)
468{
469 unsigned long dummy;
470
471 return probe_kernel_address((unsigned long *)p, dummy);
472}
473
474static void dump_pagetable(unsigned long address)
475{
234 pgd_t *pgd; 476 pgd_t *pgd;
235 pud_t *pud; 477 pud_t *pud;
236 pmd_t *pmd; 478 pmd_t *pmd;
@@ -239,102 +481,77 @@ static void dump_pagetable(unsigned long address)
239 pgd = (pgd_t *)read_cr3(); 481 pgd = (pgd_t *)read_cr3();
240 482
241 pgd = __va((unsigned long)pgd & PHYSICAL_PAGE_MASK); 483 pgd = __va((unsigned long)pgd & PHYSICAL_PAGE_MASK);
484
242 pgd += pgd_index(address); 485 pgd += pgd_index(address);
243 if (bad_address(pgd)) goto bad; 486 if (bad_address(pgd))
487 goto bad;
488
244 printk("PGD %lx ", pgd_val(*pgd)); 489 printk("PGD %lx ", pgd_val(*pgd));
245 if (!pgd_present(*pgd)) goto ret; 490
491 if (!pgd_present(*pgd))
492 goto out;
246 493
247 pud = pud_offset(pgd, address); 494 pud = pud_offset(pgd, address);
248 if (bad_address(pud)) goto bad; 495 if (bad_address(pud))
496 goto bad;
497
249 printk("PUD %lx ", pud_val(*pud)); 498 printk("PUD %lx ", pud_val(*pud));
250 if (!pud_present(*pud) || pud_large(*pud)) 499 if (!pud_present(*pud) || pud_large(*pud))
251 goto ret; 500 goto out;
252 501
253 pmd = pmd_offset(pud, address); 502 pmd = pmd_offset(pud, address);
254 if (bad_address(pmd)) goto bad; 503 if (bad_address(pmd))
504 goto bad;
505
255 printk("PMD %lx ", pmd_val(*pmd)); 506 printk("PMD %lx ", pmd_val(*pmd));
256 if (!pmd_present(*pmd) || pmd_large(*pmd)) goto ret; 507 if (!pmd_present(*pmd) || pmd_large(*pmd))
508 goto out;
257 509
258 pte = pte_offset_kernel(pmd, address); 510 pte = pte_offset_kernel(pmd, address);
259 if (bad_address(pte)) goto bad; 511 if (bad_address(pte))
512 goto bad;
513
260 printk("PTE %lx", pte_val(*pte)); 514 printk("PTE %lx", pte_val(*pte));
261ret: 515out:
262 printk("\n"); 516 printk("\n");
263 return; 517 return;
264bad: 518bad:
265 printk("BAD\n"); 519 printk("BAD\n");
266#endif
267} 520}
268 521
269#ifdef CONFIG_X86_32 522#endif /* CONFIG_X86_64 */
270static inline pmd_t *vmalloc_sync_one(pgd_t *pgd, unsigned long address)
271{
272 unsigned index = pgd_index(address);
273 pgd_t *pgd_k;
274 pud_t *pud, *pud_k;
275 pmd_t *pmd, *pmd_k;
276
277 pgd += index;
278 pgd_k = init_mm.pgd + index;
279
280 if (!pgd_present(*pgd_k))
281 return NULL;
282 523
283 /* 524/*
284 * set_pgd(pgd, *pgd_k); here would be useless on PAE 525 * Workaround for K8 erratum #93 & buggy BIOS.
285 * and redundant with the set_pmd() on non-PAE. As would 526 *
286 * set_pud. 527 * BIOS SMM functions are required to use a specific workaround
287 */ 528 * to avoid corruption of the 64bit RIP register on C stepping K8.
288 529 *
289 pud = pud_offset(pgd, address); 530 * A lot of BIOS that didn't get tested properly miss this.
290 pud_k = pud_offset(pgd_k, address); 531 *
291 if (!pud_present(*pud_k)) 532 * The OS sees this as a page fault with the upper 32bits of RIP cleared.
292 return NULL; 533 * Try to work around it here.
293 534 *
294 pmd = pmd_offset(pud, address); 535 * Note we only handle faults in kernel here.
295 pmd_k = pmd_offset(pud_k, address); 536 * Does nothing on 32-bit.
296 if (!pmd_present(*pmd_k))
297 return NULL;
298 if (!pmd_present(*pmd)) {
299 set_pmd(pmd, *pmd_k);
300 arch_flush_lazy_mmu_mode();
301 } else
302 BUG_ON(pmd_page(*pmd) != pmd_page(*pmd_k));
303 return pmd_k;
304}
305#endif
306
307#ifdef CONFIG_X86_64
308static const char errata93_warning[] =
309KERN_ERR "******* Your BIOS seems to not contain a fix for K8 errata #93\n"
310KERN_ERR "******* Working around it, but it may cause SEGVs or burn power.\n"
311KERN_ERR "******* Please consider a BIOS update.\n"
312KERN_ERR "******* Disabling USB legacy in the BIOS may also help.\n";
313#endif
314
315/* Workaround for K8 erratum #93 & buggy BIOS.
316 BIOS SMM functions are required to use a specific workaround
317 to avoid corruption of the 64bit RIP register on C stepping K8.
318 A lot of BIOS that didn't get tested properly miss this.
319 The OS sees this as a page fault with the upper 32bits of RIP cleared.
320 Try to work around it here.
321 Note we only handle faults in kernel here.
322 Does nothing for X86_32
323 */ 537 */
324static int is_errata93(struct pt_regs *regs, unsigned long address) 538static int is_errata93(struct pt_regs *regs, unsigned long address)
325{ 539{
326#ifdef CONFIG_X86_64 540#ifdef CONFIG_X86_64
327 static int warned; 541 static int once;
542
328 if (address != regs->ip) 543 if (address != regs->ip)
329 return 0; 544 return 0;
545
330 if ((address >> 32) != 0) 546 if ((address >> 32) != 0)
331 return 0; 547 return 0;
548
332 address |= 0xffffffffUL << 32; 549 address |= 0xffffffffUL << 32;
333 if ((address >= (u64)_stext && address <= (u64)_etext) || 550 if ((address >= (u64)_stext && address <= (u64)_etext) ||
334 (address >= MODULES_VADDR && address <= MODULES_END)) { 551 (address >= MODULES_VADDR && address <= MODULES_END)) {
335 if (!warned) { 552 if (!once) {
336 printk(errata93_warning); 553 printk(errata93_warning);
337 warned = 1; 554 once = 1;
338 } 555 }
339 regs->ip = address; 556 regs->ip = address;
340 return 1; 557 return 1;
@@ -344,16 +561,17 @@ static int is_errata93(struct pt_regs *regs, unsigned long address)
344} 561}
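
Editor's note: since the erratum clears the upper 32 bits of RIP, the handler above rebuilds the full kernel address by OR-ing the high half back in before comparing it against the kernel text and module ranges. A worked example of that reconstruction (the address is hypothetical):

/* Worked example (hypothetical address): recovering a truncated kernel RIP. */
#include <stdio.h>

int main(void)
{
	unsigned long long truncated = 0x000000008104abcdULL;	/* reported fault address */
	unsigned long long restored  = truncated | (0xffffffffULL << 32);

	printf("%#llx -> %#llx\n", truncated, restored);	/* -> 0xffffffff8104abcd */
	return 0;
}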
345 562
346/* 563/*
347 * Work around K8 erratum #100 K8 in compat mode occasionally jumps to illegal 564 * Work around K8 erratum #100 K8 in compat mode occasionally jumps
348 * addresses >4GB. We catch this in the page fault handler because these 565 * to illegal addresses >4GB.
349 * addresses are not reachable. Just detect this case and return. Any code 566 *
567 * We catch this in the page fault handler because these addresses
568 * are not reachable. Just detect this case and return. Any code
350 * segment in LDT is compatibility mode. 569 * segment in LDT is compatibility mode.
351 */ 570 */
352static int is_errata100(struct pt_regs *regs, unsigned long address) 571static int is_errata100(struct pt_regs *regs, unsigned long address)
353{ 572{
354#ifdef CONFIG_X86_64 573#ifdef CONFIG_X86_64
355 if ((regs->cs == __USER32_CS || (regs->cs & (1<<2))) && 574 if ((regs->cs == __USER32_CS || (regs->cs & (1<<2))) && (address >> 32))
356 (address >> 32))
357 return 1; 575 return 1;
358#endif 576#endif
359 return 0; 577 return 0;
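
Editor's note: the regs->cs & (1<<2) test above looks at the selector's Table Indicator bit; when it is set the code segment comes from the LDT, which per the comment implies compatibility mode. A small sketch of how a selector decodes (the value is made up):

/* Sketch: decoding an x86 segment selector into index, TI and RPL fields. */
#include <stdio.h>

int main(void)
{
	unsigned int sel = 0x0007;	/* made-up selector: index 0, TI=1 (LDT), RPL=3 */

	printf("index=%u TI=%u RPL=%u\n", sel >> 3, (sel >> 2) & 1, sel & 3);
	return 0;
}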
@@ -363,8 +581,9 @@ static int is_f00f_bug(struct pt_regs *regs, unsigned long address)
363{ 581{
364#ifdef CONFIG_X86_F00F_BUG 582#ifdef CONFIG_X86_F00F_BUG
365 unsigned long nr; 583 unsigned long nr;
584
366 /* 585 /*
367 * Pentium F0 0F C7 C8 bug workaround. 586 * Pentium F0 0F C7 C8 bug workaround:
368 */ 587 */
369 if (boot_cpu_data.f00f_bug) { 588 if (boot_cpu_data.f00f_bug) {
370 nr = (address - idt_descr.address) >> 3; 589 nr = (address - idt_descr.address) >> 3;
@@ -378,80 +597,87 @@ static int is_f00f_bug(struct pt_regs *regs, unsigned long address)
378 return 0; 597 return 0;
379} 598}
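
Editor's note: on 32-bit, each IDT descriptor is 8 bytes, so the >> 3 above converts the faulting address's byte offset within the IDT into an exception vector number. A quick arithmetic check (the offset is hypothetical):

/* Quick check: a byte offset of 48 into the IDT corresponds to vector 6. */
#include <stdio.h>

int main(void)
{
	unsigned long offset = 48;	/* hypothetical offset of the fault within the IDT */

	printf("vector %lu\n", offset >> 3);
	return 0;
}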
380 599
381static void show_fault_oops(struct pt_regs *regs, unsigned long error_code, 600static const char nx_warning[] = KERN_CRIT
382 unsigned long address) 601"kernel tried to execute NX-protected page - exploit attempt? (uid: %d)\n";
602
603static void
604show_fault_oops(struct pt_regs *regs, unsigned long error_code,
605 unsigned long address)
383{ 606{
384#ifdef CONFIG_X86_32
385 if (!oops_may_print()) 607 if (!oops_may_print())
386 return; 608 return;
387#endif
388 609
389#ifdef CONFIG_X86_PAE
390 if (error_code & PF_INSTR) { 610 if (error_code & PF_INSTR) {
391 unsigned int level; 611 unsigned int level;
612
392 pte_t *pte = lookup_address(address, &level); 613 pte_t *pte = lookup_address(address, &level);
393 614
394 if (pte && pte_present(*pte) && !pte_exec(*pte)) 615 if (pte && pte_present(*pte) && !pte_exec(*pte))
395 printk(KERN_CRIT "kernel tried to execute " 616 printk(nx_warning, current_uid());
396 "NX-protected page - exploit attempt? "
397 "(uid: %d)\n", current_uid());
398 } 617 }
399#endif
400 618
401 printk(KERN_ALERT "BUG: unable to handle kernel "); 619 printk(KERN_ALERT "BUG: unable to handle kernel ");
402 if (address < PAGE_SIZE) 620 if (address < PAGE_SIZE)
403 printk(KERN_CONT "NULL pointer dereference"); 621 printk(KERN_CONT "NULL pointer dereference");
404 else 622 else
405 printk(KERN_CONT "paging request"); 623 printk(KERN_CONT "paging request");
624
406 printk(KERN_CONT " at %p\n", (void *) address); 625 printk(KERN_CONT " at %p\n", (void *) address);
407 printk(KERN_ALERT "IP:"); 626 printk(KERN_ALERT "IP:");
408 printk_address(regs->ip, 1); 627 printk_address(regs->ip, 1);
628
409 dump_pagetable(address); 629 dump_pagetable(address);
410} 630}
411 631
412#ifdef CONFIG_X86_64 632static noinline void
413static noinline void pgtable_bad(struct pt_regs *regs, 633pgtable_bad(struct pt_regs *regs, unsigned long error_code,
414 unsigned long error_code, unsigned long address) 634 unsigned long address)
415{ 635{
416 unsigned long flags = oops_begin(); 636 struct task_struct *tsk;
417 int sig = SIGKILL; 637 unsigned long flags;
418 struct task_struct *tsk = current; 638 int sig;
639
640 flags = oops_begin();
641 tsk = current;
642 sig = SIGKILL;
419 643
420 printk(KERN_ALERT "%s: Corrupted page table at address %lx\n", 644 printk(KERN_ALERT "%s: Corrupted page table at address %lx\n",
421 tsk->comm, address); 645 tsk->comm, address);
422 dump_pagetable(address); 646 dump_pagetable(address);
423 tsk->thread.cr2 = address; 647
424 tsk->thread.trap_no = 14; 648 tsk->thread.cr2 = address;
425 tsk->thread.error_code = error_code; 649 tsk->thread.trap_no = 14;
650 tsk->thread.error_code = error_code;
651
426 if (__die("Bad pagetable", regs, error_code)) 652 if (__die("Bad pagetable", regs, error_code))
427 sig = 0; 653 sig = 0;
654
428 oops_end(flags, regs, sig); 655 oops_end(flags, regs, sig);
429} 656}
430#endif
431 657
432static noinline void no_context(struct pt_regs *regs, 658static noinline void
433 unsigned long error_code, unsigned long address) 659no_context(struct pt_regs *regs, unsigned long error_code,
660 unsigned long address)
434{ 661{
435 struct task_struct *tsk = current; 662 struct task_struct *tsk = current;
436 unsigned long *stackend; 663 unsigned long *stackend;
437
438#ifdef CONFIG_X86_64
439 unsigned long flags; 664 unsigned long flags;
440 int sig; 665 int sig;
441#endif
442 666
443 /* Are we prepared to handle this kernel fault? */ 667 /* Are we prepared to handle this kernel fault? */
444 if (fixup_exception(regs)) 668 if (fixup_exception(regs))
445 return; 669 return;
446 670
447 /* 671 /*
448 * X86_32 672 * 32-bit:
449 * Valid to do another page fault here, because if this fault 673 *
450 * had been triggered by is_prefetch fixup_exception would have 674 * Valid to do another page fault here, because if this fault
451 * handled it. 675 * had been triggered by is_prefetch fixup_exception would have
676 * handled it.
677 *
678 * 64-bit:
452 * 679 *
453 * X86_64 680 * Hall of shame of CPU/BIOS bugs.
454 * Hall of shame of CPU/BIOS bugs.
455 */ 681 */
456 if (is_prefetch(regs, error_code, address)) 682 if (is_prefetch(regs, error_code, address))
457 return; 683 return;
@@ -461,54 +687,70 @@ static noinline void no_context(struct pt_regs *regs,
461 687
462 /* 688 /*
463 * Oops. The kernel tried to access some bad page. We'll have to 689 * Oops. The kernel tried to access some bad page. We'll have to
464 * terminate things with extreme prejudice. 690 * terminate things with extreme prejudice:
465 */ 691 */
466#ifdef CONFIG_X86_32
467 bust_spinlocks(1);
468#else
469 flags = oops_begin(); 692 flags = oops_begin();
470#endif
471 693
472 show_fault_oops(regs, error_code, address); 694 show_fault_oops(regs, error_code, address);
473 695
474 stackend = end_of_stack(tsk); 696 stackend = end_of_stack(tsk);
475 if (*stackend != STACK_END_MAGIC) 697 if (*stackend != STACK_END_MAGIC)
476 printk(KERN_ALERT "Thread overran stack, or stack corrupted\n"); 698 printk(KERN_ALERT "Thread overran stack, or stack corrupted\n");
477 699
478 tsk->thread.cr2 = address; 700 tsk->thread.cr2 = address;
479 tsk->thread.trap_no = 14; 701 tsk->thread.trap_no = 14;
480 tsk->thread.error_code = error_code; 702 tsk->thread.error_code = error_code;
481 703
482#ifdef CONFIG_X86_32
483 die("Oops", regs, error_code);
484 bust_spinlocks(0);
485 do_exit(SIGKILL);
486#else
487 sig = SIGKILL; 704 sig = SIGKILL;
488 if (__die("Oops", regs, error_code)) 705 if (__die("Oops", regs, error_code))
489 sig = 0; 706 sig = 0;
707
490 /* Executive summary in case the body of the oops scrolled away */ 708 /* Executive summary in case the body of the oops scrolled away */
491 printk(KERN_EMERG "CR2: %016lx\n", address); 709 printk(KERN_EMERG "CR2: %016lx\n", address);
710
492 oops_end(flags, regs, sig); 711 oops_end(flags, regs, sig);
493#endif
494} 712}
495 713
496static void __bad_area_nosemaphore(struct pt_regs *regs, 714/*
497 unsigned long error_code, unsigned long address, 715 * Print out info about fatal segfaults, if the show_unhandled_signals
498 int si_code) 716 * sysctl is set:
717 */
718static inline void
719show_signal_msg(struct pt_regs *regs, unsigned long error_code,
720 unsigned long address, struct task_struct *tsk)
721{
722 if (!unhandled_signal(tsk, SIGSEGV))
723 return;
724
725 if (!printk_ratelimit())
726 return;
727
728 printk(KERN_CONT "%s%s[%d]: segfault at %lx ip %p sp %p error %lx",
729 task_pid_nr(tsk) > 1 ? KERN_INFO : KERN_EMERG,
730 tsk->comm, task_pid_nr(tsk), address,
731 (void *)regs->ip, (void *)regs->sp, error_code);
732
733 print_vma_addr(KERN_CONT " in ", regs->ip);
734
735 printk(KERN_CONT "\n");
736}
737
738static void
739__bad_area_nosemaphore(struct pt_regs *regs, unsigned long error_code,
740 unsigned long address, int si_code)
499{ 741{
500 struct task_struct *tsk = current; 742 struct task_struct *tsk = current;
501 743
502 /* User mode accesses just cause a SIGSEGV */ 744 /* User mode accesses just cause a SIGSEGV */
503 if (error_code & PF_USER) { 745 if (error_code & PF_USER) {
504 /* 746 /*
505 * It's possible to have interrupts off here. 747 * It's possible to have interrupts off here:
506 */ 748 */
507 local_irq_enable(); 749 local_irq_enable();
508 750
509 /* 751 /*
510 * Valid to do another page fault here because this one came 752 * Valid to do another page fault here because this one came
511 * from user space. 753 * from user space:
512 */ 754 */
513 if (is_prefetch(regs, error_code, address)) 755 if (is_prefetch(regs, error_code, address))
514 return; 756 return;
@@ -516,22 +758,16 @@ static void __bad_area_nosemaphore(struct pt_regs *regs,
516 if (is_errata100(regs, address)) 758 if (is_errata100(regs, address))
517 return; 759 return;
518 760
519 if (show_unhandled_signals && unhandled_signal(tsk, SIGSEGV) && 761 if (unlikely(show_unhandled_signals))
520 printk_ratelimit()) { 762 show_signal_msg(regs, error_code, address, tsk);
521 printk( 763
522 "%s%s[%d]: segfault at %lx ip %p sp %p error %lx", 764 /* Kernel addresses are always protection faults: */
523 task_pid_nr(tsk) > 1 ? KERN_INFO : KERN_EMERG, 765 tsk->thread.cr2 = address;
524 tsk->comm, task_pid_nr(tsk), address, 766 tsk->thread.error_code = error_code | (address >= TASK_SIZE);
525 (void *) regs->ip, (void *) regs->sp, error_code); 767 tsk->thread.trap_no = 14;
526 print_vma_addr(" in ", regs->ip);
527 printk("\n");
528 }
529 768
530 tsk->thread.cr2 = address;
531 /* Kernel addresses are always protection faults */
532 tsk->thread.error_code = error_code | (address >= TASK_SIZE);
533 tsk->thread.trap_no = 14;
534 force_sig_info_fault(SIGSEGV, si_code, address, tsk); 769 force_sig_info_fault(SIGSEGV, si_code, address, tsk);
770
535 return; 771 return;
536 } 772 }
537 773
@@ -541,15 +777,16 @@ static void __bad_area_nosemaphore(struct pt_regs *regs,
541 no_context(regs, error_code, address); 777 no_context(regs, error_code, address);
542} 778}
543 779
544static noinline void bad_area_nosemaphore(struct pt_regs *regs, 780static noinline void
545 unsigned long error_code, unsigned long address) 781bad_area_nosemaphore(struct pt_regs *regs, unsigned long error_code,
782 unsigned long address)
546{ 783{
547 __bad_area_nosemaphore(regs, error_code, address, SEGV_MAPERR); 784 __bad_area_nosemaphore(regs, error_code, address, SEGV_MAPERR);
548} 785}
549 786
550static void __bad_area(struct pt_regs *regs, 787static void
551 unsigned long error_code, unsigned long address, 788__bad_area(struct pt_regs *regs, unsigned long error_code,
552 int si_code) 789 unsigned long address, int si_code)
553{ 790{
554 struct mm_struct *mm = current->mm; 791 struct mm_struct *mm = current->mm;
555 792
@@ -562,67 +799,75 @@ static void __bad_area(struct pt_regs *regs,
562 __bad_area_nosemaphore(regs, error_code, address, si_code); 799 __bad_area_nosemaphore(regs, error_code, address, si_code);
563} 800}
564 801
565static noinline void bad_area(struct pt_regs *regs, 802static noinline void
566 unsigned long error_code, unsigned long address) 803bad_area(struct pt_regs *regs, unsigned long error_code, unsigned long address)
567{ 804{
568 __bad_area(regs, error_code, address, SEGV_MAPERR); 805 __bad_area(regs, error_code, address, SEGV_MAPERR);
569} 806}
570 807
571static noinline void bad_area_access_error(struct pt_regs *regs, 808static noinline void
572 unsigned long error_code, unsigned long address) 809bad_area_access_error(struct pt_regs *regs, unsigned long error_code,
810 unsigned long address)
573{ 811{
574 __bad_area(regs, error_code, address, SEGV_ACCERR); 812 __bad_area(regs, error_code, address, SEGV_ACCERR);
575} 813}
576 814
577/* TODO: fixup for "mm-invoke-oom-killer-from-page-fault.patch" */ 815/* TODO: fixup for "mm-invoke-oom-killer-from-page-fault.patch" */
578static void out_of_memory(struct pt_regs *regs, 816static void
579 unsigned long error_code, unsigned long address) 817out_of_memory(struct pt_regs *regs, unsigned long error_code,
818 unsigned long address)
580{ 819{
581 /* 820 /*
582 * We ran out of memory, call the OOM killer, and return the userspace 821 * We ran out of memory, call the OOM killer, and return the userspace
583 * (which will retry the fault, or kill us if we got oom-killed). 822 * (which will retry the fault, or kill us if we got oom-killed):
584 */ 823 */
585 up_read(&current->mm->mmap_sem); 824 up_read(&current->mm->mmap_sem);
825
586 pagefault_out_of_memory(); 826 pagefault_out_of_memory();
587} 827}
588 828
589static void do_sigbus(struct pt_regs *regs, 829static void
590 unsigned long error_code, unsigned long address) 830do_sigbus(struct pt_regs *regs, unsigned long error_code, unsigned long address)
591{ 831{
592 struct task_struct *tsk = current; 832 struct task_struct *tsk = current;
593 struct mm_struct *mm = tsk->mm; 833 struct mm_struct *mm = tsk->mm;
594 834
595 up_read(&mm->mmap_sem); 835 up_read(&mm->mmap_sem);
596 836
597 /* Kernel mode? Handle exceptions or die */ 837 /* Kernel mode? Handle exceptions or die: */
598 if (!(error_code & PF_USER)) 838 if (!(error_code & PF_USER))
599 no_context(regs, error_code, address); 839 no_context(regs, error_code, address);
600#ifdef CONFIG_X86_32 840
601 /* User space => ok to do another page fault */ 841 /* User-space => ok to do another page fault: */
602 if (is_prefetch(regs, error_code, address)) 842 if (is_prefetch(regs, error_code, address))
603 return; 843 return;
604#endif 844
605 tsk->thread.cr2 = address; 845 tsk->thread.cr2 = address;
606 tsk->thread.error_code = error_code; 846 tsk->thread.error_code = error_code;
607 tsk->thread.trap_no = 14; 847 tsk->thread.trap_no = 14;
848
608 force_sig_info_fault(SIGBUS, BUS_ADRERR, address, tsk); 849 force_sig_info_fault(SIGBUS, BUS_ADRERR, address, tsk);
609} 850}
610 851
611static noinline void mm_fault_error(struct pt_regs *regs, 852static noinline void
612 unsigned long error_code, unsigned long address, unsigned int fault) 853mm_fault_error(struct pt_regs *regs, unsigned long error_code,
854 unsigned long address, unsigned int fault)
613{ 855{
614 if (fault & VM_FAULT_OOM) 856 if (fault & VM_FAULT_OOM) {
615 out_of_memory(regs, error_code, address); 857 out_of_memory(regs, error_code, address);
616 else if (fault & VM_FAULT_SIGBUS) 858 } else {
617 do_sigbus(regs, error_code, address); 859 if (fault & VM_FAULT_SIGBUS)
618 else 860 do_sigbus(regs, error_code, address);
619 BUG(); 861 else
862 BUG();
863 }
620} 864}
621 865
622static int spurious_fault_check(unsigned long error_code, pte_t *pte) 866static int spurious_fault_check(unsigned long error_code, pte_t *pte)
623{ 867{
624 if ((error_code & PF_WRITE) && !pte_write(*pte)) 868 if ((error_code & PF_WRITE) && !pte_write(*pte))
625 return 0; 869 return 0;
870
626 if ((error_code & PF_INSTR) && !pte_exec(*pte)) 871 if ((error_code & PF_INSTR) && !pte_exec(*pte))
627 return 0; 872 return 0;
628 873
@@ -630,21 +875,25 @@ static int spurious_fault_check(unsigned long error_code, pte_t *pte)
630} 875}
631 876
632/* 877/*
633 * Handle a spurious fault caused by a stale TLB entry. This allows 878 * Handle a spurious fault caused by a stale TLB entry.
634 * us to lazily refresh the TLB when increasing the permissions of a 879 *
635 * kernel page (RO -> RW or NX -> X). Doing it eagerly is very 880 * This allows us to lazily refresh the TLB when increasing the
636 * expensive since that implies doing a full cross-processor TLB 881 * permissions of a kernel page (RO -> RW or NX -> X). Doing it
637 * flush, even if no stale TLB entries exist on other processors. 882 * eagerly is very expensive since that implies doing a full
883 * cross-processor TLB flush, even if no stale TLB entries exist
884 * on other processors.
885 *
638 * There are no security implications to leaving a stale TLB when 886 * There are no security implications to leaving a stale TLB when
639 * increasing the permissions on a page. 887 * increasing the permissions on a page.
640 */ 888 */
641static noinline int spurious_fault(unsigned long error_code, 889static noinline int
642 unsigned long address) 890spurious_fault(unsigned long error_code, unsigned long address)
643{ 891{
644 pgd_t *pgd; 892 pgd_t *pgd;
645 pud_t *pud; 893 pud_t *pud;
646 pmd_t *pmd; 894 pmd_t *pmd;
647 pte_t *pte; 895 pte_t *pte;
896 int ret;
648 897
649 /* Reserved-bit violation or user access to kernel space? */ 898 /* Reserved-bit violation or user access to kernel space? */
650 if (error_code & (PF_USER | PF_RSVD)) 899 if (error_code & (PF_USER | PF_RSVD))
@@ -672,123 +921,46 @@ static noinline int spurious_fault(unsigned long error_code,
672 if (!pte_present(*pte)) 921 if (!pte_present(*pte))
673 return 0; 922 return 0;
674 923
675 return spurious_fault_check(error_code, pte); 924 ret = spurious_fault_check(error_code, pte);
676} 925 if (!ret)
677 926 return 0;
678/*
679 * X86_32
680 * Handle a fault on the vmalloc or module mapping area
681 *
682 * X86_64
683 * Handle a fault on the vmalloc area
684 *
685 * This assumes no large pages in there.
686 */
687static noinline int vmalloc_fault(unsigned long address)
688{
689#ifdef CONFIG_X86_32
690 unsigned long pgd_paddr;
691 pmd_t *pmd_k;
692 pte_t *pte_k;
693
694 /* Make sure we are in vmalloc area */
695 if (!(address >= VMALLOC_START && address < VMALLOC_END))
696 return -1;
697 927
698 /* 928 /*
699 * Synchronize this task's top level page-table 929 * Make sure we have permissions in PMD.
700 * with the 'reference' page table. 930 * If not, then there's a bug in the page tables:
701 *
702 * Do _not_ use "current" here. We might be inside
703 * an interrupt in the middle of a task switch..
704 */ 931 */
705 pgd_paddr = read_cr3(); 932 ret = spurious_fault_check(error_code, (pte_t *) pmd);
706 pmd_k = vmalloc_sync_one(__va(pgd_paddr), address); 933 WARN_ONCE(!ret, "PMD has incorrect permission bits\n");
707 if (!pmd_k)
708 return -1;
709 pte_k = pte_offset_kernel(pmd_k, address);
710 if (!pte_present(*pte_k))
711 return -1;
712 return 0;
713#else
714 pgd_t *pgd, *pgd_ref;
715 pud_t *pud, *pud_ref;
716 pmd_t *pmd, *pmd_ref;
717 pte_t *pte, *pte_ref;
718 934
719 /* Make sure we are in vmalloc area */ 935 return ret;
720 if (!(address >= VMALLOC_START && address < VMALLOC_END))
721 return -1;
722
723 /* Copy kernel mappings over when needed. This can also
724 happen within a race in page table update. In the later
725 case just flush. */
726
727 pgd = pgd_offset(current->active_mm, address);
728 pgd_ref = pgd_offset_k(address);
729 if (pgd_none(*pgd_ref))
730 return -1;
731 if (pgd_none(*pgd))
732 set_pgd(pgd, *pgd_ref);
733 else
734 BUG_ON(pgd_page_vaddr(*pgd) != pgd_page_vaddr(*pgd_ref));
735
736 /* Below here mismatches are bugs because these lower tables
737 are shared */
738
739 pud = pud_offset(pgd, address);
740 pud_ref = pud_offset(pgd_ref, address);
741 if (pud_none(*pud_ref))
742 return -1;
743 if (pud_none(*pud) || pud_page_vaddr(*pud) != pud_page_vaddr(*pud_ref))
744 BUG();
745 pmd = pmd_offset(pud, address);
746 pmd_ref = pmd_offset(pud_ref, address);
747 if (pmd_none(*pmd_ref))
748 return -1;
749 if (pmd_none(*pmd) || pmd_page(*pmd) != pmd_page(*pmd_ref))
750 BUG();
751 pte_ref = pte_offset_kernel(pmd_ref, address);
752 if (!pte_present(*pte_ref))
753 return -1;
754 pte = pte_offset_kernel(pmd, address);
755 /* Don't use pte_page here, because the mappings can point
756 outside mem_map, and the NUMA hash lookup cannot handle
757 that. */
758 if (!pte_present(*pte) || pte_pfn(*pte) != pte_pfn(*pte_ref))
759 BUG();
760 return 0;
761#endif
762} 936}
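
Editor's note: the test above only treats a fault as spurious when the current page-table entry already grants the access that faulted, and the added PMD check applies the same rule one level up. A conceptual user-space sketch of that test (the flag values, types and names are local stand-ins, not the kernel's):

/* Conceptual sketch: a write/exec fault is "spurious" only if the current
 * (fake) PTE already allows the access, i.e. the TLB entry was stale. */
#include <stdbool.h>
#include <stdio.h>

#define FAKE_PF_WRITE	(1 << 1)
#define FAKE_PF_INSTR	(1 << 4)

struct fake_pte { bool write; bool exec; };

static bool fault_is_spurious(unsigned long error_code, struct fake_pte pte)
{
	if ((error_code & FAKE_PF_WRITE) && !pte.write)
		return false;	/* the PTE really forbids the write */
	if ((error_code & FAKE_PF_INSTR) && !pte.exec)
		return false;	/* the PTE really forbids the fetch */
	return true;		/* permissions allow it: stale TLB entry */
}

int main(void)
{
	struct fake_pte now_rw = { .write = true, .exec = false };

	printf("%d\n", fault_is_spurious(FAKE_PF_WRITE, now_rw));	/* prints 1 */
	return 0;
}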
763 937
764int show_unhandled_signals = 1; 938int show_unhandled_signals = 1;
765 939
766static inline int access_error(unsigned long error_code, int write, 940static inline int
767 struct vm_area_struct *vma) 941access_error(unsigned long error_code, int write, struct vm_area_struct *vma)
768{ 942{
769 if (write) { 943 if (write) {
770 /* write, present and write, not present */ 944 /* write, present and write, not present: */
771 if (unlikely(!(vma->vm_flags & VM_WRITE))) 945 if (unlikely(!(vma->vm_flags & VM_WRITE)))
772 return 1; 946 return 1;
773 } else if (unlikely(error_code & PF_PROT)) { 947 return 0;
774 /* read, present */
775 return 1;
776 } else {
777 /* read, not present */
778 if (unlikely(!(vma->vm_flags & (VM_READ | VM_EXEC | VM_WRITE))))
779 return 1;
780 } 948 }
781 949
950 /* read, present: */
951 if (unlikely(error_code & PF_PROT))
952 return 1;
953
954 /* read, not present: */
955 if (unlikely(!(vma->vm_flags & (VM_READ | VM_EXEC | VM_WRITE))))
956 return 1;
957
782 return 0; 958 return 0;
783} 959}
784 960
785static int fault_in_kernel_space(unsigned long address) 961static int fault_in_kernel_space(unsigned long address)
786{ 962{
787#ifdef CONFIG_X86_32 963 return address >= TASK_SIZE_MAX;
788 return address >= TASK_SIZE;
789#else /* !CONFIG_X86_32 */
790 return address >= TASK_SIZE64;
791#endif /* CONFIG_X86_32 */
792} 964}
793 965
794/* 966/*
@@ -796,23 +968,22 @@ static int fault_in_kernel_space(unsigned long address)
796 * and the problem, and then passes it off to one of the appropriate 968 * and the problem, and then passes it off to one of the appropriate
797 * routines. 969 * routines.
798 */ 970 */
799#ifdef CONFIG_X86_64 971dotraplinkage void __kprobes
800asmlinkage 972do_page_fault(struct pt_regs *regs, unsigned long error_code)
801#endif
802void __kprobes do_page_fault(struct pt_regs *regs, unsigned long error_code)
803{ 973{
804 unsigned long address; 974 struct vm_area_struct *vma;
805 struct task_struct *tsk; 975 struct task_struct *tsk;
976 unsigned long address;
806 struct mm_struct *mm; 977 struct mm_struct *mm;
807 struct vm_area_struct *vma;
808 int write; 978 int write;
809 int fault; 979 int fault;
810 980
811 tsk = current; 981 tsk = current;
812 mm = tsk->mm; 982 mm = tsk->mm;
983
813 prefetchw(&mm->mmap_sem); 984 prefetchw(&mm->mmap_sem);
814 985
815 /* get the address */ 986 /* Get the faulting address: */
816 address = read_cr2(); 987 address = read_cr2();
817 988
818 if (unlikely(kmmio_fault(regs, address))) 989 if (unlikely(kmmio_fault(regs, address)))
@@ -836,22 +1007,23 @@ void __kprobes do_page_fault(struct pt_regs *regs, unsigned long error_code)
836 vmalloc_fault(address) >= 0) 1007 vmalloc_fault(address) >= 0)
837 return; 1008 return;
838 1009
839 /* Can handle a stale RO->RW TLB */ 1010 /* Can handle a stale RO->RW TLB: */
840 if (spurious_fault(error_code, address)) 1011 if (spurious_fault(error_code, address))
841 return; 1012 return;
842 1013
843 /* kprobes don't want to hook the spurious faults. */ 1014 /* kprobes don't want to hook the spurious faults: */
844 if (notify_page_fault(regs)) 1015 if (notify_page_fault(regs))
845 return; 1016 return;
846 /* 1017 /*
847 * Don't take the mm semaphore here. If we fixup a prefetch 1018 * Don't take the mm semaphore here. If we fixup a prefetch
848 * fault we could otherwise deadlock. 1019 * fault we could otherwise deadlock:
849 */ 1020 */
850 bad_area_nosemaphore(regs, error_code, address); 1021 bad_area_nosemaphore(regs, error_code, address);
1022
851 return; 1023 return;
852 } 1024 }
853 1025
854 /* kprobes don't want to hook the spurious faults. */ 1026 /* kprobes don't want to hook the spurious faults: */
855 if (unlikely(notify_page_fault(regs))) 1027 if (unlikely(notify_page_fault(regs)))
856 return; 1028 return;
857 /* 1029 /*
@@ -859,22 +1031,22 @@ void __kprobes do_page_fault(struct pt_regs *regs, unsigned long error_code)
859 * vmalloc fault has been handled. 1031 * vmalloc fault has been handled.
860 * 1032 *
861 * User-mode registers count as a user access even for any 1033 * User-mode registers count as a user access even for any
862 * potential system fault or CPU buglet. 1034 * potential system fault or CPU buglet:
863 */ 1035 */
864 if (user_mode_vm(regs)) { 1036 if (user_mode_vm(regs)) {
865 local_irq_enable(); 1037 local_irq_enable();
866 error_code |= PF_USER; 1038 error_code |= PF_USER;
867 } else if (regs->flags & X86_EFLAGS_IF) 1039 } else {
868 local_irq_enable(); 1040 if (regs->flags & X86_EFLAGS_IF)
1041 local_irq_enable();
1042 }
869 1043
870#ifdef CONFIG_X86_64
871 if (unlikely(error_code & PF_RSVD)) 1044 if (unlikely(error_code & PF_RSVD))
872 pgtable_bad(regs, error_code, address); 1045 pgtable_bad(regs, error_code, address);
873#endif
874 1046
875 /* 1047 /*
876 * If we're in an interrupt, have no user context or are running in an 1048 * If we're in an interrupt, have no user context or are running
877 * atomic region then we must not take the fault. 1049 * in an atomic region then we must not take the fault:
878 */ 1050 */
879 if (unlikely(in_atomic() || !mm)) { 1051 if (unlikely(in_atomic() || !mm)) {
880 bad_area_nosemaphore(regs, error_code, address); 1052 bad_area_nosemaphore(regs, error_code, address);
@@ -883,19 +1055,19 @@ void __kprobes do_page_fault(struct pt_regs *regs, unsigned long error_code)
883 1055
884 /* 1056 /*
885 * When running in the kernel we expect faults to occur only to 1057 * When running in the kernel we expect faults to occur only to
886 * addresses in user space. All other faults represent errors in the 1058 * addresses in user space. All other faults represent errors in
887 * kernel and should generate an OOPS. Unfortunately, in the case of an 1059 * the kernel and should generate an OOPS. Unfortunately, in the
888 * erroneous fault occurring in a code path which already holds mmap_sem 1060 * case of an erroneous fault occurring in a code path which already
889 * we will deadlock attempting to validate the fault against the 1061 * holds mmap_sem we will deadlock attempting to validate the fault
890 * address space. Luckily the kernel only validly references user 1062 * against the address space. Luckily the kernel only validly
891 * space from well defined areas of code, which are listed in the 1063 * references user space from well defined areas of code, which are
892 * exceptions table. 1064 * listed in the exceptions table.
893 * 1065 *
894 * As the vast majority of faults will be valid we will only perform 1066 * As the vast majority of faults will be valid we will only perform
895 * the source reference check when there is a possibility of a deadlock. 1067 * the source reference check when there is a possibility of a
896 * Attempt to lock the address space, if we cannot we then validate the 1068 * deadlock. Attempt to lock the address space, if we cannot we then
897 * source. If this is invalid we can skip the address space check, 1069 * validate the source. If this is invalid we can skip the address
898 * thus avoiding the deadlock. 1070 * space check, thus avoiding the deadlock:
899 */ 1071 */
900 if (unlikely(!down_read_trylock(&mm->mmap_sem))) { 1072 if (unlikely(!down_read_trylock(&mm->mmap_sem))) {
901 if ((error_code & PF_USER) == 0 && 1073 if ((error_code & PF_USER) == 0 &&
@@ -906,8 +1078,9 @@ void __kprobes do_page_fault(struct pt_regs *regs, unsigned long error_code)
906 down_read(&mm->mmap_sem); 1078 down_read(&mm->mmap_sem);
907 } else { 1079 } else {
908 /* 1080 /*
909 * The above down_read_trylock() might have succeeded in which 1081 * The above down_read_trylock() might have succeeded in
910 * case we'll have missed the might_sleep() from down_read(). 1082 * which case we'll have missed the might_sleep() from
1083 * down_read():
911 */ 1084 */
912 might_sleep(); 1085 might_sleep();
913 } 1086 }
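
Editor's note: the comment above describes a trylock-then-validate pattern: take the lock opportunistically, and fall back to a blocking acquisition only when the faulting instruction is known (via the exception table) to be a legitimate user-access path. A generic sketch of the same pattern outside the kernel (all names are stand-ins):

/* Generic sketch of trylock-then-validate, to avoid sleeping on a lock the
 * current context might already hold. Not kernel code. */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t map_lock = PTHREAD_MUTEX_INITIALIZER;

static bool caller_is_whitelisted(void)
{
	return true;		/* stand-in for the exception-table lookup */
}

static int handle_fault(void)
{
	if (pthread_mutex_trylock(&map_lock) != 0) {
		if (!caller_is_whitelisted())
			return -1;			/* would deadlock: treat as a bug */
		pthread_mutex_lock(&map_lock);		/* known-safe path: block */
	}
	/* ... fault handling under the lock ... */
	pthread_mutex_unlock(&map_lock);
	return 0;
}

int main(void)
{
	printf("%d\n", handle_fault());
	return 0;
}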
@@ -927,7 +1100,7 @@ void __kprobes do_page_fault(struct pt_regs *regs, unsigned long error_code)
927 /* 1100 /*
928 * Accessing the stack below %sp is always a bug. 1101 * Accessing the stack below %sp is always a bug.
929 * The large cushion allows instructions like enter 1102 * The large cushion allows instructions like enter
930 * and pusha to work. ("enter $65535,$31" pushes 1103 * and pusha to work. ("enter $65535, $31" pushes
931 * 32 pointers and then decrements %sp by 65535.) 1104 * 32 pointers and then decrements %sp by 65535.)
932 */ 1105 */
933 if (unlikely(address + 65536 + 32 * sizeof(unsigned long) < regs->sp)) { 1106 if (unlikely(address + 65536 + 32 * sizeof(unsigned long) < regs->sp)) {
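
Editor's note: for reference, the cushion the check above allows below %sp works out to 65536 + 32 * sizeof(unsigned long) bytes:

/* Worked numbers for the stack cushion (sizeof(unsigned long) is 8 on 64-bit
 * kernels and 4 on 32-bit ones). */
#include <stdio.h>

int main(void)
{
	printf("64-bit: %llu bytes\n", 65536ULL + 32 * 8ULL);	/* 65792 */
	printf("32-bit: %llu bytes\n", 65536ULL + 32 * 4ULL);	/* 65664 */
	return 0;
}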
@@ -946,6 +1119,7 @@ void __kprobes do_page_fault(struct pt_regs *regs, unsigned long error_code)
946 */ 1119 */
947good_area: 1120good_area:
948 write = error_code & PF_WRITE; 1121 write = error_code & PF_WRITE;
1122
949 if (unlikely(access_error(error_code, write, vma))) { 1123 if (unlikely(access_error(error_code, write, vma))) {
950 bad_area_access_error(regs, error_code, address); 1124 bad_area_access_error(regs, error_code, address);
951 return; 1125 return;
@@ -954,75 +1128,21 @@ good_area:
954 /* 1128 /*
955 * If for any reason at all we couldn't handle the fault, 1129 * If for any reason at all we couldn't handle the fault,
956 * make sure we exit gracefully rather than endlessly redo 1130 * make sure we exit gracefully rather than endlessly redo
957 * the fault. 1131 * the fault:
958 */ 1132 */
959 fault = handle_mm_fault(mm, vma, address, write); 1133 fault = handle_mm_fault(mm, vma, address, write);
1134
960 if (unlikely(fault & VM_FAULT_ERROR)) { 1135 if (unlikely(fault & VM_FAULT_ERROR)) {
961 mm_fault_error(regs, error_code, address, fault); 1136 mm_fault_error(regs, error_code, address, fault);
962 return; 1137 return;
963 } 1138 }
1139
964 if (fault & VM_FAULT_MAJOR) 1140 if (fault & VM_FAULT_MAJOR)
965 tsk->maj_flt++; 1141 tsk->maj_flt++;
966 else 1142 else
967 tsk->min_flt++; 1143 tsk->min_flt++;
968 1144
969#ifdef CONFIG_X86_32 1145 check_v8086_mode(regs, address, tsk);
970 /*
971 * Did it hit the DOS screen memory VA from vm86 mode?
972 */
973 if (v8086_mode(regs)) {
974 unsigned long bit = (address - 0xA0000) >> PAGE_SHIFT;
975 if (bit < 32)
976 tsk->thread.screen_bitmap |= 1 << bit;
977 }
978#endif
979 up_read(&mm->mmap_sem);
980}
981
982DEFINE_SPINLOCK(pgd_lock);
983LIST_HEAD(pgd_list);
984 1146
985void vmalloc_sync_all(void) 1147 up_read(&mm->mmap_sem);
986{
987 unsigned long address;
988
989#ifdef CONFIG_X86_32
990 if (SHARED_KERNEL_PMD)
991 return;
992
993 for (address = VMALLOC_START & PMD_MASK;
994 address >= TASK_SIZE && address < FIXADDR_TOP;
995 address += PMD_SIZE) {
996 unsigned long flags;
997 struct page *page;
998
999 spin_lock_irqsave(&pgd_lock, flags);
1000 list_for_each_entry(page, &pgd_list, lru) {
1001 if (!vmalloc_sync_one(page_address(page),
1002 address))
1003 break;
1004 }
1005 spin_unlock_irqrestore(&pgd_lock, flags);
1006 }
1007#else /* CONFIG_X86_64 */
1008 for (address = VMALLOC_START & PGDIR_MASK; address <= VMALLOC_END;
1009 address += PGDIR_SIZE) {
1010 const pgd_t *pgd_ref = pgd_offset_k(address);
1011 unsigned long flags;
1012 struct page *page;
1013
1014 if (pgd_none(*pgd_ref))
1015 continue;
1016 spin_lock_irqsave(&pgd_lock, flags);
1017 list_for_each_entry(page, &pgd_list, lru) {
1018 pgd_t *pgd;
1019 pgd = (pgd_t *)page_address(page) + pgd_index(address);
1020 if (pgd_none(*pgd))
1021 set_pgd(pgd, *pgd_ref);
1022 else
1023 BUG_ON(pgd_page_vaddr(*pgd) != pgd_page_vaddr(*pgd_ref));
1024 }
1025 spin_unlock_irqrestore(&pgd_lock, flags);
1026 }
1027#endif
1028} 1148}
diff --git a/arch/x86/mm/iomap_32.c b/arch/x86/mm/iomap_32.c
index ca53224fc56c..d5e28424622c 100644
--- a/arch/x86/mm/iomap_32.c
+++ b/arch/x86/mm/iomap_32.c
@@ -20,6 +20,64 @@
20#include <asm/pat.h> 20#include <asm/pat.h>
21#include <linux/module.h> 21#include <linux/module.h>
22 22
23#ifdef CONFIG_X86_PAE
24static int
25is_io_mapping_possible(resource_size_t base, unsigned long size)
26{
27 return 1;
28}
29#else
30static int
31is_io_mapping_possible(resource_size_t base, unsigned long size)
32{
33 /* There is no way to map greater than 1 << 32 address without PAE */
34 if (base + size > 0x100000000ULL)
35 return 0;
36
37 return 1;
38}
39#endif
40
41int
42reserve_io_memtype_wc(u64 base, unsigned long size, pgprot_t *prot)
43{
44 unsigned long ret_flag;
45
46 if (!is_io_mapping_possible(base, size))
47 goto out_err;
48
49 if (!pat_enabled) {
50 *prot = pgprot_noncached(PAGE_KERNEL);
51 return 0;
52 }
53
54 if (reserve_memtype(base, base + size, _PAGE_CACHE_WC, &ret_flag))
55 goto out_err;
56
57 if (ret_flag == _PAGE_CACHE_WB)
58 goto out_free;
59
60 if (kernel_map_sync_memtype(base, size, ret_flag))
61 goto out_free;
62
63 *prot = __pgprot(__PAGE_KERNEL | ret_flag);
64 return 0;
65
66out_free:
67 free_memtype(base, base + size);
68out_err:
69 return -EINVAL;
70}
71EXPORT_SYMBOL_GPL(reserve_io_memtype_wc);
72
73void
74free_io_memtype(u64 base, unsigned long size)
75{
76 if (pat_enabled)
77 free_memtype(base, base + size);
78}
79EXPORT_SYMBOL_GPL(free_io_memtype);
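
Editor's note: without PAE a physical address above 4GB cannot be mapped, so is_io_mapping_possible() above rejects any mapping whose end crosses that boundary. A minimal sketch of the same range check (the function name and values here are hypothetical):

/* Minimal sketch of the non-PAE check: reject I/O mappings ending above 4GB. */
#include <stdio.h>

static int mapping_possible_no_pae(unsigned long long base, unsigned long size)
{
	return (base + size) <= 0x100000000ULL;
}

int main(void)
{
	printf("%d\n", mapping_possible_no_pae(0xfff00000ULL, 0x00200000UL));	/* 0: crosses 4GB */
	printf("%d\n", mapping_possible_no_pae(0xe0000000ULL, 0x10000000UL));	/* 1: fits */
	return 0;
}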
80
23/* Map 'pfn' using fixed map 'type' and protections 'prot' 81/* Map 'pfn' using fixed map 'type' and protections 'prot'
24 */ 82 */
25void * 83void *
diff --git a/arch/x86/mm/memtest.c b/arch/x86/mm/memtest.c
index 9cab18b0b857..0bcd7883d036 100644
--- a/arch/x86/mm/memtest.c
+++ b/arch/x86/mm/memtest.c
@@ -9,44 +9,44 @@
9 9
10#include <asm/e820.h> 10#include <asm/e820.h>
11 11
12static void __init memtest(unsigned long start_phys, unsigned long size, 12static u64 patterns[] __initdata = {
13 unsigned pattern) 13 0,
14 0xffffffffffffffffULL,
15 0x5555555555555555ULL,
16 0xaaaaaaaaaaaaaaaaULL,
17 0x1111111111111111ULL,
18 0x2222222222222222ULL,
19 0x4444444444444444ULL,
20 0x8888888888888888ULL,
21 0x3333333333333333ULL,
22 0x6666666666666666ULL,
23 0x9999999999999999ULL,
24 0xccccccccccccccccULL,
25 0x7777777777777777ULL,
26 0xbbbbbbbbbbbbbbbbULL,
27 0xddddddddddddddddULL,
28 0xeeeeeeeeeeeeeeeeULL,
29 0x7a6c7258554e494cULL, /* yeah ;-) */
30};
31
32static void __init reserve_bad_mem(u64 pattern, u64 start_bad, u64 end_bad)
14{ 33{
15 unsigned long i; 34 printk(KERN_INFO " %016llx bad mem addr %010llx - %010llx reserved\n",
16 unsigned long *start; 35 (unsigned long long) pattern,
17 unsigned long start_bad; 36 (unsigned long long) start_bad,
18 unsigned long last_bad; 37 (unsigned long long) end_bad);
19 unsigned long val; 38 reserve_early(start_bad, end_bad, "BAD RAM");
20 unsigned long start_phys_aligned; 39}
21 unsigned long count;
22 unsigned long incr;
23
24 switch (pattern) {
25 case 0:
26 val = 0UL;
27 break;
28 case 1:
29 val = -1UL;
30 break;
31 case 2:
32#ifdef CONFIG_X86_64
33 val = 0x5555555555555555UL;
34#else
35 val = 0x55555555UL;
36#endif
37 break;
38 case 3:
39#ifdef CONFIG_X86_64
40 val = 0xaaaaaaaaaaaaaaaaUL;
41#else
42 val = 0xaaaaaaaaUL;
43#endif
44 break;
45 default:
46 return;
47 }
48 40
49 incr = sizeof(unsigned long); 41static void __init memtest(u64 pattern, u64 start_phys, u64 size)
42{
43 u64 i, count;
44 u64 *start;
45 u64 start_bad, last_bad;
46 u64 start_phys_aligned;
47 size_t incr;
48
49 incr = sizeof(pattern);
50 start_phys_aligned = ALIGN(start_phys, incr); 50 start_phys_aligned = ALIGN(start_phys, incr);
51 count = (size - (start_phys_aligned - start_phys))/incr; 51 count = (size - (start_phys_aligned - start_phys))/incr;
52 start = __va(start_phys_aligned); 52 start = __va(start_phys_aligned);
@@ -54,25 +54,42 @@ static void __init memtest(unsigned long start_phys, unsigned long size,
54 last_bad = 0; 54 last_bad = 0;
55 55
56 for (i = 0; i < count; i++) 56 for (i = 0; i < count; i++)
57 start[i] = val; 57 start[i] = pattern;
58 for (i = 0; i < count; i++, start++, start_phys_aligned += incr) { 58 for (i = 0; i < count; i++, start++, start_phys_aligned += incr) {
59 if (*start != val) { 59 if (*start == pattern)
60 if (start_phys_aligned == last_bad + incr) { 60 continue;
61 last_bad += incr; 61 if (start_phys_aligned == last_bad + incr) {
62 } else { 62 last_bad += incr;
63 if (start_bad) { 63 continue;
64 printk(KERN_CONT "\n %016lx bad mem addr %010lx - %010lx reserved",
65 val, start_bad, last_bad + incr);
66 reserve_early(start_bad, last_bad + incr, "BAD RAM");
67 }
68 start_bad = last_bad = start_phys_aligned;
69 }
70 } 64 }
65 if (start_bad)
66 reserve_bad_mem(pattern, start_bad, last_bad + incr);
67 start_bad = last_bad = start_phys_aligned;
71 } 68 }
72 if (start_bad) { 69 if (start_bad)
73 printk(KERN_CONT "\n %016lx bad mem addr %010lx - %010lx reserved", 70 reserve_bad_mem(pattern, start_bad, last_bad + incr);
74 val, start_bad, last_bad + incr); 71}
75 reserve_early(start_bad, last_bad + incr, "BAD RAM"); 72
73static void __init do_one_pass(u64 pattern, u64 start, u64 end)
74{
75 u64 size = 0;
76
77 while (start < end) {
78 start = find_e820_area_size(start, &size, 1);
79
80 /* done ? */
81 if (start >= end)
82 break;
83 if (start + size > end)
84 size = end - start;
85
86 printk(KERN_INFO " %010llx - %010llx pattern %016llx\n",
87 (unsigned long long) start,
88 (unsigned long long) start + size,
89 (unsigned long long) cpu_to_be64(pattern));
90 memtest(pattern, start, size);
91
92 start += size;
76 } 93 }
77} 94}
78 95
@@ -90,33 +107,22 @@ early_param("memtest", parse_memtest);
90 107
91void __init early_memtest(unsigned long start, unsigned long end) 108void __init early_memtest(unsigned long start, unsigned long end)
92{ 109{
93 u64 t_start, t_size; 110 unsigned int i;
94 unsigned pattern; 111 unsigned int idx = 0;
95 112
96 if (!memtest_pattern) 113 if (!memtest_pattern)
97 return; 114 return;
98 115
99 printk(KERN_INFO "early_memtest: pattern num %d", memtest_pattern); 116 printk(KERN_INFO "early_memtest: # of tests: %d\n", memtest_pattern);
100 for (pattern = 0; pattern < memtest_pattern; pattern++) { 117 for (i = 0; i < memtest_pattern; i++) {
101 t_start = start; 118 idx = i % ARRAY_SIZE(patterns);
102 t_size = 0; 119 do_one_pass(patterns[idx], start, end);
103 while (t_start < end) { 120 }
104 t_start = find_e820_area_size(t_start, &t_size, 1);
105
106 /* done ? */
107 if (t_start >= end)
108 break;
109 if (t_start + t_size > end)
110 t_size = end - t_start;
111
112 printk(KERN_CONT "\n %010llx - %010llx pattern %d",
113 (unsigned long long)t_start,
114 (unsigned long long)t_start + t_size, pattern);
115
116 memtest(t_start, t_size, pattern);
117 121
118 t_start += t_size; 122 if (idx > 0) {
119 } 123 printk(KERN_INFO "early_memtest: wipe out "
124 "test pattern from memory\n");
125 /* additional test with pattern 0 will do this */
126 do_one_pass(0, start, end);
120 } 127 }
121 printk(KERN_CONT "\n");
122} 128}
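
Editor's note: the rework above runs one pass per pattern: fill every word of a region with the pattern, re-read and compare, and coalesce adjacent bad words into reserved ranges. A user-space sketch of a single pass over a plain buffer (a stand-in for the e820-driven walk; not kernel code):

/* User-space sketch of one memtest pass: write a 64-bit pattern, verify it. */
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

static size_t one_pass(uint64_t pattern, uint64_t *buf, size_t words)
{
	size_t i, bad = 0;

	for (i = 0; i < words; i++)
		buf[i] = pattern;
	for (i = 0; i < words; i++)
		if (buf[i] != pattern)
			bad++;
	return bad;
}

int main(void)
{
	size_t words = 1 << 20;				/* 8MB test buffer */
	uint64_t *buf = malloc(words * sizeof(*buf));

	if (!buf)
		return 1;
	printf("bad words: %zu\n", one_pass(0x5555555555555555ULL, buf, words));
	free(buf);
	return 0;
}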
diff --git a/arch/x86/mm/numa_32.c b/arch/x86/mm/numa_32.c
index d1f7439d173c..3957cd6d6454 100644
--- a/arch/x86/mm/numa_32.c
+++ b/arch/x86/mm/numa_32.c
@@ -194,7 +194,7 @@ void *alloc_remap(int nid, unsigned long size)
194 size = ALIGN(size, L1_CACHE_BYTES); 194 size = ALIGN(size, L1_CACHE_BYTES);
195 195
196 if (!allocation || (allocation + size) >= node_remap_end_vaddr[nid]) 196 if (!allocation || (allocation + size) >= node_remap_end_vaddr[nid])
197 return 0; 197 return NULL;
198 198
199 node_remap_alloc_vaddr[nid] += size; 199 node_remap_alloc_vaddr[nid] += size;
200 memset(allocation, 0, size); 200 memset(allocation, 0, size);
diff --git a/arch/x86/mm/pageattr.c b/arch/x86/mm/pageattr.c
index 7be47d1a97e4..8253bc97587e 100644
--- a/arch/x86/mm/pageattr.c
+++ b/arch/x86/mm/pageattr.c
@@ -482,6 +482,13 @@ static int split_large_page(pte_t *kpte, unsigned long address)
482 pbase = (pte_t *)page_address(base); 482 pbase = (pte_t *)page_address(base);
483 paravirt_alloc_pte(&init_mm, page_to_pfn(base)); 483 paravirt_alloc_pte(&init_mm, page_to_pfn(base));
484 ref_prot = pte_pgprot(pte_clrhuge(*kpte)); 484 ref_prot = pte_pgprot(pte_clrhuge(*kpte));
485 /*
486 * If we ever want to utilize the PAT bit, we need to
487 * update this function to make sure it's converted from
488 * bit 12 to bit 7 when we cross from the 2MB level to
489 * the 4K level:
490 */
491 WARN_ON_ONCE(pgprot_val(ref_prot) & _PAGE_PAT_LARGE);
485 492
486#ifdef CONFIG_X86_64 493#ifdef CONFIG_X86_64
487 if (level == PG_LEVEL_1G) { 494 if (level == PG_LEVEL_1G) {
diff --git a/arch/x86/mm/pat.c b/arch/x86/mm/pat.c
index 05f9aef6818a..fdfedb65d45a 100644
--- a/arch/x86/mm/pat.c
+++ b/arch/x86/mm/pat.c
@@ -634,6 +634,33 @@ void unmap_devmem(unsigned long pfn, unsigned long size, pgprot_t vma_prot)
634} 634}
635 635
636/* 636/*
637 * Change the memory type for the physial address range in kernel identity
638 * mapping space if that range is a part of identity map.
639 */
640int kernel_map_sync_memtype(u64 base, unsigned long size, unsigned long flags)
641{
642 unsigned long id_sz;
643
644 if (!pat_enabled || base >= __pa(high_memory))
645 return 0;
646
647 id_sz = (__pa(high_memory) < base + size) ?
648 __pa(high_memory) - base :
649 size;
650
651 if (ioremap_change_attr((unsigned long)__va(base), id_sz, flags) < 0) {
652 printk(KERN_INFO
653 "%s:%d ioremap_change_attr failed %s "
654 "for %Lx-%Lx\n",
655 current->comm, current->pid,
656 cattr_name(flags),
657 base, (unsigned long long)(base + size));
658 return -EINVAL;
659 }
660 return 0;
661}
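
Editor's note: the id_sz computation above clamps the range to the portion actually covered by the kernel identity mapping (everything below __pa(high_memory)); anything above it has no identity mapping to keep in sync. A tiny illustration of the clamp (the numbers are hypothetical):

/* Illustrative clamp: only the part of [base, base+size) below 'high' (a
 * stand-in for __pa(high_memory)) needs its identity mapping updated. */
#include <stdio.h>

static unsigned long long clamp_to_identity(unsigned long long base,
					     unsigned long long size,
					     unsigned long long high)
{
	if (base >= high)
		return 0;
	return (high < base + size) ? high - base : size;
}

int main(void)
{
	/* hypothetical: 896MB of lowmem, a 16MB range straddling the boundary */
	printf("%llu\n", clamp_to_identity(0x37f00000ULL, 0x1000000ULL, 0x38000000ULL));
	return 0;
}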
662
663/*
637 * Internal interface to reserve a range of physical memory with prot. 664 * Internal interface to reserve a range of physical memory with prot.
638 * Reserved non RAM regions only and after successful reserve_memtype, 665 * Reserved non RAM regions only and after successful reserve_memtype,
639 * this func also keeps identity mapping (if any) in sync with this new prot. 666 * this func also keeps identity mapping (if any) in sync with this new prot.
@@ -642,7 +669,7 @@ static int reserve_pfn_range(u64 paddr, unsigned long size, pgprot_t *vma_prot,
642 int strict_prot) 669 int strict_prot)
643{ 670{
644 int is_ram = 0; 671 int is_ram = 0;
645 int id_sz, ret; 672 int ret;
646 unsigned long flags; 673 unsigned long flags;
647 unsigned long want_flags = (pgprot_val(*vma_prot) & _PAGE_CACHE_MASK); 674 unsigned long want_flags = (pgprot_val(*vma_prot) & _PAGE_CACHE_MASK);
648 675
@@ -679,23 +706,8 @@ static int reserve_pfn_range(u64 paddr, unsigned long size, pgprot_t *vma_prot,
679 flags); 706 flags);
680 } 707 }
681 708
682 /* Need to keep identity mapping in sync */ 709 if (kernel_map_sync_memtype(paddr, size, flags) < 0) {
683 if (paddr >= __pa(high_memory))
684 return 0;
685
686 id_sz = (__pa(high_memory) < paddr + size) ?
687 __pa(high_memory) - paddr :
688 size;
689
690 if (ioremap_change_attr((unsigned long)__va(paddr), id_sz, flags) < 0) {
691 free_memtype(paddr, paddr + size); 710 free_memtype(paddr, paddr + size);
692 printk(KERN_ERR
693 "%s:%d reserve_pfn_range ioremap_change_attr failed %s "
694 "for %Lx-%Lx\n",
695 current->comm, current->pid,
696 cattr_name(flags),
697 (unsigned long long)paddr,
698 (unsigned long long)(paddr + size));
699 return -EINVAL; 711 return -EINVAL;
700 } 712 }
701 return 0; 713 return 0;
diff --git a/arch/x86/power/hibernate_asm_32.S b/arch/x86/power/hibernate_asm_32.S
index d1e9b53f9d33..b641388d8286 100644
--- a/arch/x86/power/hibernate_asm_32.S
+++ b/arch/x86/power/hibernate_asm_32.S
@@ -8,7 +8,7 @@
8 8
9#include <linux/linkage.h> 9#include <linux/linkage.h>
10#include <asm/segment.h> 10#include <asm/segment.h>
11#include <asm/page.h> 11#include <asm/page_types.h>
12#include <asm/asm-offsets.h> 12#include <asm/asm-offsets.h>
13#include <asm/processor-flags.h> 13#include <asm/processor-flags.h>
14 14
diff --git a/arch/x86/power/hibernate_asm_64.S b/arch/x86/power/hibernate_asm_64.S
index 000415947d93..9356547d8c01 100644
--- a/arch/x86/power/hibernate_asm_64.S
+++ b/arch/x86/power/hibernate_asm_64.S
@@ -18,7 +18,7 @@
18 .text 18 .text
19#include <linux/linkage.h> 19#include <linux/linkage.h>
20#include <asm/segment.h> 20#include <asm/segment.h>
21#include <asm/page.h> 21#include <asm/page_types.h>
22#include <asm/asm-offsets.h> 22#include <asm/asm-offsets.h>
23#include <asm/processor-flags.h> 23#include <asm/processor-flags.h>
24 24
diff --git a/arch/x86/vdso/vma.c b/arch/x86/vdso/vma.c
index 9c98cc6ba978..7133cdf9098b 100644
--- a/arch/x86/vdso/vma.c
+++ b/arch/x86/vdso/vma.c
@@ -85,8 +85,8 @@ static unsigned long vdso_addr(unsigned long start, unsigned len)
85 unsigned long addr, end; 85 unsigned long addr, end;
86 unsigned offset; 86 unsigned offset;
87 end = (start + PMD_SIZE - 1) & PMD_MASK; 87 end = (start + PMD_SIZE - 1) & PMD_MASK;
88 if (end >= TASK_SIZE64) 88 if (end >= TASK_SIZE_MAX)
89 end = TASK_SIZE64; 89 end = TASK_SIZE_MAX;
90 end -= len; 90 end -= len;
91 /* This loses some more bits than a modulo, but is cheaper */ 91 /* This loses some more bits than a modulo, but is cheaper */
92 offset = get_random_int() & (PTRS_PER_PTE - 1); 92 offset = get_random_int() & (PTRS_PER_PTE - 1);
diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c
index 86497d5f44cd..c52f4034c7fd 100644
--- a/arch/x86/xen/enlighten.c
+++ b/arch/x86/xen/enlighten.c
@@ -940,6 +940,9 @@ asmlinkage void __init xen_start_kernel(void)
940 possible map and a non-dummy shared_info. */ 940 possible map and a non-dummy shared_info. */
941 per_cpu(xen_vcpu, 0) = &HYPERVISOR_shared_info->vcpu_info[0]; 941 per_cpu(xen_vcpu, 0) = &HYPERVISOR_shared_info->vcpu_info[0];
942 942
943 local_irq_disable();
944 early_boot_irqs_off();
945
943 xen_raw_console_write("mapping kernel into physical memory\n"); 946 xen_raw_console_write("mapping kernel into physical memory\n");
944 pgd = xen_setup_kernel_pagetable(pgd, xen_start_info->nr_pages); 947 pgd = xen_setup_kernel_pagetable(pgd, xen_start_info->nr_pages);
945 948
diff --git a/arch/x86/xen/xen-head.S b/arch/x86/xen/xen-head.S
index 63d49a523ed3..1a5ff24e29c0 100644
--- a/arch/x86/xen/xen-head.S
+++ b/arch/x86/xen/xen-head.S
@@ -8,7 +8,7 @@
8 8
9#include <asm/boot.h> 9#include <asm/boot.h>
10#include <asm/asm.h> 10#include <asm/asm.h>
11#include <asm/page.h> 11#include <asm/page_types.h>
12 12
13#include <xen/interface/elfnote.h> 13#include <xen/interface/elfnote.h>
14#include <asm/xen/interface.h> 14#include <asm/xen/interface.h>
diff --git a/crypto/ahash.c b/crypto/ahash.c
index ba5292d69ebd..b2d1ee32cfe8 100644
--- a/crypto/ahash.c
+++ b/crypto/ahash.c
@@ -214,7 +214,7 @@ static void crypto_ahash_show(struct seq_file *m, struct crypto_alg *alg)
214 seq_printf(m, "async : %s\n", alg->cra_flags & CRYPTO_ALG_ASYNC ? 214 seq_printf(m, "async : %s\n", alg->cra_flags & CRYPTO_ALG_ASYNC ?
215 "yes" : "no"); 215 "yes" : "no");
216 seq_printf(m, "blocksize : %u\n", alg->cra_blocksize); 216 seq_printf(m, "blocksize : %u\n", alg->cra_blocksize);
217 seq_printf(m, "digestsize : %u\n", alg->cra_hash.digestsize); 217 seq_printf(m, "digestsize : %u\n", alg->cra_ahash.digestsize);
218} 218}
219 219
220const struct crypto_type crypto_ahash_type = { 220const struct crypto_type crypto_ahash_type = {
diff --git a/drivers/acpi/osl.c b/drivers/acpi/osl.c
index d1dd5160daa9..2b6c59028254 100644
--- a/drivers/acpi/osl.c
+++ b/drivers/acpi/osl.c
@@ -272,7 +272,7 @@ acpi_os_map_memory(acpi_physical_address phys, acpi_size size)
272} 272}
273EXPORT_SYMBOL_GPL(acpi_os_map_memory); 273EXPORT_SYMBOL_GPL(acpi_os_map_memory);
274 274
275void acpi_os_unmap_memory(void __iomem * virt, acpi_size size) 275void __ref acpi_os_unmap_memory(void __iomem *virt, acpi_size size)
276{ 276{
277 if (acpi_gbl_permanent_mmap) 277 if (acpi_gbl_permanent_mmap)
278 iounmap(virt); 278 iounmap(virt);
@@ -281,7 +281,7 @@ void acpi_os_unmap_memory(void __iomem * virt, acpi_size size)
281} 281}
282EXPORT_SYMBOL_GPL(acpi_os_unmap_memory); 282EXPORT_SYMBOL_GPL(acpi_os_unmap_memory);
283 283
284void early_acpi_os_unmap_memory(void __iomem * virt, acpi_size size) 284void __init early_acpi_os_unmap_memory(void __iomem *virt, acpi_size size)
285{ 285{
286 if (!acpi_gbl_permanent_mmap) 286 if (!acpi_gbl_permanent_mmap)
287 __acpi_unmap_table(virt, size); 287 __acpi_unmap_table(virt, size);
diff --git a/drivers/atm/lanai.c b/drivers/atm/lanai.c
index 144a49f15220..8733a2ea04c2 100644
--- a/drivers/atm/lanai.c
+++ b/drivers/atm/lanai.c
@@ -901,7 +901,7 @@ static int __devinit eeprom_read(struct lanai_dev *lanai)
901 clock_l(); udelay(5); 901 clock_l(); udelay(5);
902 for (i = 128; i != 0; i >>= 1) { /* write command out */ 902 for (i = 128; i != 0; i >>= 1) { /* write command out */
903 tmp = (lanai->conf1 & ~CONFIG1_PROMDATA) | 903 tmp = (lanai->conf1 & ~CONFIG1_PROMDATA) |
904 (data & i) ? CONFIG1_PROMDATA : 0; 904 ((data & i) ? CONFIG1_PROMDATA : 0);
905 if (lanai->conf1 != tmp) { 905 if (lanai->conf1 != tmp) {
906 set_config1(tmp); 906 set_config1(tmp);
907 udelay(5); /* Let new data settle */ 907 udelay(5); /* Let new data settle */
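
Editor's note: the added parentheses fix an operator-precedence bug: | binds tighter than ?:, so the old expression used the entire OR as the ternary condition and discarded the other conf1 bits. A minimal demonstration of the difference (values are arbitrary):

/* Demonstration: '|' binds tighter than '?:'. */
#include <stdio.h>

int main(void)
{
	int conf = 4, bit = 0, flag = 0x10;

	printf("%d\n", conf | bit ? flag : 0);		/* parsed as (conf | bit) ? flag : 0 -> 16 */
	printf("%d\n", conf | (bit ? flag : 0));	/* intended meaning -> 4 */
	return 0;
}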
diff --git a/drivers/base/sys.c b/drivers/base/sys.c
index c98c31ec2f75..b428c8c4bc64 100644
--- a/drivers/base/sys.c
+++ b/drivers/base/sys.c
@@ -303,7 +303,6 @@ void sysdev_unregister(struct sys_device * sysdev)
303 * is guaranteed by virtue of the fact that child devices are registered 303 * is guaranteed by virtue of the fact that child devices are registered
304 * after their parents. 304 * after their parents.
305 */ 305 */
306
307void sysdev_shutdown(void) 306void sysdev_shutdown(void)
308{ 307{
309 struct sysdev_class * cls; 308 struct sysdev_class * cls;
@@ -363,7 +362,6 @@ static void __sysdev_resume(struct sys_device *dev)
363 * This is only called by the device PM core, so we let them handle 362 * This is only called by the device PM core, so we let them handle
364 * all synchronization. 363 * all synchronization.
365 */ 364 */
366
367int sysdev_suspend(pm_message_t state) 365int sysdev_suspend(pm_message_t state)
368{ 366{
369 struct sysdev_class * cls; 367 struct sysdev_class * cls;
@@ -432,7 +430,7 @@ aux_driver:
432 } 430 }
433 return ret; 431 return ret;
434} 432}
435 433EXPORT_SYMBOL_GPL(sysdev_suspend);
436 434
437/** 435/**
438 * sysdev_resume - Bring system devices back to life. 436 * sysdev_resume - Bring system devices back to life.
@@ -442,7 +440,6 @@ aux_driver:
442 * 440 *
443 * Note: Interrupts are disabled when called. 441 * Note: Interrupts are disabled when called.
444 */ 442 */
445
446int sysdev_resume(void) 443int sysdev_resume(void)
447{ 444{
448 struct sysdev_class * cls; 445 struct sysdev_class * cls;
@@ -463,7 +460,7 @@ int sysdev_resume(void)
463 } 460 }
464 return 0; 461 return 0;
465} 462}
466 463EXPORT_SYMBOL_GPL(sysdev_resume);
467 464
468int __init system_bus_init(void) 465int __init system_bus_init(void)
469{ 466{
diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
index 81f1cff56fd5..6949c2d58f1d 100644
--- a/drivers/gpu/drm/i915/i915_dma.c
+++ b/drivers/gpu/drm/i915/i915_dma.c
@@ -202,7 +202,7 @@ static int i915_initialize(struct drm_device * dev, drm_i915_init_t * init)
202 dev_priv->ring.map.flags = 0; 202 dev_priv->ring.map.flags = 0;
203 dev_priv->ring.map.mtrr = 0; 203 dev_priv->ring.map.mtrr = 0;
204 204
205 drm_core_ioremap(&dev_priv->ring.map, dev); 205 drm_core_ioremap_wc(&dev_priv->ring.map, dev);
206 206
207 if (dev_priv->ring.map.handle == NULL) { 207 if (dev_priv->ring.map.handle == NULL) {
208 i915_dma_cleanup(dev); 208 i915_dma_cleanup(dev);
@@ -1090,6 +1090,11 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
1090 dev_priv->mm.gtt_mapping = 1090 dev_priv->mm.gtt_mapping =
1091 io_mapping_create_wc(dev->agp->base, 1091 io_mapping_create_wc(dev->agp->base,
1092 dev->agp->agp_info.aper_size * 1024*1024); 1092 dev->agp->agp_info.aper_size * 1024*1024);
1093 if (dev_priv->mm.gtt_mapping == NULL) {
1094 ret = -EIO;
1095 goto out_rmmap;
1096 }
1097
1093 /* Set up a WC MTRR for non-PAT systems. This is more common than 1098 /* Set up a WC MTRR for non-PAT systems. This is more common than
1094 * one would think, because the kernel disables PAT on first 1099 * one would think, because the kernel disables PAT on first
1095 * generation Core chips because WC PAT gets overridden by a UC 1100 * generation Core chips because WC PAT gets overridden by a UC
@@ -1122,7 +1127,7 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
1122 if (!I915_NEED_GFX_HWS(dev)) { 1127 if (!I915_NEED_GFX_HWS(dev)) {
1123 ret = i915_init_phys_hws(dev); 1128 ret = i915_init_phys_hws(dev);
1124 if (ret != 0) 1129 if (ret != 0)
1125 goto out_rmmap; 1130 goto out_iomapfree;
1126 } 1131 }
1127 1132
1128 /* On the 945G/GM, the chipset reports the MSI capability on the 1133 /* On the 945G/GM, the chipset reports the MSI capability on the
@@ -1161,6 +1166,8 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
1161 1166
1162 return 0; 1167 return 0;
1163 1168
1169out_iomapfree:
1170 io_mapping_free(dev_priv->mm.gtt_mapping);
1164out_rmmap: 1171out_rmmap:
1165 iounmap(dev_priv->regs); 1172 iounmap(dev_priv->regs);
1166free_priv: 1173free_priv:
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index a31cbdbc3c54..b293ef0bae71 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -27,6 +27,7 @@
27 * 27 *
28 */ 28 */
29 29
30#include <linux/device.h>
30#include "drmP.h" 31#include "drmP.h"
31#include "drm.h" 32#include "drm.h"
32#include "i915_drm.h" 33#include "i915_drm.h"
@@ -66,6 +67,14 @@ static int i915_suspend(struct drm_device *dev, pm_message_t state)
66 67
67 i915_save_state(dev); 68 i915_save_state(dev);
68 69
70 /* If KMS is active, we do the leavevt stuff here */
71 if (drm_core_check_feature(dev, DRIVER_MODESET)) {
72 if (i915_gem_idle(dev))
73 dev_err(&dev->pdev->dev,
74 "GEM idle failed, resume may fail\n");
75 drm_irq_uninstall(dev);
76 }
77
69 intel_opregion_free(dev); 78 intel_opregion_free(dev);
70 79
71 if (state.event == PM_EVENT_SUSPEND) { 80 if (state.event == PM_EVENT_SUSPEND) {
@@ -79,6 +88,9 @@ static int i915_suspend(struct drm_device *dev, pm_message_t state)
79 88
80static int i915_resume(struct drm_device *dev) 89static int i915_resume(struct drm_device *dev)
81{ 90{
91 struct drm_i915_private *dev_priv = dev->dev_private;
92 int ret = 0;
93
82 pci_set_power_state(dev->pdev, PCI_D0); 94 pci_set_power_state(dev->pdev, PCI_D0);
83 pci_restore_state(dev->pdev); 95 pci_restore_state(dev->pdev);
84 if (pci_enable_device(dev->pdev)) 96 if (pci_enable_device(dev->pdev))
@@ -89,7 +101,20 @@ static int i915_resume(struct drm_device *dev)
89 101
90 intel_opregion_init(dev); 102 intel_opregion_init(dev);
91 103
92 return 0; 104 /* KMS EnterVT equivalent */
105 if (drm_core_check_feature(dev, DRIVER_MODESET)) {
106 mutex_lock(&dev->struct_mutex);
107 dev_priv->mm.suspended = 0;
108
109 ret = i915_gem_init_ringbuffer(dev);
110 if (ret != 0)
111 ret = -1;
112 mutex_unlock(&dev->struct_mutex);
113
114 drm_irq_install(dev);
115 }
116
117 return ret;
93} 118}
94 119
95static struct vm_operations_struct i915_gem_vm_ops = { 120static struct vm_operations_struct i915_gem_vm_ops = {
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 135a08f615cd..17fa40858d26 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -618,6 +618,7 @@ int i915_gem_init_ringbuffer(struct drm_device *dev);
618void i915_gem_cleanup_ringbuffer(struct drm_device *dev); 618void i915_gem_cleanup_ringbuffer(struct drm_device *dev);
619int i915_gem_do_init(struct drm_device *dev, unsigned long start, 619int i915_gem_do_init(struct drm_device *dev, unsigned long start,
620 unsigned long end); 620 unsigned long end);
621int i915_gem_idle(struct drm_device *dev);
621int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf); 622int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf);
622int i915_gem_object_set_to_gtt_domain(struct drm_gem_object *obj, 623int i915_gem_object_set_to_gtt_domain(struct drm_gem_object *obj,
623 int write); 624 int write);
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index ac534c9a2f81..e9882d0c2473 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -34,10 +34,6 @@
34 34
35#define I915_GEM_GPU_DOMAINS (~(I915_GEM_DOMAIN_CPU | I915_GEM_DOMAIN_GTT)) 35#define I915_GEM_GPU_DOMAINS (~(I915_GEM_DOMAIN_CPU | I915_GEM_DOMAIN_GTT))
36 36
37static void
38i915_gem_object_set_to_gpu_domain(struct drm_gem_object *obj,
39 uint32_t read_domains,
40 uint32_t write_domain);
41static void i915_gem_object_flush_gpu_write_domain(struct drm_gem_object *obj); 37static void i915_gem_object_flush_gpu_write_domain(struct drm_gem_object *obj);
42static void i915_gem_object_flush_gtt_write_domain(struct drm_gem_object *obj); 38static void i915_gem_object_flush_gtt_write_domain(struct drm_gem_object *obj);
43static void i915_gem_object_flush_cpu_write_domain(struct drm_gem_object *obj); 39static void i915_gem_object_flush_cpu_write_domain(struct drm_gem_object *obj);
@@ -215,7 +211,7 @@ fast_user_write(struct io_mapping *mapping,
215 211
216 vaddr_atomic = io_mapping_map_atomic_wc(mapping, page_base); 212 vaddr_atomic = io_mapping_map_atomic_wc(mapping, page_base);
217 unwritten = __copy_from_user_inatomic_nocache(vaddr_atomic + page_offset, 213 unwritten = __copy_from_user_inatomic_nocache(vaddr_atomic + page_offset,
218 user_data, length); 214 user_data, length, length);
219 io_mapping_unmap_atomic(vaddr_atomic); 215 io_mapping_unmap_atomic(vaddr_atomic);
220 if (unwritten) 216 if (unwritten)
221 return -EFAULT; 217 return -EFAULT;
@@ -1055,6 +1051,9 @@ i915_gem_retire_requests(struct drm_device *dev)
1055 drm_i915_private_t *dev_priv = dev->dev_private; 1051 drm_i915_private_t *dev_priv = dev->dev_private;
1056 uint32_t seqno; 1052 uint32_t seqno;
1057 1053
1054 if (!dev_priv->hw_status_page)
1055 return;
1056
1058 seqno = i915_get_gem_seqno(dev); 1057 seqno = i915_get_gem_seqno(dev);
1059 1058
1060 while (!list_empty(&dev_priv->mm.request_list)) { 1059 while (!list_empty(&dev_priv->mm.request_list)) {
@@ -2021,30 +2020,28 @@ i915_gem_object_set_to_cpu_domain(struct drm_gem_object *obj, int write)
2021 * drm_agp_chipset_flush 2020 * drm_agp_chipset_flush
2022 */ 2021 */
2023static void 2022static void
2024i915_gem_object_set_to_gpu_domain(struct drm_gem_object *obj, 2023i915_gem_object_set_to_gpu_domain(struct drm_gem_object *obj)
2025 uint32_t read_domains,
2026 uint32_t write_domain)
2027{ 2024{
2028 struct drm_device *dev = obj->dev; 2025 struct drm_device *dev = obj->dev;
2029 struct drm_i915_gem_object *obj_priv = obj->driver_private; 2026 struct drm_i915_gem_object *obj_priv = obj->driver_private;
2030 uint32_t invalidate_domains = 0; 2027 uint32_t invalidate_domains = 0;
2031 uint32_t flush_domains = 0; 2028 uint32_t flush_domains = 0;
2032 2029
2033 BUG_ON(read_domains & I915_GEM_DOMAIN_CPU); 2030 BUG_ON(obj->pending_read_domains & I915_GEM_DOMAIN_CPU);
2034 BUG_ON(write_domain == I915_GEM_DOMAIN_CPU); 2031 BUG_ON(obj->pending_write_domain == I915_GEM_DOMAIN_CPU);
2035 2032
2036#if WATCH_BUF 2033#if WATCH_BUF
2037 DRM_INFO("%s: object %p read %08x -> %08x write %08x -> %08x\n", 2034 DRM_INFO("%s: object %p read %08x -> %08x write %08x -> %08x\n",
2038 __func__, obj, 2035 __func__, obj,
2039 obj->read_domains, read_domains, 2036 obj->read_domains, obj->pending_read_domains,
2040 obj->write_domain, write_domain); 2037 obj->write_domain, obj->pending_write_domain);
2041#endif 2038#endif
2042 /* 2039 /*
2043 * If the object isn't moving to a new write domain, 2040 * If the object isn't moving to a new write domain,
2044 * let the object stay in multiple read domains 2041 * let the object stay in multiple read domains
2045 */ 2042 */
2046 if (write_domain == 0) 2043 if (obj->pending_write_domain == 0)
2047 read_domains |= obj->read_domains; 2044 obj->pending_read_domains |= obj->read_domains;
2048 else 2045 else
2049 obj_priv->dirty = 1; 2046 obj_priv->dirty = 1;
2050 2047
@@ -2054,15 +2051,17 @@ i915_gem_object_set_to_gpu_domain(struct drm_gem_object *obj,
2054 * any read domains which differ from the old 2051 * any read domains which differ from the old
2055 * write domain 2052 * write domain
2056 */ 2053 */
2057 if (obj->write_domain && obj->write_domain != read_domains) { 2054 if (obj->write_domain &&
2055 obj->write_domain != obj->pending_read_domains) {
2058 flush_domains |= obj->write_domain; 2056 flush_domains |= obj->write_domain;
2059 invalidate_domains |= read_domains & ~obj->write_domain; 2057 invalidate_domains |=
2058 obj->pending_read_domains & ~obj->write_domain;
2060 } 2059 }
2061 /* 2060 /*
2062 * Invalidate any read caches which may have 2061 * Invalidate any read caches which may have
2063 * stale data. That is, any new read domains. 2062 * stale data. That is, any new read domains.
2064 */ 2063 */
2065 invalidate_domains |= read_domains & ~obj->read_domains; 2064 invalidate_domains |= obj->pending_read_domains & ~obj->read_domains;
2066 if ((flush_domains | invalidate_domains) & I915_GEM_DOMAIN_CPU) { 2065 if ((flush_domains | invalidate_domains) & I915_GEM_DOMAIN_CPU) {
2067#if WATCH_BUF 2066#if WATCH_BUF
2068 DRM_INFO("%s: CPU domain flush %08x invalidate %08x\n", 2067 DRM_INFO("%s: CPU domain flush %08x invalidate %08x\n",
@@ -2071,9 +2070,15 @@ i915_gem_object_set_to_gpu_domain(struct drm_gem_object *obj,
2071 i915_gem_clflush_object(obj); 2070 i915_gem_clflush_object(obj);
2072 } 2071 }
2073 2072
2074 if ((write_domain | flush_domains) != 0) 2073 /* The actual obj->write_domain will be updated with
2075 obj->write_domain = write_domain; 2074 * pending_write_domain after we emit the accumulated flush for all
2076 obj->read_domains = read_domains; 2075 * of our domain changes in execbuffers (which clears objects'
2076 * write_domains). So if we have a current write domain that we
2077 * aren't changing, set pending_write_domain to that.
2078 */
2079 if (flush_domains == 0 && obj->pending_write_domain == 0)
2080 obj->pending_write_domain = obj->write_domain;
2081 obj->read_domains = obj->pending_read_domains;
2077 2082
2078 dev->invalidate_domains |= invalidate_domains; 2083 dev->invalidate_domains |= invalidate_domains;
2079 dev->flush_domains |= flush_domains; 2084 dev->flush_domains |= flush_domains;
@@ -2583,9 +2588,7 @@ i915_gem_execbuffer(struct drm_device *dev, void *data,
2583 struct drm_gem_object *obj = object_list[i]; 2588 struct drm_gem_object *obj = object_list[i];
2584 2589
2585 /* Compute new gpu domains and update invalidate/flush */ 2590 /* Compute new gpu domains and update invalidate/flush */
2586 i915_gem_object_set_to_gpu_domain(obj, 2591 i915_gem_object_set_to_gpu_domain(obj);
2587 obj->pending_read_domains,
2588 obj->pending_write_domain);
2589 } 2592 }
2590 2593
2591 i915_verify_inactive(dev, __FILE__, __LINE__); 2594 i915_verify_inactive(dev, __FILE__, __LINE__);
@@ -2604,6 +2607,12 @@ i915_gem_execbuffer(struct drm_device *dev, void *data,
2604 (void)i915_add_request(dev, dev->flush_domains); 2607 (void)i915_add_request(dev, dev->flush_domains);
2605 } 2608 }
2606 2609
2610 for (i = 0; i < args->buffer_count; i++) {
2611 struct drm_gem_object *obj = object_list[i];
2612
2613 obj->write_domain = obj->pending_write_domain;
2614 }
2615
2607 i915_verify_inactive(dev, __FILE__, __LINE__); 2616 i915_verify_inactive(dev, __FILE__, __LINE__);
2608 2617
2609#if WATCH_COHERENCY 2618#if WATCH_COHERENCY
@@ -2866,6 +2875,13 @@ i915_gem_busy_ioctl(struct drm_device *dev, void *data,
2866 return -EBADF; 2875 return -EBADF;
2867 } 2876 }
2868 2877
2878 /* Update the active list for the hardware's current position.
2879 * Otherwise this only updates on a delayed timer or when irqs are
2880 * actually unmasked, and our working set ends up being larger than
2881 * required.
2882 */
2883 i915_gem_retire_requests(dev);
2884
2869 obj_priv = obj->driver_private; 2885 obj_priv = obj->driver_private;
2870 /* Don't count being on the flushing list against the object being 2886 /* Don't count being on the flushing list against the object being
2871 * done. Otherwise, a buffer left on the flushing list but not getting 2887 * done. Otherwise, a buffer left on the flushing list but not getting
@@ -2967,7 +2983,7 @@ i915_gem_evict_from_list(struct drm_device *dev, struct list_head *head)
2967 return 0; 2983 return 0;
2968} 2984}
2969 2985
2970static int 2986int
2971i915_gem_idle(struct drm_device *dev) 2987i915_gem_idle(struct drm_device *dev)
2972{ 2988{
2973 drm_i915_private_t *dev_priv = dev->dev_private; 2989 drm_i915_private_t *dev_priv = dev->dev_private;
@@ -3130,16 +3146,20 @@ static void
3130i915_gem_cleanup_hws(struct drm_device *dev) 3146i915_gem_cleanup_hws(struct drm_device *dev)
3131{ 3147{
3132 drm_i915_private_t *dev_priv = dev->dev_private; 3148 drm_i915_private_t *dev_priv = dev->dev_private;
3133 struct drm_gem_object *obj = dev_priv->hws_obj; 3149 struct drm_gem_object *obj;
3134 struct drm_i915_gem_object *obj_priv = obj->driver_private; 3150 struct drm_i915_gem_object *obj_priv;
3135 3151
3136 if (dev_priv->hws_obj == NULL) 3152 if (dev_priv->hws_obj == NULL)
3137 return; 3153 return;
3138 3154
3155 obj = dev_priv->hws_obj;
3156 obj_priv = obj->driver_private;
3157
3139 kunmap(obj_priv->page_list[0]); 3158 kunmap(obj_priv->page_list[0]);
3140 i915_gem_object_unpin(obj); 3159 i915_gem_object_unpin(obj);
3141 drm_gem_object_unreference(obj); 3160 drm_gem_object_unreference(obj);
3142 dev_priv->hws_obj = NULL; 3161 dev_priv->hws_obj = NULL;
3162
3143 memset(&dev_priv->hws_map, 0, sizeof(dev_priv->hws_map)); 3163 memset(&dev_priv->hws_map, 0, sizeof(dev_priv->hws_map));
3144 dev_priv->hw_status_page = NULL; 3164 dev_priv->hw_status_page = NULL;
3145 3165
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index 4d2baf7b00be..65b635ce28c8 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -1008,6 +1008,7 @@ static int intel_crtc_cursor_set(struct drm_crtc *crtc,
1008 temp = CURSOR_MODE_DISABLE; 1008 temp = CURSOR_MODE_DISABLE;
1009 addr = 0; 1009 addr = 0;
1010 bo = NULL; 1010 bo = NULL;
1011 mutex_lock(&dev->struct_mutex);
1011 goto finish; 1012 goto finish;
1012 } 1013 }
1013 1014
diff --git a/drivers/i2c/busses/i2c-acorn.c b/drivers/i2c/busses/i2c-acorn.c
index 9fee3ca17344..9aefb5e5864d 100644
--- a/drivers/i2c/busses/i2c-acorn.c
+++ b/drivers/i2c/busses/i2c-acorn.c
@@ -79,10 +79,11 @@ static struct i2c_algo_bit_data ioc_data = {
79 .getsda = ioc_getsda, 79 .getsda = ioc_getsda,
80 .getscl = ioc_getscl, 80 .getscl = ioc_getscl,
81 .udelay = 80, 81 .udelay = 80,
82 .timeout = 100 82 .timeout = HZ,
83}; 83};
84 84
85static struct i2c_adapter ioc_ops = { 85static struct i2c_adapter ioc_ops = {
86 .nr = 0,
86 .algo_data = &ioc_data, 87 .algo_data = &ioc_data,
87}; 88};
88 89
@@ -90,7 +91,7 @@ static int __init i2c_ioc_init(void)
90{ 91{
91 force_ones = FORCE_ONES | SCL | SDA; 92 force_ones = FORCE_ONES | SCL | SDA;
92 93
93 return i2c_bit_add_bus(&ioc_ops); 94 return i2c_bit_add_numbered_bus(&ioc_ops);
94} 95}
95 96
96module_init(i2c_ioc_init); 97module_init(i2c_ioc_init);
diff --git a/drivers/i2c/busses/i2c-amd8111.c b/drivers/i2c/busses/i2c-amd8111.c
index edab51973bf5..a7c59908c457 100644
--- a/drivers/i2c/busses/i2c-amd8111.c
+++ b/drivers/i2c/busses/i2c-amd8111.c
@@ -72,7 +72,7 @@ static unsigned int amd_ec_wait_write(struct amd_smbus *smbus)
72{ 72{
73 int timeout = 500; 73 int timeout = 500;
74 74
75 while (timeout-- && (inb(smbus->base + AMD_EC_SC) & AMD_EC_SC_IBF)) 75 while ((inb(smbus->base + AMD_EC_SC) & AMD_EC_SC_IBF) && --timeout)
76 udelay(1); 76 udelay(1);
77 77
78 if (!timeout) { 78 if (!timeout) {
@@ -88,7 +88,7 @@ static unsigned int amd_ec_wait_read(struct amd_smbus *smbus)
88{ 88{
89 int timeout = 500; 89 int timeout = 500;
90 90
91 while (timeout-- && (~inb(smbus->base + AMD_EC_SC) & AMD_EC_SC_OBF)) 91 while ((~inb(smbus->base + AMD_EC_SC) & AMD_EC_SC_OBF) && --timeout)
92 udelay(1); 92 udelay(1);
93 93
94 if (!timeout) { 94 if (!timeout) {
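The operand order in the two busy-wait loops above is the substance of the fix: with the old postfix form, an expired wait leaves the counter at -1, so the following "if (!timeout)" error check never fires. A minimal stand-alone sketch, not part of the patch (the helper names and the stand-in busy condition are made up for illustration):

	#include <stdio.h>

	/* Old form: the counter is decremented even on the iteration that
	 * ends the loop, so expiry leaves it at -1 and "!timeout" is false.
	 */
	static int wait_old(int still_busy)
	{
		int timeout = 3;

		while (timeout-- && still_busy)
			;
		return timeout;			/* -1 when the wait expires */
	}

	/* New form: the busy test comes first and the predecrement stops the
	 * loop exactly at 0, so "!timeout" reliably reports the expiry.
	 */
	static int wait_new(int still_busy)
	{
		int timeout = 3;

		while (still_busy && --timeout)
			;
		return timeout;			/* 0 when the wait expires */
	}

	int main(void)
	{
		printf("old: %d, new: %d\n", wait_old(1), wait_new(1));
		return 0;
	}

The same reordering is applied to the polling loop in i2c-pxa.c further below.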
diff --git a/drivers/i2c/busses/i2c-ixp2000.c b/drivers/i2c/busses/i2c-ixp2000.c
index 8e8467970481..c016f7a2c5fc 100644
--- a/drivers/i2c/busses/i2c-ixp2000.c
+++ b/drivers/i2c/busses/i2c-ixp2000.c
@@ -114,7 +114,7 @@ static int ixp2000_i2c_probe(struct platform_device *plat_dev)
114 drv_data->algo_data.getsda = ixp2000_bit_getsda; 114 drv_data->algo_data.getsda = ixp2000_bit_getsda;
115 drv_data->algo_data.getscl = ixp2000_bit_getscl; 115 drv_data->algo_data.getscl = ixp2000_bit_getscl;
116 drv_data->algo_data.udelay = 6; 116 drv_data->algo_data.udelay = 6;
117 drv_data->algo_data.timeout = 100; 117 drv_data->algo_data.timeout = HZ;
118 118
119 strlcpy(drv_data->adapter.name, plat_dev->dev.driver->name, 119 strlcpy(drv_data->adapter.name, plat_dev->dev.driver->name,
120 sizeof(drv_data->adapter.name)); 120 sizeof(drv_data->adapter.name));
diff --git a/drivers/i2c/busses/i2c-pxa.c b/drivers/i2c/busses/i2c-pxa.c
index 6af68146c342..bdb1f7510e91 100644
--- a/drivers/i2c/busses/i2c-pxa.c
+++ b/drivers/i2c/busses/i2c-pxa.c
@@ -644,7 +644,7 @@ static int i2c_pxa_do_pio_xfer(struct pxa_i2c *i2c,
644 644
645 i2c_pxa_start_message(i2c); 645 i2c_pxa_start_message(i2c);
646 646
647 while (timeout-- && i2c->msg_num > 0) { 647 while (i2c->msg_num > 0 && --timeout) {
648 i2c_pxa_handler(0, i2c); 648 i2c_pxa_handler(0, i2c);
649 udelay(10); 649 udelay(10);
650 } 650 }
diff --git a/drivers/i2c/busses/scx200_i2c.c b/drivers/i2c/busses/scx200_i2c.c
index 162b74a04886..42df0eca43d5 100644
--- a/drivers/i2c/busses/scx200_i2c.c
+++ b/drivers/i2c/busses/scx200_i2c.c
@@ -76,7 +76,7 @@ static struct i2c_algo_bit_data scx200_i2c_data = {
76 .getsda = scx200_i2c_getsda, 76 .getsda = scx200_i2c_getsda,
77 .getscl = scx200_i2c_getscl, 77 .getscl = scx200_i2c_getscl,
78 .udelay = 10, 78 .udelay = 10,
79 .timeout = 100, 79 .timeout = HZ,
80}; 80};
81 81
82static struct i2c_adapter scx200_i2c_ops = { 82static struct i2c_adapter scx200_i2c_ops = {
diff --git a/drivers/i2c/i2c-core.c b/drivers/i2c/i2c-core.c
index b1c9abe24c7b..e7d984866de0 100644
--- a/drivers/i2c/i2c-core.c
+++ b/drivers/i2c/i2c-core.c
@@ -1831,7 +1831,8 @@ static s32 i2c_smbus_xfer_emulated(struct i2c_adapter * adapter, u16 addr,
1831 case I2C_SMBUS_QUICK: 1831 case I2C_SMBUS_QUICK:
1832 msg[0].len = 0; 1832 msg[0].len = 0;
1833 /* Special case: The read/write field is used as data */ 1833 /* Special case: The read/write field is used as data */
1834 msg[0].flags = flags | (read_write==I2C_SMBUS_READ)?I2C_M_RD:0; 1834 msg[0].flags = flags | (read_write == I2C_SMBUS_READ ?
1835 I2C_M_RD : 0);
1835 num = 1; 1836 num = 1;
1836 break; 1837 break;
1837 case I2C_SMBUS_BYTE: 1838 case I2C_SMBUS_BYTE:
diff --git a/drivers/i2c/i2c-dev.c b/drivers/i2c/i2c-dev.c
index c171988a9f51..7e13d2df9af3 100644
--- a/drivers/i2c/i2c-dev.c
+++ b/drivers/i2c/i2c-dev.c
@@ -35,6 +35,7 @@
35#include <linux/i2c.h> 35#include <linux/i2c.h>
36#include <linux/i2c-dev.h> 36#include <linux/i2c-dev.h>
37#include <linux/smp_lock.h> 37#include <linux/smp_lock.h>
38#include <linux/jiffies.h>
38#include <asm/uaccess.h> 39#include <asm/uaccess.h>
39 40
40static struct i2c_driver i2cdev_driver; 41static struct i2c_driver i2cdev_driver;
@@ -422,7 +423,10 @@ static long i2cdev_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
422 client->adapter->retries = arg; 423 client->adapter->retries = arg;
423 break; 424 break;
424 case I2C_TIMEOUT: 425 case I2C_TIMEOUT:
425 client->adapter->timeout = arg; 426 /* For historical reasons, user-space sets the timeout
427 * value in units of 10 ms.
428 */
429 client->adapter->timeout = msecs_to_jiffies(arg * 10);
426 break; 430 break;
427 default: 431 default:
428 /* NOTE: returning a fault code here could cause trouble 432 /* NOTE: returning a fault code here could cause trouble
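With the I2C_TIMEOUT change above, adapter->timeout is now kept in jiffies while the ioctl argument stays in its historical 10 ms units. A minimal sketch of the conversion in both directions, assuming only <linux/jiffies.h> (the helper names are made up for illustration; only msecs_to_jiffies() and jiffies_to_msecs() are real kernel interfaces):

	#include <linux/jiffies.h>

	/* ioctl argument (10 ms units) -> jiffies; e.g. arg = 25 means 250 ms */
	static inline unsigned long i2c_timeout_arg_to_jiffies(unsigned long arg)
	{
		return msecs_to_jiffies(arg * 10);
	}

	/* jiffies -> ioctl argument (10 ms units), for reporting back */
	static inline unsigned long i2c_timeout_jiffies_to_arg(unsigned long timeout)
	{
		return jiffies_to_msecs(timeout) / 10;
	}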
diff --git a/drivers/ieee1394/dma.h b/drivers/ieee1394/dma.h
index 2727bcd24194..467373cab8e5 100644
--- a/drivers/ieee1394/dma.h
+++ b/drivers/ieee1394/dma.h
@@ -12,6 +12,7 @@
12 12
13#include <asm/types.h> 13#include <asm/types.h>
14 14
15struct file;
15struct pci_dev; 16struct pci_dev;
16struct scatterlist; 17struct scatterlist;
17struct vm_area_struct; 18struct vm_area_struct;
diff --git a/drivers/ieee1394/ieee1394_core.c b/drivers/ieee1394/ieee1394_core.c
index 2beb8d94f7bd..1028e725a27e 100644
--- a/drivers/ieee1394/ieee1394_core.c
+++ b/drivers/ieee1394/ieee1394_core.c
@@ -1314,6 +1314,7 @@ EXPORT_SYMBOL(hpsb_make_lock64packet);
1314EXPORT_SYMBOL(hpsb_make_phypacket); 1314EXPORT_SYMBOL(hpsb_make_phypacket);
1315EXPORT_SYMBOL(hpsb_read); 1315EXPORT_SYMBOL(hpsb_read);
1316EXPORT_SYMBOL(hpsb_write); 1316EXPORT_SYMBOL(hpsb_write);
1317EXPORT_SYMBOL(hpsb_lock);
1317EXPORT_SYMBOL(hpsb_packet_success); 1318EXPORT_SYMBOL(hpsb_packet_success);
1318 1319
1319/** highlevel.c **/ 1320/** highlevel.c **/
diff --git a/drivers/ieee1394/ieee1394_transactions.c b/drivers/ieee1394/ieee1394_transactions.c
index 10c3d9f8c038..675b3135d5f1 100644
--- a/drivers/ieee1394/ieee1394_transactions.c
+++ b/drivers/ieee1394/ieee1394_transactions.c
@@ -501,8 +501,6 @@ int hpsb_read(struct hpsb_host *host, nodeid_t node, unsigned int generation,
501 if (length == 0) 501 if (length == 0)
502 return -EINVAL; 502 return -EINVAL;
503 503
504 BUG_ON(in_interrupt()); // We can't be called in an interrupt, yet
505
506 packet = hpsb_make_readpacket(host, node, addr, length); 504 packet = hpsb_make_readpacket(host, node, addr, length);
507 505
508 if (!packet) { 506 if (!packet) {
@@ -550,8 +548,6 @@ int hpsb_write(struct hpsb_host *host, nodeid_t node, unsigned int generation,
550 if (length == 0) 548 if (length == 0)
551 return -EINVAL; 549 return -EINVAL;
552 550
553 BUG_ON(in_interrupt()); // We can't be called in an interrupt, yet
554
555 packet = hpsb_make_writepacket(host, node, addr, buffer, length); 551 packet = hpsb_make_writepacket(host, node, addr, buffer, length);
556 552
557 if (!packet) 553 if (!packet)
@@ -570,3 +566,30 @@ int hpsb_write(struct hpsb_host *host, nodeid_t node, unsigned int generation,
570 566
571 return retval; 567 return retval;
572} 568}
569
570int hpsb_lock(struct hpsb_host *host, nodeid_t node, unsigned int generation,
571 u64 addr, int extcode, quadlet_t *data, quadlet_t arg)
572{
573 struct hpsb_packet *packet;
574 int retval = 0;
575
576 packet = hpsb_make_lockpacket(host, node, addr, extcode, data, arg);
577 if (!packet)
578 return -ENOMEM;
579
580 packet->generation = generation;
581 retval = hpsb_send_packet_and_wait(packet);
582 if (retval < 0)
583 goto hpsb_lock_fail;
584
585 retval = hpsb_packet_success(packet);
586
587 if (retval == 0)
588 *data = packet->data[0];
589
590hpsb_lock_fail:
591 hpsb_free_tlabel(packet);
592 hpsb_free_packet(packet);
593
594 return retval;
595}
diff --git a/drivers/ieee1394/ieee1394_transactions.h b/drivers/ieee1394/ieee1394_transactions.h
index d2d5bc3546d7..20b693be14b2 100644
--- a/drivers/ieee1394/ieee1394_transactions.h
+++ b/drivers/ieee1394/ieee1394_transactions.h
@@ -30,6 +30,8 @@ int hpsb_read(struct hpsb_host *host, nodeid_t node, unsigned int generation,
30 u64 addr, quadlet_t *buffer, size_t length); 30 u64 addr, quadlet_t *buffer, size_t length);
31int hpsb_write(struct hpsb_host *host, nodeid_t node, unsigned int generation, 31int hpsb_write(struct hpsb_host *host, nodeid_t node, unsigned int generation,
32 u64 addr, quadlet_t *buffer, size_t length); 32 u64 addr, quadlet_t *buffer, size_t length);
33int hpsb_lock(struct hpsb_host *host, nodeid_t node, unsigned int generation,
34 u64 addr, int extcode, quadlet_t *data, quadlet_t arg);
33 35
34#ifdef HPSB_DEBUG_TLABELS 36#ifdef HPSB_DEBUG_TLABELS
35extern spinlock_t hpsb_tlabel_lock; 37extern spinlock_t hpsb_tlabel_lock;
diff --git a/drivers/ieee1394/iso.h b/drivers/ieee1394/iso.h
index b5de5f21ef78..c2089c093aa7 100644
--- a/drivers/ieee1394/iso.h
+++ b/drivers/ieee1394/iso.h
@@ -13,6 +13,7 @@
13#define IEEE1394_ISO_H 13#define IEEE1394_ISO_H
14 14
15#include <linux/spinlock_types.h> 15#include <linux/spinlock_types.h>
16#include <linux/wait.h>
16#include <asm/atomic.h> 17#include <asm/atomic.h>
17#include <asm/types.h> 18#include <asm/types.h>
18 19
diff --git a/drivers/ieee1394/nodemgr.c b/drivers/ieee1394/nodemgr.c
index 906c5a98d814..53aada5bbe1e 100644
--- a/drivers/ieee1394/nodemgr.c
+++ b/drivers/ieee1394/nodemgr.c
@@ -971,6 +971,9 @@ static struct unit_directory *nodemgr_process_unit_directory
971 ud->ud_kv = ud_kv; 971 ud->ud_kv = ud_kv;
972 ud->id = (*id)++; 972 ud->id = (*id)++;
973 973
974 /* inherit vendor_id from root directory if none exists in unit dir */
975 ud->vendor_id = ne->vendor_id;
976
974 csr1212_for_each_dir_entry(ne->csr, kv, ud_kv, dentry) { 977 csr1212_for_each_dir_entry(ne->csr, kv, ud_kv, dentry) {
975 switch (kv->key.id) { 978 switch (kv->key.id) {
976 case CSR1212_KV_ID_VENDOR: 979 case CSR1212_KV_ID_VENDOR:
@@ -1265,7 +1268,8 @@ static void nodemgr_update_node(struct node_entry *ne, struct csr1212_csr *csr,
1265 csr1212_destroy_csr(csr); 1268 csr1212_destroy_csr(csr);
1266 } 1269 }
1267 1270
1268 /* Mark the node current */ 1271 /* Finally, mark the node current */
1272 smp_wmb();
1269 ne->generation = generation; 1273 ne->generation = generation;
1270 1274
1271 if (ne->in_limbo) { 1275 if (ne->in_limbo) {
@@ -1798,7 +1802,7 @@ void hpsb_node_fill_packet(struct node_entry *ne, struct hpsb_packet *packet)
1798{ 1802{
1799 packet->host = ne->host; 1803 packet->host = ne->host;
1800 packet->generation = ne->generation; 1804 packet->generation = ne->generation;
1801 barrier(); 1805 smp_rmb();
1802 packet->node_id = ne->nodeid; 1806 packet->node_id = ne->nodeid;
1803} 1807}
1804 1808
@@ -1807,7 +1811,7 @@ int hpsb_node_write(struct node_entry *ne, u64 addr,
1807{ 1811{
1808 unsigned int generation = ne->generation; 1812 unsigned int generation = ne->generation;
1809 1813
1810 barrier(); 1814 smp_rmb();
1811 return hpsb_write(ne->host, ne->nodeid, generation, 1815 return hpsb_write(ne->host, ne->nodeid, generation,
1812 addr, buffer, length); 1816 addr, buffer, length);
1813} 1817}
diff --git a/drivers/ieee1394/nodemgr.h b/drivers/ieee1394/nodemgr.h
index 15ea09733e84..ee5acdbd114a 100644
--- a/drivers/ieee1394/nodemgr.h
+++ b/drivers/ieee1394/nodemgr.h
@@ -21,9 +21,11 @@
21#define _IEEE1394_NODEMGR_H 21#define _IEEE1394_NODEMGR_H
22 22
23#include <linux/device.h> 23#include <linux/device.h>
24#include <asm/system.h>
24#include <asm/types.h> 25#include <asm/types.h>
25 26
26#include "ieee1394_core.h" 27#include "ieee1394_core.h"
28#include "ieee1394_transactions.h"
27#include "ieee1394_types.h" 29#include "ieee1394_types.h"
28 30
29struct csr1212_csr; 31struct csr1212_csr;
@@ -154,6 +156,22 @@ static inline int hpsb_node_entry_valid(struct node_entry *ne)
154void hpsb_node_fill_packet(struct node_entry *ne, struct hpsb_packet *packet); 156void hpsb_node_fill_packet(struct node_entry *ne, struct hpsb_packet *packet);
155int hpsb_node_write(struct node_entry *ne, u64 addr, 157int hpsb_node_write(struct node_entry *ne, u64 addr,
156 quadlet_t *buffer, size_t length); 158 quadlet_t *buffer, size_t length);
159static inline int hpsb_node_read(struct node_entry *ne, u64 addr,
160 quadlet_t *buffer, size_t length)
161{
162 unsigned int g = ne->generation;
163
164 smp_rmb();
165 return hpsb_read(ne->host, ne->nodeid, g, addr, buffer, length);
166}
167static inline int hpsb_node_lock(struct node_entry *ne, u64 addr, int extcode,
168 quadlet_t *buffer, quadlet_t arg)
169{
170 unsigned int g = ne->generation;
171
172 smp_rmb();
173 return hpsb_lock(ne->host, ne->nodeid, g, addr, extcode, buffer, arg);
174}
157int nodemgr_for_each_host(void *data, int (*cb)(struct hpsb_host *, void *)); 175int nodemgr_for_each_host(void *data, int (*cb)(struct hpsb_host *, void *));
158 176
159int init_ieee1394_nodemgr(void); 177int init_ieee1394_nodemgr(void);
diff --git a/drivers/isdn/sc/shmem.c b/drivers/isdn/sc/shmem.c
index 712220cef139..7f16d75d2d89 100644
--- a/drivers/isdn/sc/shmem.c
+++ b/drivers/isdn/sc/shmem.c
@@ -54,7 +54,7 @@ void memcpy_toshmem(int card, void *dest, const void *src, size_t n)
54 spin_unlock_irqrestore(&sc_adapter[card]->lock, flags); 54 spin_unlock_irqrestore(&sc_adapter[card]->lock, flags);
55 pr_debug("%s: set page to %#x\n",sc_adapter[card]->devicename, 55 pr_debug("%s: set page to %#x\n",sc_adapter[card]->devicename,
56 ((sc_adapter[card]->shmem_magic + ch * SRAM_PAGESIZE)>>14)|0x80); 56 ((sc_adapter[card]->shmem_magic + ch * SRAM_PAGESIZE)>>14)|0x80);
57 pr_debug("%s: copying %d bytes from %#lx to %#lx\n", 57 pr_debug("%s: copying %zu bytes from %#lx to %#lx\n",
58 sc_adapter[card]->devicename, n, 58 sc_adapter[card]->devicename, n,
59 (unsigned long) src, 59 (unsigned long) src,
60 sc_adapter[card]->rambase + ((unsigned long) dest %0x4000)); 60 sc_adapter[card]->rambase + ((unsigned long) dest %0x4000));
diff --git a/drivers/media/dvb/Kconfig b/drivers/media/dvb/Kconfig
index 40ebde53b3ce..b0198691892a 100644
--- a/drivers/media/dvb/Kconfig
+++ b/drivers/media/dvb/Kconfig
@@ -51,6 +51,10 @@ comment "Supported SDMC DM1105 Adapters"
51 depends on DVB_CORE && PCI && I2C 51 depends on DVB_CORE && PCI && I2C
52source "drivers/media/dvb/dm1105/Kconfig" 52source "drivers/media/dvb/dm1105/Kconfig"
53 53
54comment "Supported FireWire (IEEE 1394) Adapters"
55 depends on DVB_CORE && IEEE1394
56source "drivers/media/dvb/firewire/Kconfig"
57
54comment "Supported DVB Frontends" 58comment "Supported DVB Frontends"
55 depends on DVB_CORE 59 depends on DVB_CORE
56source "drivers/media/dvb/frontends/Kconfig" 60source "drivers/media/dvb/frontends/Kconfig"
diff --git a/drivers/media/dvb/Makefile b/drivers/media/dvb/Makefile
index f91e9eb15e52..6092a5bb5a7d 100644
--- a/drivers/media/dvb/Makefile
+++ b/drivers/media/dvb/Makefile
@@ -3,3 +3,5 @@
3# 3#
4 4
5obj-y := dvb-core/ frontends/ ttpci/ ttusb-dec/ ttusb-budget/ b2c2/ bt8xx/ dvb-usb/ pluto2/ siano/ dm1105/ 5obj-y := dvb-core/ frontends/ ttpci/ ttusb-dec/ ttusb-budget/ b2c2/ bt8xx/ dvb-usb/ pluto2/ siano/ dm1105/
6
7obj-$(CONFIG_DVB_FIREDTV) += firewire/
diff --git a/drivers/media/dvb/firewire/Kconfig b/drivers/media/dvb/firewire/Kconfig
new file mode 100644
index 000000000000..69028253e984
--- /dev/null
+++ b/drivers/media/dvb/firewire/Kconfig
@@ -0,0 +1,22 @@
1config DVB_FIREDTV
2 tristate "FireDTV and FloppyDTV"
3 depends on DVB_CORE && IEEE1394
4 help
5 Support for DVB receivers from Digital Everywhere
6 which are connected via IEEE 1394 (FireWire).
7
8 These devices don't have an MPEG decoder built in,
9 so you need an external software decoder to watch TV.
10
11 To compile this driver as a module, say M here:
12 the module will be called firedtv.
13
14if DVB_FIREDTV
15
16config DVB_FIREDTV_IEEE1394
17 def_bool IEEE1394
18
19config DVB_FIREDTV_INPUT
20 def_bool INPUT = y || (INPUT = m && DVB_FIREDTV = m)
21
22endif # DVB_FIREDTV
diff --git a/drivers/media/dvb/firewire/Makefile b/drivers/media/dvb/firewire/Makefile
new file mode 100644
index 000000000000..2034695ba194
--- /dev/null
+++ b/drivers/media/dvb/firewire/Makefile
@@ -0,0 +1,8 @@
1obj-$(CONFIG_DVB_FIREDTV) += firedtv.o
2
3firedtv-y := firedtv-avc.o firedtv-ci.o firedtv-dvb.o firedtv-fe.o
4firedtv-$(CONFIG_DVB_FIREDTV_IEEE1394) += firedtv-1394.o
5firedtv-$(CONFIG_DVB_FIREDTV_INPUT) += firedtv-rc.o
6
7ccflags-y += -Idrivers/media/dvb/dvb-core
8ccflags-$(CONFIG_DVB_FIREDTV_IEEE1394) += -Idrivers/ieee1394
diff --git a/drivers/media/dvb/firewire/firedtv-1394.c b/drivers/media/dvb/firewire/firedtv-1394.c
new file mode 100644
index 000000000000..4e207658c5d9
--- /dev/null
+++ b/drivers/media/dvb/firewire/firedtv-1394.c
@@ -0,0 +1,285 @@
1/*
2 * FireDTV driver (formerly known as FireSAT)
3 *
4 * Copyright (C) 2004 Andreas Monitzer <andy@monitzer.com>
5 * Copyright (C) 2007-2008 Ben Backx <ben@bbackx.com>
6 * Copyright (C) 2008 Henrik Kurelid <henrik@kurelid.se>
7 *
8 * This program is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU General Public License as
10 * published by the Free Software Foundation; either version 2 of
11 * the License, or (at your option) any later version.
12 */
13
14#include <linux/device.h>
15#include <linux/errno.h>
16#include <linux/kernel.h>
17#include <linux/list.h>
18#include <linux/spinlock.h>
19#include <linux/types.h>
20
21#include <dma.h>
22#include <csr1212.h>
23#include <highlevel.h>
24#include <hosts.h>
25#include <ieee1394.h>
26#include <iso.h>
27#include <nodemgr.h>
28
29#include "firedtv.h"
30
31static LIST_HEAD(node_list);
32static DEFINE_SPINLOCK(node_list_lock);
33
34#define FIREWIRE_HEADER_SIZE 4
35#define CIP_HEADER_SIZE 8
36
37static void rawiso_activity_cb(struct hpsb_iso *iso)
38{
39 struct firedtv *f, *fdtv = NULL;
40 unsigned int i, num, packet;
41 unsigned char *buf;
42 unsigned long flags;
43 int count;
44
45 spin_lock_irqsave(&node_list_lock, flags);
46 list_for_each_entry(f, &node_list, list)
47 if (f->backend_data == iso) {
48 fdtv = f;
49 break;
50 }
51 spin_unlock_irqrestore(&node_list_lock, flags);
52
53 packet = iso->first_packet;
54 num = hpsb_iso_n_ready(iso);
55
56 if (!fdtv) {
57 dev_err(fdtv->device, "received at unknown iso channel\n");
58 goto out;
59 }
60
61 for (i = 0; i < num; i++, packet = (packet + 1) % iso->buf_packets) {
62 buf = dma_region_i(&iso->data_buf, unsigned char,
63 iso->infos[packet].offset + CIP_HEADER_SIZE);
64 count = (iso->infos[packet].len - CIP_HEADER_SIZE) /
65 (188 + FIREWIRE_HEADER_SIZE);
66
67 /* ignore empty packet */
68 if (iso->infos[packet].len <= CIP_HEADER_SIZE)
69 continue;
70
71 while (count--) {
72 if (buf[FIREWIRE_HEADER_SIZE] == 0x47)
73 dvb_dmx_swfilter_packets(&fdtv->demux,
74 &buf[FIREWIRE_HEADER_SIZE], 1);
75 else
76 dev_err(fdtv->device,
77 "skipping invalid packet\n");
78 buf += 188 + FIREWIRE_HEADER_SIZE;
79 }
80 }
81out:
82 hpsb_iso_recv_release_packets(iso, num);
83}
84
85static inline struct node_entry *node_of(struct firedtv *fdtv)
86{
87 return container_of(fdtv->device, struct unit_directory, device)->ne;
88}
89
90static int node_lock(struct firedtv *fdtv, u64 addr, void *data, __be32 arg)
91{
92 return hpsb_node_lock(node_of(fdtv), addr, EXTCODE_COMPARE_SWAP, data,
93 (__force quadlet_t)arg);
94}
95
96static int node_read(struct firedtv *fdtv, u64 addr, void *data, size_t len)
97{
98 return hpsb_node_read(node_of(fdtv), addr, data, len);
99}
100
101static int node_write(struct firedtv *fdtv, u64 addr, void *data, size_t len)
102{
103 return hpsb_node_write(node_of(fdtv), addr, data, len);
104}
105
106#define FDTV_ISO_BUFFER_PACKETS 256
107#define FDTV_ISO_BUFFER_SIZE (FDTV_ISO_BUFFER_PACKETS * 200)
108
109static int start_iso(struct firedtv *fdtv)
110{
111 struct hpsb_iso *iso_handle;
112 int ret;
113
114 iso_handle = hpsb_iso_recv_init(node_of(fdtv)->host,
115 FDTV_ISO_BUFFER_SIZE, FDTV_ISO_BUFFER_PACKETS,
116 fdtv->isochannel, HPSB_ISO_DMA_DEFAULT,
117 -1, /* stat.config.irq_interval */
118 rawiso_activity_cb);
119 if (iso_handle == NULL) {
120 dev_err(fdtv->device, "cannot initialize iso receive\n");
121 return -ENOMEM;
122 }
123 fdtv->backend_data = iso_handle;
124
125 ret = hpsb_iso_recv_start(iso_handle, -1, -1, 0);
126 if (ret != 0) {
127 dev_err(fdtv->device, "cannot start iso receive\n");
128 hpsb_iso_shutdown(iso_handle);
129 fdtv->backend_data = NULL;
130 }
131 return ret;
132}
133
134static void stop_iso(struct firedtv *fdtv)
135{
136 struct hpsb_iso *iso_handle = fdtv->backend_data;
137
138 if (iso_handle != NULL) {
139 hpsb_iso_stop(iso_handle);
140 hpsb_iso_shutdown(iso_handle);
141 }
142 fdtv->backend_data = NULL;
143}
144
145static const struct firedtv_backend fdtv_1394_backend = {
146 .lock = node_lock,
147 .read = node_read,
148 .write = node_write,
149 .start_iso = start_iso,
150 .stop_iso = stop_iso,
151};
152
153static void fcp_request(struct hpsb_host *host, int nodeid, int direction,
154 int cts, u8 *data, size_t length)
155{
156 struct firedtv *f, *fdtv = NULL;
157 unsigned long flags;
158 int su;
159
160 if (length == 0 || (data[0] & 0xf0) != 0)
161 return;
162
163 su = data[1] & 0x7;
164
165 spin_lock_irqsave(&node_list_lock, flags);
166 list_for_each_entry(f, &node_list, list)
167 if (node_of(f)->host == host &&
168 node_of(f)->nodeid == nodeid &&
169 (f->subunit == su || (f->subunit == 0 && su == 0x7))) {
170 fdtv = f;
171 break;
172 }
173 spin_unlock_irqrestore(&node_list_lock, flags);
174
175 if (fdtv)
176 avc_recv(fdtv, data, length);
177}
178
179static int node_probe(struct device *dev)
180{
181 struct unit_directory *ud =
182 container_of(dev, struct unit_directory, device);
183 struct firedtv *fdtv;
184 int kv_len, err;
185 void *kv_str;
186
187 kv_len = (ud->model_name_kv->value.leaf.len - 2) * sizeof(quadlet_t);
188 kv_str = CSR1212_TEXTUAL_DESCRIPTOR_LEAF_DATA(ud->model_name_kv);
189
190 fdtv = fdtv_alloc(dev, &fdtv_1394_backend, kv_str, kv_len);
191 if (!fdtv)
192 return -ENOMEM;
193
194 /*
195 * Work around a bug in udev's path_id script: Use the fw-host's dev
196 * instead of the unit directory's dev as parent of the input device.
197 */
198 err = fdtv_register_rc(fdtv, dev->parent->parent);
199 if (err)
200 goto fail_free;
201
202 spin_lock_irq(&node_list_lock);
203 list_add_tail(&fdtv->list, &node_list);
204 spin_unlock_irq(&node_list_lock);
205
206 err = avc_identify_subunit(fdtv);
207 if (err)
208 goto fail;
209
210 err = fdtv_dvb_register(fdtv);
211 if (err)
212 goto fail;
213
214 avc_register_remote_control(fdtv);
215 return 0;
216fail:
217 spin_lock_irq(&node_list_lock);
218 list_del(&fdtv->list);
219 spin_unlock_irq(&node_list_lock);
220 fdtv_unregister_rc(fdtv);
221fail_free:
222 kfree(fdtv);
223 return err;
224}
225
226static int node_remove(struct device *dev)
227{
228 struct firedtv *fdtv = dev->driver_data;
229
230 fdtv_dvb_unregister(fdtv);
231
232 spin_lock_irq(&node_list_lock);
233 list_del(&fdtv->list);
234 spin_unlock_irq(&node_list_lock);
235
236 cancel_work_sync(&fdtv->remote_ctrl_work);
237 fdtv_unregister_rc(fdtv);
238
239 kfree(fdtv);
240 return 0;
241}
242
243static int node_update(struct unit_directory *ud)
244{
245 struct firedtv *fdtv = ud->device.driver_data;
246
247 if (fdtv->isochannel >= 0)
248 cmp_establish_pp_connection(fdtv, fdtv->subunit,
249 fdtv->isochannel);
250 return 0;
251}
252
253static struct hpsb_protocol_driver fdtv_driver = {
254 .name = "firedtv",
255 .update = node_update,
256 .driver = {
257 .probe = node_probe,
258 .remove = node_remove,
259 },
260};
261
262static struct hpsb_highlevel fdtv_highlevel = {
263 .name = "firedtv",
264 .fcp_request = fcp_request,
265};
266
267int __init fdtv_1394_init(struct ieee1394_device_id id_table[])
268{
269 int ret;
270
271 hpsb_register_highlevel(&fdtv_highlevel);
272 fdtv_driver.id_table = id_table;
273 ret = hpsb_register_protocol(&fdtv_driver);
274 if (ret) {
275 printk(KERN_ERR "firedtv: failed to register protocol\n");
276 hpsb_unregister_highlevel(&fdtv_highlevel);
277 }
278 return ret;
279}
280
281void __exit fdtv_1394_exit(void)
282{
283 hpsb_unregister_protocol(&fdtv_driver);
284 hpsb_unregister_highlevel(&fdtv_highlevel);
285}
diff --git a/drivers/media/dvb/firewire/firedtv-avc.c b/drivers/media/dvb/firewire/firedtv-avc.c
new file mode 100644
index 000000000000..b55d9ccaf33e
--- /dev/null
+++ b/drivers/media/dvb/firewire/firedtv-avc.c
@@ -0,0 +1,1315 @@
1/*
2 * FireDTV driver (formerly known as FireSAT)
3 *
4 * Copyright (C) 2004 Andreas Monitzer <andy@monitzer.com>
5 * Copyright (C) 2008 Ben Backx <ben@bbackx.com>
6 * Copyright (C) 2008 Henrik Kurelid <henrik@kurelid.se>
7 *
8 * This program is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU General Public License as
10 * published by the Free Software Foundation; either version 2 of
11 * the License, or (at your option) any later version.
12 */
13
14#include <linux/bug.h>
15#include <linux/crc32.h>
16#include <linux/delay.h>
17#include <linux/device.h>
18#include <linux/jiffies.h>
19#include <linux/kernel.h>
20#include <linux/moduleparam.h>
21#include <linux/mutex.h>
22#include <linux/string.h>
23#include <linux/stringify.h>
24#include <linux/wait.h>
25#include <linux/workqueue.h>
26
27#include "firedtv.h"
28
29#define FCP_COMMAND_REGISTER 0xfffff0000b00ULL
30
31#define AVC_CTYPE_CONTROL 0x0
32#define AVC_CTYPE_STATUS 0x1
33#define AVC_CTYPE_NOTIFY 0x3
34
35#define AVC_RESPONSE_ACCEPTED 0x9
36#define AVC_RESPONSE_STABLE 0xc
37#define AVC_RESPONSE_CHANGED 0xd
38#define AVC_RESPONSE_INTERIM 0xf
39
40#define AVC_SUBUNIT_TYPE_TUNER (0x05 << 3)
41#define AVC_SUBUNIT_TYPE_UNIT (0x1f << 3)
42
43#define AVC_OPCODE_VENDOR 0x00
44#define AVC_OPCODE_READ_DESCRIPTOR 0x09
45#define AVC_OPCODE_DSIT 0xc8
46#define AVC_OPCODE_DSD 0xcb
47
48#define DESCRIPTOR_TUNER_STATUS 0x80
49#define DESCRIPTOR_SUBUNIT_IDENTIFIER 0x00
50
51#define SFE_VENDOR_DE_COMPANYID_0 0x00 /* OUI of Digital Everywhere */
52#define SFE_VENDOR_DE_COMPANYID_1 0x12
53#define SFE_VENDOR_DE_COMPANYID_2 0x87
54
55#define SFE_VENDOR_OPCODE_REGISTER_REMOTE_CONTROL 0x0a
56#define SFE_VENDOR_OPCODE_LNB_CONTROL 0x52
57#define SFE_VENDOR_OPCODE_TUNE_QPSK 0x58 /* for DVB-S */
58
59#define SFE_VENDOR_OPCODE_GET_FIRMWARE_VERSION 0x00
60#define SFE_VENDOR_OPCODE_HOST2CA 0x56
61#define SFE_VENDOR_OPCODE_CA2HOST 0x57
62#define SFE_VENDOR_OPCODE_CISTATUS 0x59
63#define SFE_VENDOR_OPCODE_TUNE_QPSK2 0x60 /* for DVB-S2 */
64
65#define SFE_VENDOR_TAG_CA_RESET 0x00
66#define SFE_VENDOR_TAG_CA_APPLICATION_INFO 0x01
67#define SFE_VENDOR_TAG_CA_PMT 0x02
68#define SFE_VENDOR_TAG_CA_DATE_TIME 0x04
69#define SFE_VENDOR_TAG_CA_MMI 0x05
70#define SFE_VENDOR_TAG_CA_ENTER_MENU 0x07
71
72#define EN50221_LIST_MANAGEMENT_ONLY 0x03
73#define EN50221_TAG_APP_INFO 0x9f8021
74#define EN50221_TAG_CA_INFO 0x9f8031
75
76struct avc_command_frame {
77 int length;
78 u8 ctype;
79 u8 subunit;
80 u8 opcode;
81 u8 operand[509];
82};
83
84struct avc_response_frame {
85 int length;
86 u8 response;
87 u8 subunit;
88 u8 opcode;
89 u8 operand[509];
90};
91
92#define AVC_DEBUG_FCP_SUBACTIONS 1
93#define AVC_DEBUG_FCP_PAYLOADS 2
94
95static int avc_debug;
96module_param_named(debug, avc_debug, int, 0644);
97MODULE_PARM_DESC(debug, "Verbose logging (default = 0"
98 ", FCP subactions = " __stringify(AVC_DEBUG_FCP_SUBACTIONS)
99 ", FCP payloads = " __stringify(AVC_DEBUG_FCP_PAYLOADS)
100 ", or all = -1)");
101
102static const char *debug_fcp_ctype(unsigned int ctype)
103{
104 static const char *ctypes[] = {
105 [0x0] = "CONTROL", [0x1] = "STATUS",
106 [0x2] = "SPECIFIC INQUIRY", [0x3] = "NOTIFY",
107 [0x4] = "GENERAL INQUIRY", [0x8] = "NOT IMPLEMENTED",
108 [0x9] = "ACCEPTED", [0xa] = "REJECTED",
109 [0xb] = "IN TRANSITION", [0xc] = "IMPLEMENTED/STABLE",
110 [0xd] = "CHANGED", [0xf] = "INTERIM",
111 };
112 const char *ret = ctype < ARRAY_SIZE(ctypes) ? ctypes[ctype] : NULL;
113
114 return ret ? ret : "?";
115}
116
117static const char *debug_fcp_opcode(unsigned int opcode,
118 const u8 *data, size_t length)
119{
120 switch (opcode) {
121 case AVC_OPCODE_VENDOR: break;
122 case AVC_OPCODE_READ_DESCRIPTOR: return "ReadDescriptor";
123 case AVC_OPCODE_DSIT: return "DirectSelectInfo.Type";
124 case AVC_OPCODE_DSD: return "DirectSelectData";
125 default: return "?";
126 }
127
128 if (length < 7 ||
129 data[3] != SFE_VENDOR_DE_COMPANYID_0 ||
130 data[4] != SFE_VENDOR_DE_COMPANYID_1 ||
131 data[5] != SFE_VENDOR_DE_COMPANYID_2)
132 return "Vendor";
133
134 switch (data[6]) {
135 case SFE_VENDOR_OPCODE_REGISTER_REMOTE_CONTROL: return "RegisterRC";
136 case SFE_VENDOR_OPCODE_LNB_CONTROL: return "LNBControl";
137 case SFE_VENDOR_OPCODE_TUNE_QPSK: return "TuneQPSK";
138 case SFE_VENDOR_OPCODE_HOST2CA: return "Host2CA";
139 case SFE_VENDOR_OPCODE_CA2HOST: return "CA2Host";
140 }
141 return "Vendor";
142}
143
144static void debug_fcp(const u8 *data, size_t length)
145{
146 unsigned int subunit_type, subunit_id, op;
147 const char *prefix = data[0] > 7 ? "FCP <- " : "FCP -> ";
148
149 if (avc_debug & AVC_DEBUG_FCP_SUBACTIONS) {
150 subunit_type = data[1] >> 3;
151 subunit_id = data[1] & 7;
152 op = subunit_type == 0x1e || subunit_id == 5 ? ~0 : data[2];
153 printk(KERN_INFO "%ssu=%x.%x l=%d: %-8s - %s\n",
154 prefix, subunit_type, subunit_id, length,
155 debug_fcp_ctype(data[0]),
156 debug_fcp_opcode(op, data, length));
157 }
158
159 if (avc_debug & AVC_DEBUG_FCP_PAYLOADS)
160 print_hex_dump(KERN_INFO, prefix, DUMP_PREFIX_NONE, 16, 1,
161 data, length, false);
162}
163
164static int __avc_write(struct firedtv *fdtv,
165 const struct avc_command_frame *c, struct avc_response_frame *r)
166{
167 int err, retry;
168
169 if (r)
170 fdtv->avc_reply_received = false;
171
172 for (retry = 0; retry < 6; retry++) {
173 if (unlikely(avc_debug))
174 debug_fcp(&c->ctype, c->length);
175
176 err = fdtv->backend->write(fdtv, FCP_COMMAND_REGISTER,
177 (void *)&c->ctype, c->length);
178 if (err) {
179 fdtv->avc_reply_received = true;
180 dev_err(fdtv->device, "FCP command write failed\n");
181 return err;
182 }
183
184 if (!r)
185 return 0;
186
187 /*
188 * AV/C specs say that answers should be sent within 150 ms.
189 * Time out after 200 ms.
190 */
191 if (wait_event_timeout(fdtv->avc_wait,
192 fdtv->avc_reply_received,
193 msecs_to_jiffies(200)) != 0) {
194 r->length = fdtv->response_length;
195 memcpy(&r->response, fdtv->response, r->length);
196
197 return 0;
198 }
199 }
200 dev_err(fdtv->device, "FCP response timed out\n");
201 return -ETIMEDOUT;
202}
203
204static int avc_write(struct firedtv *fdtv,
205 const struct avc_command_frame *c, struct avc_response_frame *r)
206{
207 int ret;
208
209 if (mutex_lock_interruptible(&fdtv->avc_mutex))
210 return -EINTR;
211
212 ret = __avc_write(fdtv, c, r);
213
214 mutex_unlock(&fdtv->avc_mutex);
215 return ret;
216}
217
218int avc_recv(struct firedtv *fdtv, void *data, size_t length)
219{
220 struct avc_response_frame *r =
221 data - offsetof(struct avc_response_frame, response);
222
223 if (unlikely(avc_debug))
224 debug_fcp(data, length);
225
226 if (length >= 8 &&
227 r->operand[0] == SFE_VENDOR_DE_COMPANYID_0 &&
228 r->operand[1] == SFE_VENDOR_DE_COMPANYID_1 &&
229 r->operand[2] == SFE_VENDOR_DE_COMPANYID_2 &&
230 r->operand[3] == SFE_VENDOR_OPCODE_REGISTER_REMOTE_CONTROL) {
231 if (r->response == AVC_RESPONSE_CHANGED) {
232 fdtv_handle_rc(fdtv,
233 r->operand[4] << 8 | r->operand[5]);
234 schedule_work(&fdtv->remote_ctrl_work);
235 } else if (r->response != AVC_RESPONSE_INTERIM) {
236 dev_info(fdtv->device,
237 "remote control result = %d\n", r->response);
238 }
239 return 0;
240 }
241
242 if (fdtv->avc_reply_received) {
243 dev_err(fdtv->device, "out-of-order AVC response, ignored\n");
244 return -EIO;
245 }
246
247 memcpy(fdtv->response, data, length);
248 fdtv->response_length = length;
249
250 fdtv->avc_reply_received = true;
251 wake_up(&fdtv->avc_wait);
252
253 return 0;
254}
255
256/*
257 * tuning command for setting the relative LNB frequency
258 * (not supported by the AVC standard)
259 */
260static void avc_tuner_tuneqpsk(struct firedtv *fdtv,
261 struct dvb_frontend_parameters *params,
262 struct avc_command_frame *c)
263{
264 c->opcode = AVC_OPCODE_VENDOR;
265
266 c->operand[0] = SFE_VENDOR_DE_COMPANYID_0;
267 c->operand[1] = SFE_VENDOR_DE_COMPANYID_1;
268 c->operand[2] = SFE_VENDOR_DE_COMPANYID_2;
269 c->operand[3] = SFE_VENDOR_OPCODE_TUNE_QPSK;
270
271 c->operand[4] = (params->frequency >> 24) & 0xff;
272 c->operand[5] = (params->frequency >> 16) & 0xff;
273 c->operand[6] = (params->frequency >> 8) & 0xff;
274 c->operand[7] = params->frequency & 0xff;
275
276 c->operand[8] = ((params->u.qpsk.symbol_rate / 1000) >> 8) & 0xff;
277 c->operand[9] = (params->u.qpsk.symbol_rate / 1000) & 0xff;
278
279 switch (params->u.qpsk.fec_inner) {
280 case FEC_1_2: c->operand[10] = 0x1; break;
281 case FEC_2_3: c->operand[10] = 0x2; break;
282 case FEC_3_4: c->operand[10] = 0x3; break;
283 case FEC_5_6: c->operand[10] = 0x4; break;
284 case FEC_7_8: c->operand[10] = 0x5; break;
285 case FEC_4_5:
286 case FEC_8_9:
287 case FEC_AUTO:
288 default: c->operand[10] = 0x0;
289 }
290
291 if (fdtv->voltage == 0xff)
292 c->operand[11] = 0xff;
293 else if (fdtv->voltage == SEC_VOLTAGE_18) /* polarisation */
294 c->operand[11] = 0;
295 else
296 c->operand[11] = 1;
297
298 if (fdtv->tone == 0xff)
299 c->operand[12] = 0xff;
300 else if (fdtv->tone == SEC_TONE_ON) /* band */
301 c->operand[12] = 1;
302 else
303 c->operand[12] = 0;
304
305 if (fdtv->type == FIREDTV_DVB_S2) {
306 c->operand[13] = 0x1;
307 c->operand[14] = 0xff;
308 c->operand[15] = 0xff;
309 c->length = 20;
310 } else {
311 c->length = 16;
312 }
313}
314
315static void avc_tuner_dsd_dvb_c(struct dvb_frontend_parameters *params,
316 struct avc_command_frame *c)
317{
318 c->opcode = AVC_OPCODE_DSD;
319
320 c->operand[0] = 0; /* source plug */
321 c->operand[1] = 0xd2; /* subfunction replace */
322 c->operand[2] = 0x20; /* system id = DVB */
323 c->operand[3] = 0x00; /* antenna number */
324 c->operand[4] = 0x11; /* system_specific_multiplex selection_length */
325
326 /* multiplex_valid_flags, high byte */
327 c->operand[5] = 0 << 7 /* reserved */
328 | 0 << 6 /* Polarisation */
329 | 0 << 5 /* Orbital_Pos */
330 | 1 << 4 /* Frequency */
331 | 1 << 3 /* Symbol_Rate */
332 | 0 << 2 /* FEC_outer */
333 | (params->u.qam.fec_inner != FEC_AUTO ? 1 << 1 : 0)
334 | (params->u.qam.modulation != QAM_AUTO ? 1 << 0 : 0);
335
336 /* multiplex_valid_flags, low byte */
337 c->operand[6] = 0 << 7 /* NetworkID */
338 | 0 << 0 /* reserved */ ;
339
340 c->operand[7] = 0x00;
341 c->operand[8] = 0x00;
342 c->operand[9] = 0x00;
343 c->operand[10] = 0x00;
344
345 c->operand[11] = (((params->frequency / 4000) >> 16) & 0xff) | (2 << 6);
346 c->operand[12] = ((params->frequency / 4000) >> 8) & 0xff;
347 c->operand[13] = (params->frequency / 4000) & 0xff;
348 c->operand[14] = ((params->u.qpsk.symbol_rate / 1000) >> 12) & 0xff;
349 c->operand[15] = ((params->u.qpsk.symbol_rate / 1000) >> 4) & 0xff;
350 c->operand[16] = ((params->u.qpsk.symbol_rate / 1000) << 4) & 0xf0;
351 c->operand[17] = 0x00;
352
353 switch (params->u.qpsk.fec_inner) {
354 case FEC_1_2: c->operand[18] = 0x1; break;
355 case FEC_2_3: c->operand[18] = 0x2; break;
356 case FEC_3_4: c->operand[18] = 0x3; break;
357 case FEC_5_6: c->operand[18] = 0x4; break;
358 case FEC_7_8: c->operand[18] = 0x5; break;
359 case FEC_8_9: c->operand[18] = 0x6; break;
360 case FEC_4_5: c->operand[18] = 0x8; break;
361 case FEC_AUTO:
362 default: c->operand[18] = 0x0;
363 }
364
365 switch (params->u.qam.modulation) {
366 case QAM_16: c->operand[19] = 0x08; break;
367 case QAM_32: c->operand[19] = 0x10; break;
368 case QAM_64: c->operand[19] = 0x18; break;
369 case QAM_128: c->operand[19] = 0x20; break;
370 case QAM_256: c->operand[19] = 0x28; break;
371 case QAM_AUTO:
372 default: c->operand[19] = 0x00;
373 }
374
375 c->operand[20] = 0x00;
376 c->operand[21] = 0x00;
377 /* Nr_of_dsd_sel_specs = 0 -> no PIDs are transmitted */
378 c->operand[22] = 0x00;
379
380 c->length = 28;
381}
382
383static void avc_tuner_dsd_dvb_t(struct dvb_frontend_parameters *params,
384 struct avc_command_frame *c)
385{
386 struct dvb_ofdm_parameters *ofdm = &params->u.ofdm;
387
388 c->opcode = AVC_OPCODE_DSD;
389
390 c->operand[0] = 0; /* source plug */
391 c->operand[1] = 0xd2; /* subfunction replace */
392 c->operand[2] = 0x20; /* system id = DVB */
393 c->operand[3] = 0x00; /* antenna number */
394 c->operand[4] = 0x0c; /* system_specific_multiplex selection_length */
395
396 /* multiplex_valid_flags, high byte */
397 c->operand[5] =
398 0 << 7 /* reserved */
399 | 1 << 6 /* CenterFrequency */
400 | (ofdm->bandwidth != BANDWIDTH_AUTO ? 1 << 5 : 0)
401 | (ofdm->constellation != QAM_AUTO ? 1 << 4 : 0)
402 | (ofdm->hierarchy_information != HIERARCHY_AUTO ? 1 << 3 : 0)
403 | (ofdm->code_rate_HP != FEC_AUTO ? 1 << 2 : 0)
404 | (ofdm->code_rate_LP != FEC_AUTO ? 1 << 1 : 0)
405 | (ofdm->guard_interval != GUARD_INTERVAL_AUTO ? 1 << 0 : 0);
406
407 /* multiplex_valid_flags, low byte */
408 c->operand[6] =
409 0 << 7 /* NetworkID */
410 | (ofdm->transmission_mode != TRANSMISSION_MODE_AUTO ? 1 << 6 : 0)
411 | 0 << 5 /* OtherFrequencyFlag */
412 | 0 << 0 /* reserved */ ;
413
414 c->operand[7] = 0x0;
415 c->operand[8] = (params->frequency / 10) >> 24;
416 c->operand[9] = ((params->frequency / 10) >> 16) & 0xff;
417 c->operand[10] = ((params->frequency / 10) >> 8) & 0xff;
418 c->operand[11] = (params->frequency / 10) & 0xff;
419
420 switch (ofdm->bandwidth) {
421 case BANDWIDTH_7_MHZ: c->operand[12] = 0x20; break;
422 case BANDWIDTH_8_MHZ:
423 case BANDWIDTH_6_MHZ: /* not defined by AVC spec */
424 case BANDWIDTH_AUTO:
425 default: c->operand[12] = 0x00;
426 }
427
428 switch (ofdm->constellation) {
429 case QAM_16: c->operand[13] = 1 << 6; break;
430 case QAM_64: c->operand[13] = 2 << 6; break;
431 case QPSK:
432 default: c->operand[13] = 0x00;
433 }
434
435 switch (ofdm->hierarchy_information) {
436 case HIERARCHY_1: c->operand[13] |= 1 << 3; break;
437 case HIERARCHY_2: c->operand[13] |= 2 << 3; break;
438 case HIERARCHY_4: c->operand[13] |= 3 << 3; break;
439 case HIERARCHY_AUTO:
440 case HIERARCHY_NONE:
441 default: break;
442 }
443
444 switch (ofdm->code_rate_HP) {
445 case FEC_2_3: c->operand[13] |= 1; break;
446 case FEC_3_4: c->operand[13] |= 2; break;
447 case FEC_5_6: c->operand[13] |= 3; break;
448 case FEC_7_8: c->operand[13] |= 4; break;
449 case FEC_1_2:
450 default: break;
451 }
452
453 switch (ofdm->code_rate_LP) {
454 case FEC_2_3: c->operand[14] = 1 << 5; break;
455 case FEC_3_4: c->operand[14] = 2 << 5; break;
456 case FEC_5_6: c->operand[14] = 3 << 5; break;
457 case FEC_7_8: c->operand[14] = 4 << 5; break;
458 case FEC_1_2:
459 default: c->operand[14] = 0x00; break;
460 }
461
462 switch (ofdm->guard_interval) {
463 case GUARD_INTERVAL_1_16: c->operand[14] |= 1 << 3; break;
464 case GUARD_INTERVAL_1_8: c->operand[14] |= 2 << 3; break;
465 case GUARD_INTERVAL_1_4: c->operand[14] |= 3 << 3; break;
466 case GUARD_INTERVAL_1_32:
467 case GUARD_INTERVAL_AUTO:
468 default: break;
469 }
470
471 switch (ofdm->transmission_mode) {
472 case TRANSMISSION_MODE_8K: c->operand[14] |= 1 << 1; break;
473 case TRANSMISSION_MODE_2K:
474 case TRANSMISSION_MODE_AUTO:
475 default: break;
476 }
477
478 c->operand[15] = 0x00; /* network_ID[0] */
479 c->operand[16] = 0x00; /* network_ID[1] */
480 /* Nr_of_dsd_sel_specs = 0 -> no PIDs are transmitted */
481 c->operand[17] = 0x00;
482
483 c->length = 24;
484}
485
486int avc_tuner_dsd(struct firedtv *fdtv,
487 struct dvb_frontend_parameters *params)
488{
489 char buffer[sizeof(struct avc_command_frame)];
490 struct avc_command_frame *c = (void *)buffer;
491 struct avc_response_frame *r = (void *)buffer; /* FIXME: unused */
492
493 memset(c, 0, sizeof(*c));
494
495 c->ctype = AVC_CTYPE_CONTROL;
496 c->subunit = AVC_SUBUNIT_TYPE_TUNER | fdtv->subunit;
497
498 switch (fdtv->type) {
499 case FIREDTV_DVB_S:
500 case FIREDTV_DVB_S2: avc_tuner_tuneqpsk(fdtv, params, c); break;
501 case FIREDTV_DVB_C: avc_tuner_dsd_dvb_c(params, c); break;
502 case FIREDTV_DVB_T: avc_tuner_dsd_dvb_t(params, c); break;
503 default:
504 BUG();
505 }
506
507 if (avc_write(fdtv, c, r) < 0)
508 return -EIO;
509
510 msleep(500);
511#if 0
512 /* FIXME: */
513 /* u8 *status was an out-parameter of avc_tuner_dsd, unused by caller */
514 if (status)
515 *status = r->operand[2];
516#endif
517 return 0;
518}
519
520int avc_tuner_set_pids(struct firedtv *fdtv, unsigned char pidc, u16 pid[])
521{
522 char buffer[sizeof(struct avc_command_frame)];
523 struct avc_command_frame *c = (void *)buffer;
524 struct avc_response_frame *r = (void *)buffer; /* FIXME: unused */
525 int pos, k;
526
527 if (pidc > 16 && pidc != 0xff)
528 return -EINVAL;
529
530 memset(c, 0, sizeof(*c));
531
532 c->ctype = AVC_CTYPE_CONTROL;
533 c->subunit = AVC_SUBUNIT_TYPE_TUNER | fdtv->subunit;
534 c->opcode = AVC_OPCODE_DSD;
535
536 c->operand[0] = 0; /* source plug */
537 c->operand[1] = 0xd2; /* subfunction replace */
538 c->operand[2] = 0x20; /* system id = DVB */
539 c->operand[3] = 0x00; /* antenna number */
540 c->operand[4] = 0x00; /* system_specific_multiplex selection_length */
541 c->operand[5] = pidc; /* Nr_of_dsd_sel_specs */
542
543 pos = 6;
544 if (pidc != 0xff)
545 for (k = 0; k < pidc; k++) {
546 c->operand[pos++] = 0x13; /* flowfunction relay */
547 c->operand[pos++] = 0x80; /* dsd_sel_spec_valid_flags -> PID */
548 c->operand[pos++] = (pid[k] >> 8) & 0x1f;
549 c->operand[pos++] = pid[k] & 0xff;
550 c->operand[pos++] = 0x00; /* tableID */
551 c->operand[pos++] = 0x00; /* filter_length */
552 }
553
554 c->length = ALIGN(3 + pos, 4);
555
556 if (avc_write(fdtv, c, r) < 0)
557 return -EIO;
558
559 msleep(50);
560 return 0;
561}
562
563int avc_tuner_get_ts(struct firedtv *fdtv)
564{
565 char buffer[sizeof(struct avc_command_frame)];
566 struct avc_command_frame *c = (void *)buffer;
567 struct avc_response_frame *r = (void *)buffer; /* FIXME: unused */
568 int sl;
569
570 memset(c, 0, sizeof(*c));
571
572 c->ctype = AVC_CTYPE_CONTROL;
573 c->subunit = AVC_SUBUNIT_TYPE_TUNER | fdtv->subunit;
574 c->opcode = AVC_OPCODE_DSIT;
575
576 sl = fdtv->type == FIREDTV_DVB_T ? 0x0c : 0x11;
577
578 c->operand[0] = 0; /* source plug */
579 c->operand[1] = 0xd2; /* subfunction replace */
580 c->operand[2] = 0xff; /* status */
581 c->operand[3] = 0x20; /* system id = DVB */
582 c->operand[4] = 0x00; /* antenna number */
583 c->operand[5] = 0x0; /* system_specific_search_flags */
584 c->operand[6] = sl; /* system_specific_multiplex selection_length */
585 c->operand[7] = 0x00; /* valid_flags [0] */
586 c->operand[8] = 0x00; /* valid_flags [1] */
587 c->operand[7 + sl] = 0x00; /* nr_of_dsit_sel_specs (always 0) */
588
589 c->length = fdtv->type == FIREDTV_DVB_T ? 24 : 28;
590
591 if (avc_write(fdtv, c, r) < 0)
592 return -EIO;
593
594 msleep(250);
595 return 0;
596}
597
598int avc_identify_subunit(struct firedtv *fdtv)
599{
600 char buffer[sizeof(struct avc_command_frame)];
601 struct avc_command_frame *c = (void *)buffer;
602 struct avc_response_frame *r = (void *)buffer;
603
604 memset(c, 0, sizeof(*c));
605
606 c->ctype = AVC_CTYPE_CONTROL;
607 c->subunit = AVC_SUBUNIT_TYPE_TUNER | fdtv->subunit;
608 c->opcode = AVC_OPCODE_READ_DESCRIPTOR;
609
610 c->operand[0] = DESCRIPTOR_SUBUNIT_IDENTIFIER;
611 c->operand[1] = 0xff;
612 c->operand[2] = 0x00;
613 c->operand[3] = 0x00; /* length highbyte */
614 c->operand[4] = 0x08; /* length lowbyte */
615 c->operand[5] = 0x00; /* offset highbyte */
616 c->operand[6] = 0x0d; /* offset lowbyte */
617
618 c->length = 12;
619
620 if (avc_write(fdtv, c, r) < 0)
621 return -EIO;
622
623 if ((r->response != AVC_RESPONSE_STABLE &&
624 r->response != AVC_RESPONSE_ACCEPTED) ||
625 (r->operand[3] << 8) + r->operand[4] != 8) {
626 dev_err(fdtv->device, "cannot read subunit identifier\n");
627 return -EINVAL;
628 }
629 return 0;
630}
631
632#define SIZEOF_ANTENNA_INPUT_INFO 22
633
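/*
 * Reads the tuner status descriptor and unpacks the ANTENNA_INPUT_INFO
 * block (fixed 22-byte payload, validated against operand[9] below) into
 * the bitfields of struct firedtv_tuner_status.
 */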
634int avc_tuner_status(struct firedtv *fdtv, struct firedtv_tuner_status *stat)
635{
636 char buffer[sizeof(struct avc_command_frame)];
637 struct avc_command_frame *c = (void *)buffer;
638 struct avc_response_frame *r = (void *)buffer;
639 int length;
640
641 memset(c, 0, sizeof(*c));
642
643 c->ctype = AVC_CTYPE_CONTROL;
644 c->subunit = AVC_SUBUNIT_TYPE_TUNER | fdtv->subunit;
645 c->opcode = AVC_OPCODE_READ_DESCRIPTOR;
646
647 c->operand[0] = DESCRIPTOR_TUNER_STATUS;
648 c->operand[1] = 0xff; /* read_result_status */
649 c->operand[2] = 0x00; /* reserved */
650 c->operand[3] = 0; /* SIZEOF_ANTENNA_INPUT_INFO >> 8; */
651 c->operand[4] = 0; /* SIZEOF_ANTENNA_INPUT_INFO & 0xff; */
652 c->operand[5] = 0x00;
653 c->operand[6] = 0x00;
654
655 c->length = 12;
656
657 if (avc_write(fdtv, c, r) < 0)
658 return -EIO;
659
660 if (r->response != AVC_RESPONSE_STABLE &&
661 r->response != AVC_RESPONSE_ACCEPTED) {
662 dev_err(fdtv->device, "cannot read tuner status\n");
663 return -EINVAL;
664 }
665
666 length = r->operand[9];
667 if (r->operand[1] != 0x10 || length != SIZEOF_ANTENNA_INPUT_INFO) {
668 dev_err(fdtv->device, "got invalid tuner status\n");
669 return -EINVAL;
670 }
671
672 stat->active_system = r->operand[10];
673 stat->searching = r->operand[11] >> 7 & 1;
674 stat->moving = r->operand[11] >> 6 & 1;
675 stat->no_rf = r->operand[11] >> 5 & 1;
676 stat->input = r->operand[12] >> 7 & 1;
677 stat->selected_antenna = r->operand[12] & 0x7f;
678 stat->ber = r->operand[13] << 24 |
679 r->operand[14] << 16 |
680 r->operand[15] << 8 |
681 r->operand[16];
682 stat->signal_strength = r->operand[17];
683 stat->raster_frequency = r->operand[18] >> 6 & 2;
684 stat->rf_frequency = (r->operand[18] & 0x3f) << 16 |
685 r->operand[19] << 8 |
686 r->operand[20];
687 stat->man_dep_info_length = r->operand[21];
688 stat->front_end_error = r->operand[22] >> 4 & 1;
689 stat->antenna_error = r->operand[22] >> 3 & 1;
690 stat->front_end_power_status = r->operand[22] >> 1 & 1;
691 stat->power_supply = r->operand[22] & 1;
692 stat->carrier_noise_ratio = r->operand[23] << 8 |
693 r->operand[24];
694 stat->power_supply_voltage = r->operand[27];
695 stat->antenna_voltage = r->operand[28];
696 stat->firewire_bus_voltage = r->operand[29];
697 stat->ca_mmi = r->operand[30] & 1;
698 stat->ca_pmt_reply = r->operand[31] >> 7 & 1;
699 stat->ca_date_time_request = r->operand[31] >> 6 & 1;
700 stat->ca_application_info = r->operand[31] >> 5 & 1;
701 stat->ca_module_present_status = r->operand[31] >> 4 & 1;
702 stat->ca_dvb_flag = r->operand[31] >> 3 & 1;
703 stat->ca_error_flag = r->operand[31] >> 2 & 1;
704 stat->ca_initialization_status = r->operand[31] >> 1 & 1;
705
706 return 0;
707}
708
709int avc_lnb_control(struct firedtv *fdtv, char voltage, char burst,
710 char conttone, char nrdiseq,
711 struct dvb_diseqc_master_cmd *diseqcmd)
712{
713 char buffer[sizeof(struct avc_command_frame)];
714 struct avc_command_frame *c = (void *)buffer;
715 struct avc_response_frame *r = (void *)buffer;
716 int i, j, k;
717
718 memset(c, 0, sizeof(*c));
719
720 c->ctype = AVC_CTYPE_CONTROL;
721 c->subunit = AVC_SUBUNIT_TYPE_TUNER | fdtv->subunit;
722 c->opcode = AVC_OPCODE_VENDOR;
723
724 c->operand[0] = SFE_VENDOR_DE_COMPANYID_0;
725 c->operand[1] = SFE_VENDOR_DE_COMPANYID_1;
726 c->operand[2] = SFE_VENDOR_DE_COMPANYID_2;
727 c->operand[3] = SFE_VENDOR_OPCODE_LNB_CONTROL;
728
729 c->operand[4] = voltage;
730 c->operand[5] = nrdiseq;
731
732 i = 6;
733
734 for (j = 0; j < nrdiseq; j++) {
735 c->operand[i++] = diseqcmd[j].msg_len;
736
737 for (k = 0; k < diseqcmd[j].msg_len; k++)
738 c->operand[i++] = diseqcmd[j].msg[k];
739 }
740
741 c->operand[i++] = burst;
742 c->operand[i++] = conttone;
743
744 c->length = ALIGN(3 + i, 4);
745
746 if (avc_write(fdtv, c, r) < 0)
747 return -EIO;
748
749 if (r->response != AVC_RESPONSE_ACCEPTED) {
750 dev_err(fdtv->device, "LNB control failed\n");
751 return -EINVAL;
752 }
753
754 return 0;
755}
756
757int avc_register_remote_control(struct firedtv *fdtv)
758{
759 char buffer[sizeof(struct avc_command_frame)];
760 struct avc_command_frame *c = (void *)buffer;
761
762 memset(c, 0, sizeof(*c));
763
764 c->ctype = AVC_CTYPE_NOTIFY;
765 c->subunit = AVC_SUBUNIT_TYPE_UNIT | 7;
766 c->opcode = AVC_OPCODE_VENDOR;
767
768 c->operand[0] = SFE_VENDOR_DE_COMPANYID_0;
769 c->operand[1] = SFE_VENDOR_DE_COMPANYID_1;
770 c->operand[2] = SFE_VENDOR_DE_COMPANYID_2;
771 c->operand[3] = SFE_VENDOR_OPCODE_REGISTER_REMOTE_CONTROL;
772
773 c->length = 8;
774
775 return avc_write(fdtv, c, NULL);
776}
777
778void avc_remote_ctrl_work(struct work_struct *work)
779{
780 struct firedtv *fdtv =
781 container_of(work, struct firedtv, remote_ctrl_work);
782
783 /* Should it be rescheduled in failure cases? */
784 avc_register_remote_control(fdtv);
785}
786
787#if 0 /* FIXME: unused */
788int avc_tuner_host2ca(struct firedtv *fdtv)
789{
790 char buffer[sizeof(struct avc_command_frame)];
791 struct avc_command_frame *c = (void *)buffer;
792 struct avc_response_frame *r = (void *)buffer; /* FIXME: unused */
793
794 memset(c, 0, sizeof(*c));
795
796 c->ctype = AVC_CTYPE_CONTROL;
797 c->subunit = AVC_SUBUNIT_TYPE_TUNER | fdtv->subunit;
798 c->opcode = AVC_OPCODE_VENDOR;
799
800 c->operand[0] = SFE_VENDOR_DE_COMPANYID_0;
801 c->operand[1] = SFE_VENDOR_DE_COMPANYID_1;
802 c->operand[2] = SFE_VENDOR_DE_COMPANYID_2;
803 c->operand[3] = SFE_VENDOR_OPCODE_HOST2CA;
804 c->operand[4] = 0; /* slot */
805 c->operand[5] = SFE_VENDOR_TAG_CA_APPLICATION_INFO; /* ca tag */
806 c->operand[6] = 0; /* more/last */
807 c->operand[7] = 0; /* length */
808
809 c->length = 12;
810
811 if (avc_write(fdtv, c, r) < 0)
812 return -EIO;
813
814 return 0;
815}
816#endif
817
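/*
 * CA objects returned by the CA2HOST vendor command carry an ASN.1
 * BER-style length field in operand[7]: if bit 7 is set, the low seven
 * bits give the number of subsequent length bytes, otherwise the byte is
 * the length itself.  get_ca_object_pos() returns the offset of the first
 * payload byte; get_ca_object_length() currently handles only the
 * single-byte form (the multi-byte variant sits in the #if 0 block).
 */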
818static int get_ca_object_pos(struct avc_response_frame *r)
819{
820 int length = 1;
821
822 /* Check length of length field */
823 if (r->operand[7] & 0x80)
824 length = (r->operand[7] & 0x7f) + 1;
825 return length + 7;
826}
827
828static int get_ca_object_length(struct avc_response_frame *r)
829{
830#if 0 /* FIXME: unused */
831 int size = 0;
832 int i;
833
834 if (r->operand[7] & 0x80)
835 for (i = 0; i < (r->operand[7] & 0x7f); i++) {
836 size <<= 8;
837 size += r->operand[8 + i];
838 }
839#endif
840 return r->operand[7];
841}
842
843int avc_ca_app_info(struct firedtv *fdtv, char *app_info, unsigned int *len)
844{
845 char buffer[sizeof(struct avc_command_frame)];
846 struct avc_command_frame *c = (void *)buffer;
847 struct avc_response_frame *r = (void *)buffer;
848 int pos;
849
850 memset(c, 0, sizeof(*c));
851
852 c->ctype = AVC_CTYPE_STATUS;
853 c->subunit = AVC_SUBUNIT_TYPE_TUNER | fdtv->subunit;
854 c->opcode = AVC_OPCODE_VENDOR;
855
856 c->operand[0] = SFE_VENDOR_DE_COMPANYID_0;
857 c->operand[1] = SFE_VENDOR_DE_COMPANYID_1;
858 c->operand[2] = SFE_VENDOR_DE_COMPANYID_2;
859 c->operand[3] = SFE_VENDOR_OPCODE_CA2HOST;
860 c->operand[4] = 0; /* slot */
861 c->operand[5] = SFE_VENDOR_TAG_CA_APPLICATION_INFO; /* ca tag */
862
863 c->length = 12;
864
865 if (avc_write(fdtv, c, r) < 0)
866 return -EIO;
867
868 /* FIXME: check response code and validate response data */
869
870 pos = get_ca_object_pos(r);
871 app_info[0] = (EN50221_TAG_APP_INFO >> 16) & 0xff;
872 app_info[1] = (EN50221_TAG_APP_INFO >> 8) & 0xff;
873 app_info[2] = (EN50221_TAG_APP_INFO >> 0) & 0xff;
874 app_info[3] = 6 + r->operand[pos + 4];
875 app_info[4] = 0x01;
876 memcpy(&app_info[5], &r->operand[pos], 5 + r->operand[pos + 4]);
877 *len = app_info[3] + 4;
878
879 return 0;
880}
881
882int avc_ca_info(struct firedtv *fdtv, char *app_info, unsigned int *len)
883{
884 char buffer[sizeof(struct avc_command_frame)];
885 struct avc_command_frame *c = (void *)buffer;
886 struct avc_response_frame *r = (void *)buffer;
887 int pos;
888
889 memset(c, 0, sizeof(*c));
890
891 c->ctype = AVC_CTYPE_STATUS;
892 c->subunit = AVC_SUBUNIT_TYPE_TUNER | fdtv->subunit;
893 c->opcode = AVC_OPCODE_VENDOR;
894
895 c->operand[0] = SFE_VENDOR_DE_COMPANYID_0;
896 c->operand[1] = SFE_VENDOR_DE_COMPANYID_1;
897 c->operand[2] = SFE_VENDOR_DE_COMPANYID_2;
898 c->operand[3] = SFE_VENDOR_OPCODE_CA2HOST;
899 c->operand[4] = 0; /* slot */
900 c->operand[5] = SFE_VENDOR_TAG_CA_APPLICATION_INFO; /* ca tag */
901
902 c->length = 12;
903
904 if (avc_write(fdtv, c, r) < 0)
905 return -EIO;
906
907 pos = get_ca_object_pos(r);
908 app_info[0] = (EN50221_TAG_CA_INFO >> 16) & 0xff;
909 app_info[1] = (EN50221_TAG_CA_INFO >> 8) & 0xff;
910 app_info[2] = (EN50221_TAG_CA_INFO >> 0) & 0xff;
911 app_info[3] = 2;
912 app_info[4] = r->operand[pos + 0];
913 app_info[5] = r->operand[pos + 1];
914 *len = app_info[3] + 4;
915
916 return 0;
917}
918
919int avc_ca_reset(struct firedtv *fdtv)
920{
921 char buffer[sizeof(struct avc_command_frame)];
922 struct avc_command_frame *c = (void *)buffer;
923 struct avc_response_frame *r = (void *)buffer; /* FIXME: unused */
924
925 memset(c, 0, sizeof(*c));
926
927 c->ctype = AVC_CTYPE_CONTROL;
928 c->subunit = AVC_SUBUNIT_TYPE_TUNER | fdtv->subunit;
929 c->opcode = AVC_OPCODE_VENDOR;
930
931 c->operand[0] = SFE_VENDOR_DE_COMPANYID_0;
932 c->operand[1] = SFE_VENDOR_DE_COMPANYID_1;
933 c->operand[2] = SFE_VENDOR_DE_COMPANYID_2;
934 c->operand[3] = SFE_VENDOR_OPCODE_HOST2CA;
935 c->operand[4] = 0; /* slot */
936 c->operand[5] = SFE_VENDOR_TAG_CA_RESET; /* ca tag */
937 c->operand[6] = 0; /* more/last */
938 c->operand[7] = 1; /* length */
939 c->operand[8] = 0; /* force hardware reset */
940
941 c->length = 12;
942
943 if (avc_write(fdtv, c, r) < 0)
944 return -EIO;
945
946 return 0;
947}
948
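/*
 * Re-encapsulates an EN 50221 ca_pmt object into a PMT-style section for
 * the vendor-specific HOST2CA command: a fixed section header is built in
 * operands 10..21, programme- and stream-level CA descriptors are copied
 * from the incoming message (dropping the pmt_cmd_id bytes), the two
 * length fields in operands 7 and 12 are patched once the total size is
 * known, and a CRC32 over the section is appended at the end.
 */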
949int avc_ca_pmt(struct firedtv *fdtv, char *msg, int length)
950{
951 char buffer[sizeof(struct avc_command_frame)];
952 struct avc_command_frame *c = (void *)buffer;
953 struct avc_response_frame *r = (void *)buffer;
954 int list_management;
955 int program_info_length;
956 int pmt_cmd_id;
957 int read_pos;
958 int write_pos;
959 int es_info_length;
960 int crc32_csum;
961
962 memset(c, 0, sizeof(*c));
963
964 c->ctype = AVC_CTYPE_CONTROL;
965 c->subunit = AVC_SUBUNIT_TYPE_TUNER | fdtv->subunit;
966 c->opcode = AVC_OPCODE_VENDOR;
967
968 if (msg[0] != EN50221_LIST_MANAGEMENT_ONLY) {
969 dev_info(fdtv->device, "forcing list_management to ONLY\n");
970 msg[0] = EN50221_LIST_MANAGEMENT_ONLY;
971 }
972 /* We take the cmd_id from the programme level only! */
973 list_management = msg[0];
974 program_info_length = ((msg[4] & 0x0f) << 8) + msg[5];
975 if (program_info_length > 0)
976 program_info_length--; /* Remove pmt_cmd_id */
977 pmt_cmd_id = msg[6];
978
979 c->operand[0] = SFE_VENDOR_DE_COMPANYID_0;
980 c->operand[1] = SFE_VENDOR_DE_COMPANYID_1;
981 c->operand[2] = SFE_VENDOR_DE_COMPANYID_2;
982 c->operand[3] = SFE_VENDOR_OPCODE_HOST2CA;
983 c->operand[4] = 0; /* slot */
984 c->operand[5] = SFE_VENDOR_TAG_CA_PMT; /* ca tag */
985 c->operand[6] = 0; /* more/last */
986 /* c->operand[7] = XXXprogram_info_length + 17; */ /* length */
987 c->operand[8] = list_management;
988 c->operand[9] = 0x01; /* pmt_cmd=OK_descramble */
989
990 /* TS program map table */
991
992 c->operand[10] = 0x02; /* Table id=2 */
993 c->operand[11] = 0x80; /* Section syntax + length */
994 /* c->operand[12] = XXXprogram_info_length + 12; */
995 c->operand[13] = msg[1]; /* Program number */
996 c->operand[14] = msg[2];
997 c->operand[15] = 0x01; /* Version number=0 + current/next=1 */
998 c->operand[16] = 0x00; /* Section number=0 */
999 c->operand[17] = 0x00; /* Last section number=0 */
1000 c->operand[18] = 0x1f; /* PCR_PID=1FFF */
1001 c->operand[19] = 0xff;
1002 c->operand[20] = (program_info_length >> 8); /* Program info length */
1003 c->operand[21] = (program_info_length & 0xff);
1004
1005 /* CA descriptors at programme level */
1006 read_pos = 6;
1007 write_pos = 22;
1008 if (program_info_length > 0) {
1009 pmt_cmd_id = msg[read_pos++];
1010 if (pmt_cmd_id != 1 && pmt_cmd_id != 4)
1011 dev_err(fdtv->device,
1012 "invalid pmt_cmd_id %d\n", pmt_cmd_id);
1013
1014 memcpy(&c->operand[write_pos], &msg[read_pos],
1015 program_info_length);
1016 read_pos += program_info_length;
1017 write_pos += program_info_length;
1018 }
1019 while (read_pos < length) {
1020 c->operand[write_pos++] = msg[read_pos++];
1021 c->operand[write_pos++] = msg[read_pos++];
1022 c->operand[write_pos++] = msg[read_pos++];
1023 es_info_length =
1024 ((msg[read_pos] & 0x0f) << 8) + msg[read_pos + 1];
1025 read_pos += 2;
1026 if (es_info_length > 0)
1027 es_info_length--; /* Remove pmt_cmd_id */
1028 c->operand[write_pos++] = es_info_length >> 8;
1029 c->operand[write_pos++] = es_info_length & 0xff;
1030 if (es_info_length > 0) {
1031 pmt_cmd_id = msg[read_pos++];
1032 if (pmt_cmd_id != 1 && pmt_cmd_id != 4)
1033 dev_err(fdtv->device, "invalid pmt_cmd_id %d "
1034 "at stream level\n", pmt_cmd_id);
1035
1036 memcpy(&c->operand[write_pos], &msg[read_pos],
1037 es_info_length);
1038 read_pos += es_info_length;
1039 write_pos += es_info_length;
1040 }
1041 }
1042
1043 /* CRC */
1044 c->operand[write_pos++] = 0x00;
1045 c->operand[write_pos++] = 0x00;
1046 c->operand[write_pos++] = 0x00;
1047 c->operand[write_pos++] = 0x00;
1048
1049 c->operand[7] = write_pos - 8;
1050 c->operand[12] = write_pos - 13;
1051
1052 crc32_csum = crc32_be(0, &c->operand[10], c->operand[12] - 1);
1053 c->operand[write_pos - 4] = (crc32_csum >> 24) & 0xff;
1054 c->operand[write_pos - 3] = (crc32_csum >> 16) & 0xff;
1055 c->operand[write_pos - 2] = (crc32_csum >> 8) & 0xff;
1056 c->operand[write_pos - 1] = (crc32_csum >> 0) & 0xff;
1057
1058 c->length = ALIGN(3 + write_pos, 4);
1059
1060 if (avc_write(fdtv, c, r) < 0)
1061 return -EIO;
1062
1063 if (r->response != AVC_RESPONSE_ACCEPTED) {
1064 dev_err(fdtv->device,
1065 "CA PMT failed with response 0x%x\n", r->response);
1066 return -EFAULT;
1067 }
1068
1069 return 0;
1070}
1071
1072int avc_ca_get_time_date(struct firedtv *fdtv, int *interval)
1073{
1074 char buffer[sizeof(struct avc_command_frame)];
1075 struct avc_command_frame *c = (void *)buffer;
1076 struct avc_response_frame *r = (void *)buffer;
1077
1078 memset(c, 0, sizeof(*c));
1079
1080 c->ctype = AVC_CTYPE_STATUS;
1081 c->subunit = AVC_SUBUNIT_TYPE_TUNER | fdtv->subunit;
1082 c->opcode = AVC_OPCODE_VENDOR;
1083
1084 c->operand[0] = SFE_VENDOR_DE_COMPANYID_0;
1085 c->operand[1] = SFE_VENDOR_DE_COMPANYID_1;
1086 c->operand[2] = SFE_VENDOR_DE_COMPANYID_2;
1087 c->operand[3] = SFE_VENDOR_OPCODE_CA2HOST;
1088 c->operand[4] = 0; /* slot */
1089 c->operand[5] = SFE_VENDOR_TAG_CA_DATE_TIME; /* ca tag */
1090 c->operand[6] = 0; /* more/last */
1091 c->operand[7] = 0; /* length */
1092
1093 c->length = 12;
1094
1095 if (avc_write(fdtv, c, r) < 0)
1096 return -EIO;
1097
1098 /* FIXME: check response code and validate response data */
1099
1100 *interval = r->operand[get_ca_object_pos(r)];
1101
1102 return 0;
1103}
1104
1105int avc_ca_enter_menu(struct firedtv *fdtv)
1106{
1107 char buffer[sizeof(struct avc_command_frame)];
1108 struct avc_command_frame *c = (void *)buffer;
1109 struct avc_response_frame *r = (void *)buffer; /* FIXME: unused */
1110
1111 memset(c, 0, sizeof(*c));
1112
1113 c->ctype = AVC_CTYPE_STATUS;
1114 c->subunit = AVC_SUBUNIT_TYPE_TUNER | fdtv->subunit;
1115 c->opcode = AVC_OPCODE_VENDOR;
1116
1117 c->operand[0] = SFE_VENDOR_DE_COMPANYID_0;
1118 c->operand[1] = SFE_VENDOR_DE_COMPANYID_1;
1119 c->operand[2] = SFE_VENDOR_DE_COMPANYID_2;
1120 c->operand[3] = SFE_VENDOR_OPCODE_HOST2CA;
1121 c->operand[4] = 0; /* slot */
1122 c->operand[5] = SFE_VENDOR_TAG_CA_ENTER_MENU;
1123 c->operand[6] = 0; /* more/last */
1124 c->operand[7] = 0; /* length */
1125
1126 c->length = 12;
1127
1128 if (avc_write(fdtv, c, r) < 0)
1129 return -EIO;
1130
1131 return 0;
1132}
1133
1134int avc_ca_get_mmi(struct firedtv *fdtv, char *mmi_object, unsigned int *len)
1135{
1136 char buffer[sizeof(struct avc_command_frame)];
1137 struct avc_command_frame *c = (void *)buffer;
1138 struct avc_response_frame *r = (void *)buffer;
1139
1140 memset(c, 0, sizeof(*c));
1141
1142 c->ctype = AVC_CTYPE_STATUS;
1143 c->subunit = AVC_SUBUNIT_TYPE_TUNER | fdtv->subunit;
1144 c->opcode = AVC_OPCODE_VENDOR;
1145
1146 c->operand[0] = SFE_VENDOR_DE_COMPANYID_0;
1147 c->operand[1] = SFE_VENDOR_DE_COMPANYID_1;
1148 c->operand[2] = SFE_VENDOR_DE_COMPANYID_2;
1149 c->operand[3] = SFE_VENDOR_OPCODE_CA2HOST;
1150 c->operand[4] = 0; /* slot */
1151 c->operand[5] = SFE_VENDOR_TAG_CA_MMI;
1152 c->operand[6] = 0; /* more/last */
1153 c->operand[7] = 0; /* length */
1154
1155 c->length = 12;
1156
1157 if (avc_write(fdtv, c, r) < 0)
1158 return -EIO;
1159
1160 /* FIXME: check response code and validate response data */
1161
1162 *len = get_ca_object_length(r);
1163 memcpy(mmi_object, &r->operand[get_ca_object_pos(r)], *len);
1164
1165 return 0;
1166}
1167
1168#define CMP_OUTPUT_PLUG_CONTROL_REG_0 0xfffff0000904ULL
1169
1170static int cmp_read(struct firedtv *fdtv, void *buf, u64 addr, size_t len)
1171{
1172 int ret;
1173
1174 if (mutex_lock_interruptible(&fdtv->avc_mutex))
1175 return -EINTR;
1176
1177 ret = fdtv->backend->read(fdtv, addr, buf, len);
1178 if (ret < 0)
1179 dev_err(fdtv->device, "CMP: read I/O error\n");
1180
1181 mutex_unlock(&fdtv->avc_mutex);
1182 return ret;
1183}
1184
1185static int cmp_lock(struct firedtv *fdtv, void *data, u64 addr, __be32 arg)
1186{
1187 int ret;
1188
1189 if (mutex_lock_interruptible(&fdtv->avc_mutex))
1190 return -EINTR;
1191
1192 ret = fdtv->backend->lock(fdtv, addr, data, arg);
1193 if (ret < 0)
1194 dev_err(fdtv->device, "CMP: lock I/O error\n");
1195
1196 mutex_unlock(&fdtv->avc_mutex);
1197 return ret;
1198}
1199
1200static inline u32 get_opcr(__be32 opcr, u32 mask, u32 shift)
1201{
1202 return (be32_to_cpu(opcr) >> shift) & mask;
1203}
1204
1205static inline void set_opcr(__be32 *opcr, u32 value, u32 mask, u32 shift)
1206{
1207 *opcr &= ~cpu_to_be32(mask << shift);
1208 *opcr |= cpu_to_be32((value & mask) << shift);
1209}
1210
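/*
 * Accessors for the fields of the output plug control register (oPCR) as
 * used here: bit 31 = on-line, bits 24..29 = point-to-point connection
 * counter, bits 16..21 = channel number, bits 14..15 = data rate,
 * bits 10..13 = overhead ID.  The bit positions follow directly from the
 * mask/shift pairs in the macros below.
 */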
1211#define get_opcr_online(v) get_opcr((v), 0x1, 31)
1212#define get_opcr_p2p_connections(v) get_opcr((v), 0x3f, 24)
1213#define get_opcr_channel(v) get_opcr((v), 0x3f, 16)
1214
1215#define set_opcr_p2p_connections(p, v) set_opcr((p), (v), 0x3f, 24)
1216#define set_opcr_channel(p, v) set_opcr((p), (v), 0x3f, 16)
1217#define set_opcr_data_rate(p, v) set_opcr((p), (v), 0x3, 14)
1218#define set_opcr_overhead_id(p, v) set_opcr((p), (v), 0xf, 10)
1219
1220int cmp_establish_pp_connection(struct firedtv *fdtv, int plug, int channel)
1221{
1222 __be32 old_opcr, opcr;
1223 u64 opcr_address = CMP_OUTPUT_PLUG_CONTROL_REG_0 + (plug << 2);
1224 int attempts = 0;
1225 int ret;
1226
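	/*
	 * Read-modify-update loop: opcr is modified locally and written
	 * back with a compare-swap against the previously read value
	 * (old_opcr).  If another node changed the register in the
	 * meantime, the current register contents are left in opcr and
	 * the update is retried, up to six attempts.
	 */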
1227 ret = cmp_read(fdtv, &opcr, opcr_address, 4);
1228 if (ret < 0)
1229 return ret;
1230
1231repeat:
1232 if (!get_opcr_online(opcr)) {
1233 dev_err(fdtv->device, "CMP: output offline\n");
1234 return -EBUSY;
1235 }
1236
1237 old_opcr = opcr;
1238
1239 if (get_opcr_p2p_connections(opcr)) {
1240 if (get_opcr_channel(opcr) != channel) {
1241 dev_err(fdtv->device, "CMP: cannot change channel\n");
1242 return -EBUSY;
1243 }
1244 dev_info(fdtv->device, "CMP: overlaying connection\n");
1245
1246 /* We don't allocate isochronous resources. */
1247 } else {
1248 set_opcr_channel(&opcr, channel);
1249 set_opcr_data_rate(&opcr, 2); /* S400 */
1250
1251 /* FIXME: this is for the worst case - optimize */
1252 set_opcr_overhead_id(&opcr, 0);
1253
1254 /*
1255 * FIXME: allocate isochronous channel and bandwidth at IRM
1256 * fdtv->backend->alloc_resources(fdtv, channels_mask, bw);
1257 */
1258 }
1259
1260 set_opcr_p2p_connections(&opcr, get_opcr_p2p_connections(opcr) + 1);
1261
1262 ret = cmp_lock(fdtv, &opcr, opcr_address, old_opcr);
1263 if (ret < 0)
1264 return ret;
1265
1266 if (old_opcr != opcr) {
1267 /*
1268 * FIXME: if old_opcr.P2P_Connections > 0,
1269 * deallocate isochronous channel and bandwidth at IRM
1270 * if (...)
1271 * fdtv->backend->dealloc_resources(fdtv, channel, bw);
1272 */
1273
1274 if (++attempts < 6) /* arbitrary limit */
1275 goto repeat;
1276 return -EBUSY;
1277 }
1278
1279 return 0;
1280}
1281
1282void cmp_break_pp_connection(struct firedtv *fdtv, int plug, int channel)
1283{
1284 __be32 old_opcr, opcr;
1285 u64 opcr_address = CMP_OUTPUT_PLUG_CONTROL_REG_0 + (plug << 2);
1286 int attempts = 0;
1287
1288 if (cmp_read(fdtv, &opcr, opcr_address, 4) < 0)
1289 return;
1290
1291repeat:
1292 if (!get_opcr_online(opcr) || !get_opcr_p2p_connections(opcr) ||
1293 get_opcr_channel(opcr) != channel) {
1294 dev_err(fdtv->device, "CMP: no connection to break\n");
1295 return;
1296 }
1297
1298 old_opcr = opcr;
1299 set_opcr_p2p_connections(&opcr, get_opcr_p2p_connections(opcr) - 1);
1300
1301 if (cmp_lock(fdtv, &opcr, opcr_address, old_opcr) < 0)
1302 return;
1303
1304 if (old_opcr != opcr) {
1305 /*
1306 * FIXME: if old_opcr.P2P_Connections == 1, i.e. we were last
1307 * owner, deallocate isochronous channel and bandwidth at IRM
1308 * if (...)
1309 * fdtv->backend->dealloc_resources(fdtv, channel, bw);
1310 */
1311
1312 if (++attempts < 6) /* arbitrary limit */
1313 goto repeat;
1314 }
1315}
diff --git a/drivers/media/dvb/firewire/firedtv-ci.c b/drivers/media/dvb/firewire/firedtv-ci.c
new file mode 100644
index 000000000000..eeb80d0ea3ff
--- /dev/null
+++ b/drivers/media/dvb/firewire/firedtv-ci.c
@@ -0,0 +1,260 @@
1/*
2 * FireDTV driver (formerly known as FireSAT)
3 *
4 * Copyright (C) 2004 Andreas Monitzer <andy@monitzer.com>
5 * Copyright (C) 2008 Henrik Kurelid <henrik@kurelid.se>
6 *
7 * This program is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU General Public License as
9 * published by the Free Software Foundation; either version 2 of
10 * the License, or (at your option) any later version.
11 */
12
13#include <linux/device.h>
14#include <linux/dvb/ca.h>
15#include <linux/fs.h>
16#include <linux/module.h>
17
18#include <dvbdev.h>
19
20#include "firedtv.h"
21
22#define EN50221_TAG_APP_INFO_ENQUIRY 0x9f8020
23#define EN50221_TAG_CA_INFO_ENQUIRY 0x9f8030
24#define EN50221_TAG_CA_PMT 0x9f8032
25#define EN50221_TAG_ENTER_MENU 0x9f8022
26
27static int fdtv_ca_ready(struct firedtv_tuner_status *stat)
28{
29 return stat->ca_initialization_status == 1 &&
30 stat->ca_error_flag == 0 &&
31 stat->ca_dvb_flag == 1 &&
32 stat->ca_module_present_status == 1;
33}
34
35static int fdtv_get_ca_flags(struct firedtv_tuner_status *stat)
36{
37 int flags = 0;
38
39 if (stat->ca_module_present_status == 1)
40 flags |= CA_CI_MODULE_PRESENT;
41 if (stat->ca_initialization_status == 1 &&
42 stat->ca_error_flag == 0 &&
43 stat->ca_dvb_flag == 1)
44 flags |= CA_CI_MODULE_READY;
45 return flags;
46}
47
48static int fdtv_ca_reset(struct firedtv *fdtv)
49{
50 return avc_ca_reset(fdtv) ? -EFAULT : 0;
51}
52
53static int fdtv_ca_get_caps(void *arg)
54{
55 struct ca_caps *cap = arg;
56
57 cap->slot_num = 1;
58 cap->slot_type = CA_CI;
59 cap->descr_num = 1;
60 cap->descr_type = CA_ECD;
61 return 0;
62}
63
64static int fdtv_ca_get_slot_info(struct firedtv *fdtv, void *arg)
65{
66 struct firedtv_tuner_status stat;
67 struct ca_slot_info *slot = arg;
68
69 if (avc_tuner_status(fdtv, &stat))
70 return -EFAULT;
71
72 if (slot->num != 0)
73 return -EFAULT;
74
75 slot->type = CA_CI;
76 slot->flags = fdtv_get_ca_flags(&stat);
77 return 0;
78}
79
80static int fdtv_ca_app_info(struct firedtv *fdtv, void *arg)
81{
82 struct ca_msg *reply = arg;
83
84 return avc_ca_app_info(fdtv, reply->msg, &reply->length) ? -EFAULT : 0;
85}
86
87static int fdtv_ca_info(struct firedtv *fdtv, void *arg)
88{
89 struct ca_msg *reply = arg;
90
91 return avc_ca_info(fdtv, reply->msg, &reply->length) ? -EFAULT : 0;
92}
93
94static int fdtv_ca_get_mmi(struct firedtv *fdtv, void *arg)
95{
96 struct ca_msg *reply = arg;
97
98 return avc_ca_get_mmi(fdtv, reply->msg, &reply->length) ? -EFAULT : 0;
99}
100
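/*
 * CA_GET_MSG carries no indication of which request it answers, so the
 * tag of the last message passed to CA_SEND_MSG is remembered in
 * ca_last_command and used here to decide whether to return application
 * info, CA info, or a pending MMI object.
 */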
101static int fdtv_ca_get_msg(struct firedtv *fdtv, void *arg)
102{
103 struct firedtv_tuner_status stat;
104 int err;
105
106 switch (fdtv->ca_last_command) {
107 case EN50221_TAG_APP_INFO_ENQUIRY:
108 err = fdtv_ca_app_info(fdtv, arg);
109 break;
110 case EN50221_TAG_CA_INFO_ENQUIRY:
111 err = fdtv_ca_info(fdtv, arg);
112 break;
113 default:
114 if (avc_tuner_status(fdtv, &stat))
115 err = -EFAULT;
116 else if (stat.ca_mmi == 1)
117 err = fdtv_ca_get_mmi(fdtv, arg);
118 else {
119 dev_info(fdtv->device, "unhandled CA message 0x%08x\n",
120 fdtv->ca_last_command);
121 err = -EFAULT;
122 }
123 }
124 fdtv->ca_last_command = 0;
125 return err;
126}
127
128static int fdtv_ca_pmt(struct firedtv *fdtv, void *arg)
129{
130 struct ca_msg *msg = arg;
131 int data_pos;
132 int data_length;
133 int i;
134
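	/*
	 * msg->msg[3] carries the object length in the same ASN.1
	 * BER-style short/long form used for CA objects elsewhere in this
	 * driver: with bit 7 set, the low seven bits give the number of
	 * length bytes that follow.
	 */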
135 data_pos = 4;
136 if (msg->msg[3] & 0x80) {
137 data_length = 0;
138 for (i = 0; i < (msg->msg[3] & 0x7f); i++)
139 data_length = (data_length << 8) + msg->msg[data_pos++];
140 } else {
141 data_length = msg->msg[3];
142 }
143
144 return avc_ca_pmt(fdtv, &msg->msg[data_pos], data_length) ? -EFAULT : 0;
145}
146
147static int fdtv_ca_send_msg(struct firedtv *fdtv, void *arg)
148{
149 struct ca_msg *msg = arg;
150 int err;
151
152 /* Do we need a semaphore for this? */
153 fdtv->ca_last_command =
154 (msg->msg[0] << 16) + (msg->msg[1] << 8) + msg->msg[2];
155 switch (fdtv->ca_last_command) {
156 case EN50221_TAG_CA_PMT:
157 err = fdtv_ca_pmt(fdtv, arg);
158 break;
159 case EN50221_TAG_APP_INFO_ENQUIRY:
160 /* handled in ca_get_msg */
161 err = 0;
162 break;
163 case EN50221_TAG_CA_INFO_ENQUIRY:
164 /* handled in ca_get_msg */
165 err = 0;
166 break;
167 case EN50221_TAG_ENTER_MENU:
168 err = avc_ca_enter_menu(fdtv);
169 break;
170 default:
171 dev_err(fdtv->device, "unhandled CA message 0x%08x\n",
172 fdtv->ca_last_command);
173 err = -EFAULT;
174 }
175 return err;
176}
177
178static int fdtv_ca_ioctl(struct inode *inode, struct file *file,
179 unsigned int cmd, void *arg)
180{
181 struct dvb_device *dvbdev = file->private_data;
182 struct firedtv *fdtv = dvbdev->priv;
183 struct firedtv_tuner_status stat;
184 int err;
185
186 switch (cmd) {
187 case CA_RESET:
188 err = fdtv_ca_reset(fdtv);
189 break;
190 case CA_GET_CAP:
191 err = fdtv_ca_get_caps(arg);
192 break;
193 case CA_GET_SLOT_INFO:
194 err = fdtv_ca_get_slot_info(fdtv, arg);
195 break;
196 case CA_GET_MSG:
197 err = fdtv_ca_get_msg(fdtv, arg);
198 break;
199 case CA_SEND_MSG:
200 err = fdtv_ca_send_msg(fdtv, arg);
201 break;
202 default:
203 dev_info(fdtv->device, "unhandled CA ioctl %u\n", cmd);
204 err = -EOPNOTSUPP;
205 }
206
207 /* FIXME Is this necessary? */
208 avc_tuner_status(fdtv, &stat);
209
210 return err;
211}
212
213static unsigned int fdtv_ca_io_poll(struct file *file, poll_table *wait)
214{
215 return POLLIN;
216}
217
218static struct file_operations fdtv_ca_fops = {
219 .owner = THIS_MODULE,
220 .ioctl = dvb_generic_ioctl,
221 .open = dvb_generic_open,
222 .release = dvb_generic_release,
223 .poll = fdtv_ca_io_poll,
224};
225
226static struct dvb_device fdtv_ca = {
227 .users = 1,
228 .readers = 1,
229 .writers = 1,
230 .fops = &fdtv_ca_fops,
231 .kernel_ioctl = fdtv_ca_ioctl,
232};
233
234int fdtv_ca_register(struct firedtv *fdtv)
235{
236 struct firedtv_tuner_status stat;
237 int err;
238
239 if (avc_tuner_status(fdtv, &stat))
240 return -EINVAL;
241
242 if (!fdtv_ca_ready(&stat))
243 return -EFAULT;
244
245 err = dvb_register_device(&fdtv->adapter, &fdtv->cadev,
246 &fdtv_ca, fdtv, DVB_DEVICE_CA);
247
248 if (stat.ca_application_info == 0)
249 dev_err(fdtv->device, "CaApplicationInfo is not set\n");
250 if (stat.ca_date_time_request == 1)
251 avc_ca_get_time_date(fdtv, &fdtv->ca_time_interval);
252
253 return err;
254}
255
256void fdtv_ca_release(struct firedtv *fdtv)
257{
258 if (fdtv->cadev)
259 dvb_unregister_device(fdtv->cadev);
260}
diff --git a/drivers/media/dvb/firewire/firedtv-dvb.c b/drivers/media/dvb/firewire/firedtv-dvb.c
new file mode 100644
index 000000000000..9d308dd32a5c
--- /dev/null
+++ b/drivers/media/dvb/firewire/firedtv-dvb.c
@@ -0,0 +1,364 @@
1/*
2 * FireDTV driver (formerly known as FireSAT)
3 *
4 * Copyright (C) 2004 Andreas Monitzer <andy@monitzer.com>
5 * Copyright (C) 2008 Henrik Kurelid <henrik@kurelid.se>
6 *
7 * This program is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU General Public License as
9 * published by the Free Software Foundation; either version 2 of
10 * the License, or (at your option) any later version.
11 */
12
13#include <linux/bitops.h>
14#include <linux/device.h>
15#include <linux/errno.h>
16#include <linux/kernel.h>
17#include <linux/mod_devicetable.h>
18#include <linux/module.h>
19#include <linux/mutex.h>
20#include <linux/slab.h>
21#include <linux/string.h>
22#include <linux/types.h>
23#include <linux/wait.h>
24#include <linux/workqueue.h>
25
26#include <dmxdev.h>
27#include <dvb_demux.h>
28#include <dvbdev.h>
29#include <dvb_frontend.h>
30
31#include "firedtv.h"
32
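/*
 * The tuner offers 16 PID filter slots.  channel_active is a bitmap of
 * slots in use; alloc_channel() claims the first free slot (returning 16
 * if none is free), and collect_channels() gathers the PIDs of all active
 * slots so the complete list can be resent via avc_tuner_set_pids().
 */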
33static int alloc_channel(struct firedtv *fdtv)
34{
35 int i;
36
37 for (i = 0; i < 16; i++)
38 if (!__test_and_set_bit(i, &fdtv->channel_active))
39 break;
40 return i;
41}
42
43static void collect_channels(struct firedtv *fdtv, int *pidc, u16 pid[])
44{
45 int i, n;
46
47 for (i = 0, n = 0; i < 16; i++)
48 if (test_bit(i, &fdtv->channel_active))
49 pid[n++] = fdtv->channel_pid[i];
50 *pidc = n;
51}
52
53static inline void dealloc_channel(struct firedtv *fdtv, int i)
54{
55 __clear_bit(i, &fdtv->channel_active);
56}
57
58int fdtv_start_feed(struct dvb_demux_feed *dvbdmxfeed)
59{
60 struct firedtv *fdtv = dvbdmxfeed->demux->priv;
61 int pidc, c, ret;
62 u16 pids[16];
63
64 switch (dvbdmxfeed->type) {
65 case DMX_TYPE_TS:
66 case DMX_TYPE_SEC:
67 break;
68 default:
69 dev_err(fdtv->device, "can't start dmx feed: invalid type %u\n",
70 dvbdmxfeed->type);
71 return -EINVAL;
72 }
73
74 if (mutex_lock_interruptible(&fdtv->demux_mutex))
75 return -EINTR;
76
77 if (dvbdmxfeed->type == DMX_TYPE_TS) {
78 switch (dvbdmxfeed->pes_type) {
79 case DMX_TS_PES_VIDEO:
80 case DMX_TS_PES_AUDIO:
81 case DMX_TS_PES_TELETEXT:
82 case DMX_TS_PES_PCR:
83 case DMX_TS_PES_OTHER:
84 c = alloc_channel(fdtv);
85 break;
86 default:
87 dev_err(fdtv->device,
88 "can't start dmx feed: invalid pes type %u\n",
89 dvbdmxfeed->pes_type);
90 ret = -EINVAL;
91 goto out;
92 }
93 } else {
94 c = alloc_channel(fdtv);
95 }
96
97 if (c > 15) {
98 dev_err(fdtv->device, "can't start dmx feed: busy\n");
99 ret = -EBUSY;
100 goto out;
101 }
102
103 dvbdmxfeed->priv = (typeof(dvbdmxfeed->priv))(unsigned long)c;
104 fdtv->channel_pid[c] = dvbdmxfeed->pid;
105 collect_channels(fdtv, &pidc, pids);
106
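	/*
	 * PID 0x2000 (8192) is the DVB demux convention for "whole
	 * transport stream": request the full TS from the tuner instead
	 * of programming individual PID filters.
	 */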
107 if (dvbdmxfeed->pid == 8192) {
108 ret = avc_tuner_get_ts(fdtv);
109 if (ret) {
110 dealloc_channel(fdtv, c);
111 dev_err(fdtv->device, "can't get TS\n");
112 goto out;
113 }
114 } else {
115 ret = avc_tuner_set_pids(fdtv, pidc, pids);
116 if (ret) {
117 dealloc_channel(fdtv, c);
118 dev_err(fdtv->device, "can't set PIDs\n");
119 goto out;
120 }
121 }
122out:
123 mutex_unlock(&fdtv->demux_mutex);
124
125 return ret;
126}
127
128int fdtv_stop_feed(struct dvb_demux_feed *dvbdmxfeed)
129{
130 struct dvb_demux *demux = dvbdmxfeed->demux;
131 struct firedtv *fdtv = demux->priv;
132 int pidc, c, ret;
133 u16 pids[16];
134
135 if (dvbdmxfeed->type == DMX_TYPE_TS &&
136 !((dvbdmxfeed->ts_type & TS_PACKET) &&
137 (demux->dmx.frontend->source != DMX_MEMORY_FE))) {
138
139 if (dvbdmxfeed->ts_type & TS_DECODER) {
140 if (dvbdmxfeed->pes_type >= DMX_TS_PES_OTHER ||
141 !demux->pesfilter[dvbdmxfeed->pes_type])
142 return -EINVAL;
143
144 demux->pids[dvbdmxfeed->pes_type] |= 0x8000;
145 demux->pesfilter[dvbdmxfeed->pes_type] = NULL;
146 }
147
148 if (!(dvbdmxfeed->ts_type & TS_DECODER &&
149 dvbdmxfeed->pes_type < DMX_TS_PES_OTHER))
150 return 0;
151 }
152
153 if (mutex_lock_interruptible(&fdtv->demux_mutex))
154 return -EINTR;
155
156 c = (unsigned long)dvbdmxfeed->priv;
157 dealloc_channel(fdtv, c);
158 collect_channels(fdtv, &pidc, pids);
159
160 ret = avc_tuner_set_pids(fdtv, pidc, pids);
161
162 mutex_unlock(&fdtv->demux_mutex);
163
164 return ret;
165}
166
167DVB_DEFINE_MOD_OPT_ADAPTER_NR(adapter_nr);
168
169int fdtv_dvb_register(struct firedtv *fdtv)
170{
171 int err;
172
173 err = dvb_register_adapter(&fdtv->adapter, fdtv_model_names[fdtv->type],
174 THIS_MODULE, fdtv->device, adapter_nr);
175 if (err < 0)
176 goto fail_log;
177
178 /*DMX_TS_FILTERING | DMX_SECTION_FILTERING*/
179 fdtv->demux.dmx.capabilities = 0;
180
181 fdtv->demux.priv = fdtv;
182 fdtv->demux.filternum = 16;
183 fdtv->demux.feednum = 16;
184 fdtv->demux.start_feed = fdtv_start_feed;
185 fdtv->demux.stop_feed = fdtv_stop_feed;
186 fdtv->demux.write_to_decoder = NULL;
187
188 err = dvb_dmx_init(&fdtv->demux);
189 if (err)
190 goto fail_unreg_adapter;
191
192 fdtv->dmxdev.filternum = 16;
193 fdtv->dmxdev.demux = &fdtv->demux.dmx;
194 fdtv->dmxdev.capabilities = 0;
195
196 err = dvb_dmxdev_init(&fdtv->dmxdev, &fdtv->adapter);
197 if (err)
198 goto fail_dmx_release;
199
200 fdtv->frontend.source = DMX_FRONTEND_0;
201
202 err = fdtv->demux.dmx.add_frontend(&fdtv->demux.dmx, &fdtv->frontend);
203 if (err)
204 goto fail_dmxdev_release;
205
206 err = fdtv->demux.dmx.connect_frontend(&fdtv->demux.dmx,
207 &fdtv->frontend);
208 if (err)
209 goto fail_rem_frontend;
210
211 dvb_net_init(&fdtv->adapter, &fdtv->dvbnet, &fdtv->demux.dmx);
212
213 fdtv_frontend_init(fdtv);
214 err = dvb_register_frontend(&fdtv->adapter, &fdtv->fe);
215 if (err)
216 goto fail_net_release;
217
218 err = fdtv_ca_register(fdtv);
219 if (err)
220 dev_info(fdtv->device,
221 "Conditional Access Module not enabled\n");
222 return 0;
223
224fail_net_release:
225 dvb_net_release(&fdtv->dvbnet);
226 fdtv->demux.dmx.close(&fdtv->demux.dmx);
227fail_rem_frontend:
228 fdtv->demux.dmx.remove_frontend(&fdtv->demux.dmx, &fdtv->frontend);
229fail_dmxdev_release:
230 dvb_dmxdev_release(&fdtv->dmxdev);
231fail_dmx_release:
232 dvb_dmx_release(&fdtv->demux);
233fail_unreg_adapter:
234 dvb_unregister_adapter(&fdtv->adapter);
235fail_log:
236 dev_err(fdtv->device, "DVB initialization failed\n");
237 return err;
238}
239
240void fdtv_dvb_unregister(struct firedtv *fdtv)
241{
242 fdtv_ca_release(fdtv);
243 dvb_unregister_frontend(&fdtv->fe);
244 dvb_net_release(&fdtv->dvbnet);
245 fdtv->demux.dmx.close(&fdtv->demux.dmx);
246 fdtv->demux.dmx.remove_frontend(&fdtv->demux.dmx, &fdtv->frontend);
247 dvb_dmxdev_release(&fdtv->dmxdev);
248 dvb_dmx_release(&fdtv->demux);
249 dvb_unregister_adapter(&fdtv->adapter);
250}
251
252const char *fdtv_model_names[] = {
253 [FIREDTV_UNKNOWN] = "unknown type",
254 [FIREDTV_DVB_S] = "FireDTV S/CI",
255 [FIREDTV_DVB_C] = "FireDTV C/CI",
256 [FIREDTV_DVB_T] = "FireDTV T/CI",
257 [FIREDTV_DVB_S2] = "FireDTV S2 ",
258};
259
260struct firedtv *fdtv_alloc(struct device *dev,
261 const struct firedtv_backend *backend,
262 const char *name, size_t name_len)
263{
264 struct firedtv *fdtv;
265 int i;
266
267 fdtv = kzalloc(sizeof(*fdtv), GFP_KERNEL);
268 if (!fdtv)
269 return NULL;
270
271 dev->driver_data = fdtv;
272 fdtv->device = dev;
273 fdtv->isochannel = -1;
274 fdtv->voltage = 0xff;
275 fdtv->tone = 0xff;
276 fdtv->backend = backend;
277
278 mutex_init(&fdtv->avc_mutex);
279 init_waitqueue_head(&fdtv->avc_wait);
280 fdtv->avc_reply_received = true;
281 mutex_init(&fdtv->demux_mutex);
282 INIT_WORK(&fdtv->remote_ctrl_work, avc_remote_ctrl_work);
283
284 for (i = ARRAY_SIZE(fdtv_model_names); --i; )
285 if (strlen(fdtv_model_names[i]) <= name_len &&
286 strncmp(name, fdtv_model_names[i], name_len) == 0)
287 break;
288 fdtv->type = i;
289
290 return fdtv;
291}
292
293#define MATCH_FLAGS (IEEE1394_MATCH_VENDOR_ID | IEEE1394_MATCH_MODEL_ID | \
294 IEEE1394_MATCH_SPECIFIER_ID | IEEE1394_MATCH_VERSION)
295
296#define DIGITAL_EVERYWHERE_OUI 0x001287
297#define AVC_UNIT_SPEC_ID_ENTRY 0x00a02d
298#define AVC_SW_VERSION_ENTRY 0x010001
299
300static struct ieee1394_device_id fdtv_id_table[] = {
301 {
302 /* FloppyDTV S/CI and FloppyDTV S2 */
303 .match_flags = MATCH_FLAGS,
304 .vendor_id = DIGITAL_EVERYWHERE_OUI,
305 .model_id = 0x000024,
306 .specifier_id = AVC_UNIT_SPEC_ID_ENTRY,
307 .version = AVC_SW_VERSION_ENTRY,
308 }, {
309 /* FloppyDTV T/CI */
310 .match_flags = MATCH_FLAGS,
311 .vendor_id = DIGITAL_EVERYWHERE_OUI,
312 .model_id = 0x000025,
313 .specifier_id = AVC_UNIT_SPEC_ID_ENTRY,
314 .version = AVC_SW_VERSION_ENTRY,
315 }, {
316 /* FloppyDTV C/CI */
317 .match_flags = MATCH_FLAGS,
318 .vendor_id = DIGITAL_EVERYWHERE_OUI,
319 .model_id = 0x000026,
320 .specifier_id = AVC_UNIT_SPEC_ID_ENTRY,
321 .version = AVC_SW_VERSION_ENTRY,
322 }, {
323 /* FireDTV S/CI and FloppyDTV S2 */
324 .match_flags = MATCH_FLAGS,
325 .vendor_id = DIGITAL_EVERYWHERE_OUI,
326 .model_id = 0x000034,
327 .specifier_id = AVC_UNIT_SPEC_ID_ENTRY,
328 .version = AVC_SW_VERSION_ENTRY,
329 }, {
330 /* FireDTV T/CI */
331 .match_flags = MATCH_FLAGS,
332 .vendor_id = DIGITAL_EVERYWHERE_OUI,
333 .model_id = 0x000035,
334 .specifier_id = AVC_UNIT_SPEC_ID_ENTRY,
335 .version = AVC_SW_VERSION_ENTRY,
336 }, {
337 /* FireDTV C/CI */
338 .match_flags = MATCH_FLAGS,
339 .vendor_id = DIGITAL_EVERYWHERE_OUI,
340 .model_id = 0x000036,
341 .specifier_id = AVC_UNIT_SPEC_ID_ENTRY,
342 .version = AVC_SW_VERSION_ENTRY,
343 }, {}
344};
345MODULE_DEVICE_TABLE(ieee1394, fdtv_id_table);
346
347static int __init fdtv_init(void)
348{
349 return fdtv_1394_init(fdtv_id_table);
350}
351
352static void __exit fdtv_exit(void)
353{
354 fdtv_1394_exit();
355}
356
357module_init(fdtv_init);
358module_exit(fdtv_exit);
359
360MODULE_AUTHOR("Andreas Monitzer <andy@monitzer.com>");
361MODULE_AUTHOR("Ben Backx <ben@bbackx.com>");
362MODULE_DESCRIPTION("FireDTV DVB Driver");
363MODULE_LICENSE("GPL");
364MODULE_SUPPORTED_DEVICE("FireDTV DVB");
diff --git a/drivers/media/dvb/firewire/firedtv-fe.c b/drivers/media/dvb/firewire/firedtv-fe.c
new file mode 100644
index 000000000000..7ba43630a25d
--- /dev/null
+++ b/drivers/media/dvb/firewire/firedtv-fe.c
@@ -0,0 +1,247 @@
1/*
2 * FireDTV driver (formerly known as FireSAT)
3 *
4 * Copyright (C) 2004 Andreas Monitzer <andy@monitzer.com>
5 * Copyright (C) 2008 Henrik Kurelid <henrik@kurelid.se>
6 *
7 * This program is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU General Public License as
9 * published by the Free Software Foundation; either version 2 of
10 * the License, or (at your option) any later version.
11 */
12
13#include <linux/device.h>
14#include <linux/errno.h>
15#include <linux/kernel.h>
16#include <linux/string.h>
17#include <linux/types.h>
18
19#include <dvb_frontend.h>
20
21#include "firedtv.h"
22
23static int fdtv_dvb_init(struct dvb_frontend *fe)
24{
25 struct firedtv *fdtv = fe->sec_priv;
26 int err;
27
28 /* FIXME - allocate free channel at IRM */
29 fdtv->isochannel = fdtv->adapter.num;
30
31 err = cmp_establish_pp_connection(fdtv, fdtv->subunit,
32 fdtv->isochannel);
33 if (err) {
34 dev_err(fdtv->device,
35 "could not establish point to point connection\n");
36 return err;
37 }
38
39 return fdtv->backend->start_iso(fdtv);
40}
41
42static int fdtv_sleep(struct dvb_frontend *fe)
43{
44 struct firedtv *fdtv = fe->sec_priv;
45
46 fdtv->backend->stop_iso(fdtv);
47 cmp_break_pp_connection(fdtv, fdtv->subunit, fdtv->isochannel);
48 fdtv->isochannel = -1;
49 return 0;
50}
51
52#define LNBCONTROL_DONTCARE 0xff
53
54static int fdtv_diseqc_send_master_cmd(struct dvb_frontend *fe,
55 struct dvb_diseqc_master_cmd *cmd)
56{
57 struct firedtv *fdtv = fe->sec_priv;
58
59 return avc_lnb_control(fdtv, LNBCONTROL_DONTCARE, LNBCONTROL_DONTCARE,
60 LNBCONTROL_DONTCARE, 1, cmd);
61}
62
63static int fdtv_diseqc_send_burst(struct dvb_frontend *fe,
64 fe_sec_mini_cmd_t minicmd)
65{
66 return 0;
67}
68
69static int fdtv_set_tone(struct dvb_frontend *fe, fe_sec_tone_mode_t tone)
70{
71 struct firedtv *fdtv = fe->sec_priv;
72
73 fdtv->tone = tone;
74 return 0;
75}
76
77static int fdtv_set_voltage(struct dvb_frontend *fe,
78 fe_sec_voltage_t voltage)
79{
80 struct firedtv *fdtv = fe->sec_priv;
81
82 fdtv->voltage = voltage;
83 return 0;
84}
85
86static int fdtv_read_status(struct dvb_frontend *fe, fe_status_t *status)
87{
88 struct firedtv *fdtv = fe->sec_priv;
89 struct firedtv_tuner_status stat;
90
91 if (avc_tuner_status(fdtv, &stat))
92 return -EINVAL;
93
94 if (stat.no_rf)
95 *status = 0;
96 else
97 *status = FE_HAS_SIGNAL | FE_HAS_VITERBI | FE_HAS_SYNC |
98 FE_HAS_CARRIER | FE_HAS_LOCK;
99 return 0;
100}
101
102static int fdtv_read_ber(struct dvb_frontend *fe, u32 *ber)
103{
104 struct firedtv *fdtv = fe->sec_priv;
105 struct firedtv_tuner_status stat;
106
107 if (avc_tuner_status(fdtv, &stat))
108 return -EINVAL;
109
110 *ber = stat.ber;
111 return 0;
112}
113
114static int fdtv_read_signal_strength(struct dvb_frontend *fe, u16 *strength)
115{
116 struct firedtv *fdtv = fe->sec_priv;
117 struct firedtv_tuner_status stat;
118
119 if (avc_tuner_status(fdtv, &stat))
120 return -EINVAL;
121
122 *strength = stat.signal_strength << 8;
123 return 0;
124}
125
126static int fdtv_read_snr(struct dvb_frontend *fe, u16 *snr)
127{
128 struct firedtv *fdtv = fe->sec_priv;
129 struct firedtv_tuner_status stat;
130
131 if (avc_tuner_status(fdtv, &stat))
132 return -EINVAL;
133
134 /* C/N[dB] = -10 * log10(snr / 65535) */
135 *snr = stat.carrier_noise_ratio * 257;
136 return 0;
137}
138
139static int fdtv_read_uncorrected_blocks(struct dvb_frontend *fe, u32 *ucblocks)
140{
141 return -EOPNOTSUPP;
142}
143
144#define ACCEPTED 0x9
145
146static int fdtv_set_frontend(struct dvb_frontend *fe,
147 struct dvb_frontend_parameters *params)
148{
149 struct firedtv *fdtv = fe->sec_priv;
150
151 /* FIXME: avc_tuner_dsd never returns ACCEPTED. Check status? */
152 if (avc_tuner_dsd(fdtv, params) != ACCEPTED)
153 return -EINVAL;
154 else
155 return 0; /* not sure of this... */
156}
157
158static int fdtv_get_frontend(struct dvb_frontend *fe,
159 struct dvb_frontend_parameters *params)
160{
161 return -EOPNOTSUPP;
162}
163
164void fdtv_frontend_init(struct firedtv *fdtv)
165{
166 struct dvb_frontend_ops *ops = &fdtv->fe.ops;
167 struct dvb_frontend_info *fi = &ops->info;
168
169 ops->init = fdtv_dvb_init;
170 ops->sleep = fdtv_sleep;
171
172 ops->set_frontend = fdtv_set_frontend;
173 ops->get_frontend = fdtv_get_frontend;
174
175 ops->read_status = fdtv_read_status;
176 ops->read_ber = fdtv_read_ber;
177 ops->read_signal_strength = fdtv_read_signal_strength;
178 ops->read_snr = fdtv_read_snr;
179 ops->read_ucblocks = fdtv_read_uncorrected_blocks;
180
181 ops->diseqc_send_master_cmd = fdtv_diseqc_send_master_cmd;
182 ops->diseqc_send_burst = fdtv_diseqc_send_burst;
183 ops->set_tone = fdtv_set_tone;
184 ops->set_voltage = fdtv_set_voltage;
185
186 switch (fdtv->type) {
187 case FIREDTV_DVB_S:
188 case FIREDTV_DVB_S2:
189 fi->type = FE_QPSK;
190
191 fi->frequency_min = 950000;
192 fi->frequency_max = 2150000;
193 fi->frequency_stepsize = 125;
194 fi->symbol_rate_min = 1000000;
195 fi->symbol_rate_max = 40000000;
196
197 fi->caps = FE_CAN_INVERSION_AUTO |
198 FE_CAN_FEC_1_2 |
199 FE_CAN_FEC_2_3 |
200 FE_CAN_FEC_3_4 |
201 FE_CAN_FEC_5_6 |
202 FE_CAN_FEC_7_8 |
203 FE_CAN_FEC_AUTO |
204 FE_CAN_QPSK;
205 break;
206
207 case FIREDTV_DVB_C:
208 fi->type = FE_QAM;
209
210 fi->frequency_min = 47000000;
211 fi->frequency_max = 866000000;
212 fi->frequency_stepsize = 62500;
213 fi->symbol_rate_min = 870000;
214 fi->symbol_rate_max = 6900000;
215
216 fi->caps = FE_CAN_INVERSION_AUTO |
217 FE_CAN_QAM_16 |
218 FE_CAN_QAM_32 |
219 FE_CAN_QAM_64 |
220 FE_CAN_QAM_128 |
221 FE_CAN_QAM_256 |
222 FE_CAN_QAM_AUTO;
223 break;
224
225 case FIREDTV_DVB_T:
226 fi->type = FE_OFDM;
227
228 fi->frequency_min = 49000000;
229 fi->frequency_max = 861000000;
230 fi->frequency_stepsize = 62500;
231
232 fi->caps = FE_CAN_INVERSION_AUTO |
233 FE_CAN_FEC_2_3 |
234 FE_CAN_TRANSMISSION_MODE_AUTO |
235 FE_CAN_GUARD_INTERVAL_AUTO |
236 FE_CAN_HIERARCHY_AUTO;
237 break;
238
239 default:
240 dev_err(fdtv->device, "no frontend for model type %d\n",
241 fdtv->type);
242 }
243 strcpy(fi->name, fdtv_model_names[fdtv->type]);
244
245 fdtv->fe.dvb = &fdtv->adapter;
246 fdtv->fe.sec_priv = fdtv;
247}
diff --git a/drivers/media/dvb/firewire/firedtv-rc.c b/drivers/media/dvb/firewire/firedtv-rc.c
new file mode 100644
index 000000000000..46a6324d7b73
--- /dev/null
+++ b/drivers/media/dvb/firewire/firedtv-rc.c
@@ -0,0 +1,190 @@
1/*
2 * FireDTV driver (formerly known as FireSAT)
3 *
4 * Copyright (C) 2004 Andreas Monitzer <andy@monitzer.com>
5 *
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public License as
8 * published by the Free Software Foundation; either version 2 of
9 * the License, or (at your option) any later version.
10 */
11
12#include <linux/bitops.h>
13#include <linux/input.h>
14#include <linux/kernel.h>
15#include <linux/string.h>
16#include <linux/types.h>
17
18#include "firedtv.h"
19
20/* fixed table with older keycodes, geared towards MythTV */
21static const u16 oldtable[] = {
22
23 /* code from device: 0x4501...0x451f */
24
25 KEY_ESC,
26 KEY_F9,
27 KEY_1,
28 KEY_2,
29 KEY_3,
30 KEY_4,
31 KEY_5,
32 KEY_6,
33 KEY_7,
34 KEY_8,
35 KEY_9,
36 KEY_I,
37 KEY_0,
38 KEY_ENTER,
39 KEY_RED,
40 KEY_UP,
41 KEY_GREEN,
42 KEY_F10,
43 KEY_SPACE,
44 KEY_F11,
45 KEY_YELLOW,
46 KEY_DOWN,
47 KEY_BLUE,
48 KEY_Z,
49 KEY_P,
50 KEY_PAGEDOWN,
51 KEY_LEFT,
52 KEY_W,
53 KEY_RIGHT,
54 KEY_P,
55 KEY_M,
56
57 /* code from device: 0x4540...0x4542 */
58
59 KEY_R,
60 KEY_V,
61 KEY_C,
62};
63
64/* user-modifiable table for a remote as sold in 2008 */
65static const u16 keytable[] = {
66
67 /* code from device: 0x0300...0x031f */
68
69 [0x00] = KEY_POWER,
70 [0x01] = KEY_SLEEP,
71 [0x02] = KEY_STOP,
72 [0x03] = KEY_OK,
73 [0x04] = KEY_RIGHT,
74 [0x05] = KEY_1,
75 [0x06] = KEY_2,
76 [0x07] = KEY_3,
77 [0x08] = KEY_LEFT,
78 [0x09] = KEY_4,
79 [0x0a] = KEY_5,
80 [0x0b] = KEY_6,
81 [0x0c] = KEY_UP,
82 [0x0d] = KEY_7,
83 [0x0e] = KEY_8,
84 [0x0f] = KEY_9,
85 [0x10] = KEY_DOWN,
86 [0x11] = KEY_TITLE, /* "OSD" - fixme */
87 [0x12] = KEY_0,
88 [0x13] = KEY_F20, /* "16:9" - fixme */
89 [0x14] = KEY_SCREEN, /* "FULL" - fixme */
90 [0x15] = KEY_MUTE,
91 [0x16] = KEY_SUBTITLE,
92 [0x17] = KEY_RECORD,
93 [0x18] = KEY_TEXT,
94 [0x19] = KEY_AUDIO,
95 [0x1a] = KEY_RED,
96 [0x1b] = KEY_PREVIOUS,
97 [0x1c] = KEY_REWIND,
98 [0x1d] = KEY_PLAYPAUSE,
99 [0x1e] = KEY_NEXT,
100 [0x1f] = KEY_VOLUMEUP,
101
102 /* code from device: 0x0340...0x0354 */
103
104 [0x20] = KEY_CHANNELUP,
105 [0x21] = KEY_F21, /* "4:3" - fixme */
106 [0x22] = KEY_TV,
107 [0x23] = KEY_DVD,
108 [0x24] = KEY_VCR,
109 [0x25] = KEY_AUX,
110 [0x26] = KEY_GREEN,
111 [0x27] = KEY_YELLOW,
112 [0x28] = KEY_BLUE,
113 [0x29] = KEY_CHANNEL, /* "CH.LIST" */
114 [0x2a] = KEY_VENDOR, /* "CI" - fixme */
115 [0x2b] = KEY_VOLUMEDOWN,
116 [0x2c] = KEY_CHANNELDOWN,
117 [0x2d] = KEY_LAST,
118 [0x2e] = KEY_INFO,
119 [0x2f] = KEY_FORWARD,
120 [0x30] = KEY_LIST,
121 [0x31] = KEY_FAVORITES,
122 [0x32] = KEY_MENU,
123 [0x33] = KEY_EPG,
124 [0x34] = KEY_EXIT,
125};
126
127int fdtv_register_rc(struct firedtv *fdtv, struct device *dev)
128{
129 struct input_dev *idev;
130 int i, err;
131
132 idev = input_allocate_device();
133 if (!idev)
134 return -ENOMEM;
135
136 fdtv->remote_ctrl_dev = idev;
137 idev->name = "FireDTV remote control";
138 idev->dev.parent = dev;
139 idev->evbit[0] = BIT_MASK(EV_KEY);
140 idev->keycode = kmemdup(keytable, sizeof(keytable), GFP_KERNEL);
141 if (!idev->keycode) {
142 err = -ENOMEM;
143 goto fail;
144 }
145 idev->keycodesize = sizeof(keytable[0]);
146 idev->keycodemax = ARRAY_SIZE(keytable);
147
148 for (i = 0; i < ARRAY_SIZE(keytable); i++)
149 set_bit(keytable[i], idev->keybit);
150
151 err = input_register_device(idev);
152 if (err)
153 goto fail_free_keymap;
154
155 return 0;
156
157fail_free_keymap:
158 kfree(idev->keycode);
159fail:
160 input_free_device(idev);
161 return err;
162}
163
164void fdtv_unregister_rc(struct firedtv *fdtv)
165{
166 kfree(fdtv->remote_ctrl_dev->keycode);
167 input_unregister_device(fdtv->remote_ctrl_dev);
168}
169
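/*
 * Scancode ranges as emitted by the hardware: 0x0300..0x031f and
 * 0x0340..0x0354 index the user-modifiable keytable (the second range
 * continues at offset 0x20), while 0x4501..0x451f and 0x4540..0x4542
 * come from the older remote and index oldtable.
 */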
170void fdtv_handle_rc(struct firedtv *fdtv, unsigned int code)
171{
172 u16 *keycode = fdtv->remote_ctrl_dev->keycode;
173
174 if (code >= 0x0300 && code <= 0x031f)
175 code = keycode[code - 0x0300];
176 else if (code >= 0x0340 && code <= 0x0354)
177 code = keycode[code - 0x0320];
178 else if (code >= 0x4501 && code <= 0x451f)
179 code = oldtable[code - 0x4501];
180 else if (code >= 0x4540 && code <= 0x4542)
181 code = oldtable[code - 0x4521];
182 else {
183 printk(KERN_DEBUG "firedtv: invalid key code 0x%04x "
184 "from remote control\n", code);
185 return;
186 }
187
188 input_report_key(fdtv->remote_ctrl_dev, code, 1);
189 input_report_key(fdtv->remote_ctrl_dev, code, 0);
190}
diff --git a/drivers/media/dvb/firewire/firedtv.h b/drivers/media/dvb/firewire/firedtv.h
new file mode 100644
index 000000000000..d48530b81e61
--- /dev/null
+++ b/drivers/media/dvb/firewire/firedtv.h
@@ -0,0 +1,182 @@
1/*
2 * FireDTV driver (formerly known as FireSAT)
3 *
4 * Copyright (C) 2004 Andreas Monitzer <andy@monitzer.com>
5 * Copyright (C) 2008 Henrik Kurelid <henrik@kurelid.se>
6 *
7 * This program is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU General Public License as
9 * published by the Free Software Foundation; either version 2 of
10 * the License, or (at your option) any later version.
11 */
12
13#ifndef _FIREDTV_H
14#define _FIREDTV_H
15
16#include <linux/dvb/dmx.h>
17#include <linux/dvb/frontend.h>
18#include <linux/list.h>
19#include <linux/mutex.h>
20#include <linux/spinlock_types.h>
21#include <linux/types.h>
22#include <linux/wait.h>
23#include <linux/workqueue.h>
24
25#include <demux.h>
26#include <dmxdev.h>
27#include <dvb_demux.h>
28#include <dvb_frontend.h>
29#include <dvb_net.h>
30#include <dvbdev.h>
31
32struct firedtv_tuner_status {
33 unsigned active_system:8;
34 unsigned searching:1;
35 unsigned moving:1;
36 unsigned no_rf:1;
37 unsigned input:1;
38 unsigned selected_antenna:7;
39 unsigned ber:32;
40 unsigned signal_strength:8;
41 unsigned raster_frequency:2;
42 unsigned rf_frequency:22;
43 unsigned man_dep_info_length:8;
44 unsigned front_end_error:1;
45 unsigned antenna_error:1;
46 unsigned front_end_power_status:1;
47 unsigned power_supply:1;
48 unsigned carrier_noise_ratio:16;
49 unsigned power_supply_voltage:8;
50 unsigned antenna_voltage:8;
51 unsigned firewire_bus_voltage:8;
52 unsigned ca_mmi:1;
53 unsigned ca_pmt_reply:1;
54 unsigned ca_date_time_request:1;
55 unsigned ca_application_info:1;
56 unsigned ca_module_present_status:1;
57 unsigned ca_dvb_flag:1;
58 unsigned ca_error_flag:1;
59 unsigned ca_initialization_status:1;
60};
61
62enum model_type {
63 FIREDTV_UNKNOWN = 0,
64 FIREDTV_DVB_S = 1,
65 FIREDTV_DVB_C = 2,
66 FIREDTV_DVB_T = 3,
67 FIREDTV_DVB_S2 = 4,
68};
69
70struct device;
71struct input_dev;
72struct firedtv;
73
74struct firedtv_backend {
75 int (*lock)(struct firedtv *fdtv, u64 addr, void *data, __be32 arg);
76 int (*read)(struct firedtv *fdtv, u64 addr, void *data, size_t len);
77 int (*write)(struct firedtv *fdtv, u64 addr, void *data, size_t len);
78 int (*start_iso)(struct firedtv *fdtv);
79 void (*stop_iso)(struct firedtv *fdtv);
80};
81
82struct firedtv {
83 struct device *device;
84 struct list_head list;
85
86 struct dvb_adapter adapter;
87 struct dmxdev dmxdev;
88 struct dvb_demux demux;
89 struct dmx_frontend frontend;
90 struct dvb_net dvbnet;
91 struct dvb_frontend fe;
92
93 struct dvb_device *cadev;
94 int ca_last_command;
95 int ca_time_interval;
96
97 struct mutex avc_mutex;
98 wait_queue_head_t avc_wait;
99 bool avc_reply_received;
100 struct work_struct remote_ctrl_work;
101 struct input_dev *remote_ctrl_dev;
102
103 enum model_type type;
104 char subunit;
105 char isochannel;
106 fe_sec_voltage_t voltage;
107 fe_sec_tone_mode_t tone;
108
109 const struct firedtv_backend *backend;
110 void *backend_data;
111
112 struct mutex demux_mutex;
113 unsigned long channel_active;
114 u16 channel_pid[16];
115
116 size_t response_length;
117 u8 response[512];
118};
119
120/* firedtv-1394.c */
121#ifdef CONFIG_DVB_FIREDTV_IEEE1394
122int fdtv_1394_init(struct ieee1394_device_id id_table[]);
123void fdtv_1394_exit(void);
124#else
125static inline int fdtv_1394_init(struct ieee1394_device_id it[]) { return 0; }
126static inline void fdtv_1394_exit(void) {}
127#endif
128
129/* firedtv-avc.c */
130int avc_recv(struct firedtv *fdtv, void *data, size_t length);
131int avc_tuner_status(struct firedtv *fdtv, struct firedtv_tuner_status *stat);
132struct dvb_frontend_parameters;
133int avc_tuner_dsd(struct firedtv *fdtv, struct dvb_frontend_parameters *params);
134int avc_tuner_set_pids(struct firedtv *fdtv, unsigned char pidc, u16 pid[]);
135int avc_tuner_get_ts(struct firedtv *fdtv);
136int avc_identify_subunit(struct firedtv *fdtv);
137struct dvb_diseqc_master_cmd;
138int avc_lnb_control(struct firedtv *fdtv, char voltage, char burst,
139 char conttone, char nrdiseq,
140 struct dvb_diseqc_master_cmd *diseqcmd);
141void avc_remote_ctrl_work(struct work_struct *work);
142int avc_register_remote_control(struct firedtv *fdtv);
143int avc_ca_app_info(struct firedtv *fdtv, char *app_info, unsigned int *len);
144int avc_ca_info(struct firedtv *fdtv, char *app_info, unsigned int *len);
145int avc_ca_reset(struct firedtv *fdtv);
146int avc_ca_pmt(struct firedtv *fdtv, char *app_info, int length);
147int avc_ca_get_time_date(struct firedtv *fdtv, int *interval);
148int avc_ca_enter_menu(struct firedtv *fdtv);
149int avc_ca_get_mmi(struct firedtv *fdtv, char *mmi_object, unsigned int *len);
150int cmp_establish_pp_connection(struct firedtv *fdtv, int plug, int channel);
151void cmp_break_pp_connection(struct firedtv *fdtv, int plug, int channel);
152
153/* firedtv-ci.c */
154int fdtv_ca_register(struct firedtv *fdtv);
155void fdtv_ca_release(struct firedtv *fdtv);
156
157/* firedtv-dvb.c */
158int fdtv_start_feed(struct dvb_demux_feed *dvbdmxfeed);
159int fdtv_stop_feed(struct dvb_demux_feed *dvbdmxfeed);
160int fdtv_dvb_register(struct firedtv *fdtv);
161void fdtv_dvb_unregister(struct firedtv *fdtv);
162struct firedtv *fdtv_alloc(struct device *dev,
163 const struct firedtv_backend *backend,
164 const char *name, size_t name_len);
165extern const char *fdtv_model_names[];
166
167/* firedtv-fe.c */
168void fdtv_frontend_init(struct firedtv *fdtv);
169
170/* firedtv-rc.c */
171#ifdef CONFIG_DVB_FIREDTV_INPUT
172int fdtv_register_rc(struct firedtv *fdtv, struct device *dev);
173void fdtv_unregister_rc(struct firedtv *fdtv);
174void fdtv_handle_rc(struct firedtv *fdtv, unsigned int code);
175#else
176static inline int fdtv_register_rc(struct firedtv *fdtv,
177 struct device *dev) { return 0; }
178static inline void fdtv_unregister_rc(struct firedtv *fdtv) {}
179static inline void fdtv_handle_rc(struct firedtv *fdtv, unsigned int code) {}
180#endif
181
182#endif /* _FIREDTV_H */
diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig
index 6bdfd47d679d..a2f185fd7072 100644
--- a/drivers/net/Kconfig
+++ b/drivers/net/Kconfig
@@ -2342,6 +2342,17 @@ config ATL1E
2342	  To compile this driver as a module, choose M here. The module
2343	  will be called atl1e.
2344
2345config ATL1C
2346 tristate "Atheros L1C Gigabit Ethernet support (EXPERIMENTAL)"
2347 depends on PCI && EXPERIMENTAL
2348 select CRC32
2349 select MII
2350 help
2351 This driver supports the Atheros L1C gigabit ethernet adapter.
2352
2353 To compile this driver as a module, choose M here. The module
2354 will be called atl1c.
2355
2356config JME
2357 tristate "JMicron(R) PCI-Express Gigabit Ethernet support"
2358 depends on PCI
diff --git a/drivers/net/Makefile b/drivers/net/Makefile
index a3c5c002f224..aca8492db654 100644
--- a/drivers/net/Makefile
+++ b/drivers/net/Makefile
@@ -17,6 +17,7 @@ obj-$(CONFIG_BONDING) += bonding/
17obj-$(CONFIG_ATL1) += atlx/
18obj-$(CONFIG_ATL2) += atlx/
19obj-$(CONFIG_ATL1E) += atl1e/
20obj-$(CONFIG_ATL1C) += atl1c/
21obj-$(CONFIG_GIANFAR) += gianfar_driver.o
22obj-$(CONFIG_TEHUTI) += tehuti.o
23obj-$(CONFIG_ENIC) += enic/
diff --git a/drivers/net/atl1c/Makefile b/drivers/net/atl1c/Makefile
new file mode 100644
index 000000000000..c37d966952ee
--- /dev/null
+++ b/drivers/net/atl1c/Makefile
@@ -0,0 +1,2 @@
1obj-$(CONFIG_ATL1C) += atl1c.o
2atl1c-objs := atl1c_main.o atl1c_hw.o atl1c_ethtool.o
diff --git a/drivers/net/atl1c/atl1c.h b/drivers/net/atl1c/atl1c.h
new file mode 100644
index 000000000000..ac11b84b8377
--- /dev/null
+++ b/drivers/net/atl1c/atl1c.h
@@ -0,0 +1,606 @@
1/*
2 * Copyright(c) 2008 - 2009 Atheros Corporation. All rights reserved.
3 *
4 * Derived from Intel e1000 driver
5 * Copyright(c) 1999 - 2005 Intel Corporation. All rights reserved.
6 *
7 * This program is free software; you can redistribute it and/or modify it
8 * under the terms of the GNU General Public License as published by the Free
9 * Software Foundation; either version 2 of the License, or (at your option)
10 * any later version.
11 *
12 * This program is distributed in the hope that it will be useful, but WITHOUT
13 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
14 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
15 * more details.
16 *
17 * You should have received a copy of the GNU General Public License along with
18 * this program; if not, write to the Free Software Foundation, Inc., 59
19 * Temple Place - Suite 330, Boston, MA 02111-1307, USA.
20 */
21
22#ifndef _ATL1C_H_
23#define _ATL1C_H_
24
25#include <linux/version.h>
26#include <linux/init.h>
27#include <linux/types.h>
28#include <linux/errno.h>
29#include <linux/module.h>
30#include <linux/pci.h>
31#include <linux/netdevice.h>
32#include <linux/etherdevice.h>
33#include <linux/skbuff.h>
34#include <linux/ioport.h>
35#include <linux/slab.h>
36#include <linux/list.h>
37#include <linux/delay.h>
38#include <linux/sched.h>
39#include <linux/in.h>
40#include <linux/ip.h>
41#include <linux/ipv6.h>
42#include <linux/udp.h>
43#include <linux/mii.h>
44#include <linux/io.h>
45#include <linux/vmalloc.h>
46#include <linux/pagemap.h>
47#include <linux/tcp.h>
48#include <linux/mii.h>
49#include <linux/ethtool.h>
50#include <linux/if_vlan.h>
51#include <linux/workqueue.h>
52#include <net/checksum.h>
53#include <net/ip6_checksum.h>
54
55#include "atl1c_hw.h"
56
57/* Wake Up Filter Control */
58#define AT_WUFC_LNKC 0x00000001 /* Link Status Change Wakeup Enable */
59#define AT_WUFC_MAG 0x00000002 /* Magic Packet Wakeup Enable */
60#define AT_WUFC_EX 0x00000004 /* Directed Exact Wakeup Enable */
61#define AT_WUFC_MC 0x00000008 /* Multicast Wakeup Enable */
62#define AT_WUFC_BC 0x00000010 /* Broadcast Wakeup Enable */
63
64#define AT_VLAN_TO_TAG(_vlan, _tag) \
65 _tag = ((((_vlan) >> 8) & 0xFF) |\
66 (((_vlan) & 0xFF) << 8))
67
68#define AT_TAG_TO_VLAN(_tag, _vlan) \
69 _vlan = ((((_tag) >> 8) & 0xFF) |\
70 (((_tag) & 0xFF) << 8))
71
72#define SPEED_0 0xffff
73#define HALF_DUPLEX 1
74#define FULL_DUPLEX 2
75
76#define AT_RX_BUF_SIZE (ETH_FRAME_LEN + VLAN_HLEN + ETH_FCS_LEN)
77#define MAX_JUMBO_FRAME_SIZE (9*1024)
78#define MAX_TX_OFFLOAD_THRESH (9*1024)
79
80#define AT_MAX_RECEIVE_QUEUE 4
81#define AT_DEF_RECEIVE_QUEUE 1
82#define AT_MAX_TRANSMIT_QUEUE 2
83
84#define AT_DMA_HI_ADDR_MASK 0xffffffff00000000ULL
85#define AT_DMA_LO_ADDR_MASK 0x00000000ffffffffULL
86
87#define AT_TX_WATCHDOG (5 * HZ)
88#define AT_MAX_INT_WORK 5
89#define AT_TWSI_EEPROM_TIMEOUT 100
90#define AT_HW_MAX_IDLE_DELAY 10
91#define AT_SUSPEND_LINK_TIMEOUT 28
92
93#define AT_ASPM_L0S_TIMER 6
94#define AT_ASPM_L1_TIMER 12
95
96#define ATL1C_PCIE_L0S_L1_DISABLE 0x01
97#define ATL1C_PCIE_PHY_RESET 0x02
98
99#define ATL1C_ASPM_L0s_ENABLE 0x0001
100#define ATL1C_ASPM_L1_ENABLE 0x0002
101
102#define AT_REGS_LEN (75 * sizeof(u32))
103#define AT_EEPROM_LEN 512
104
105#define ATL1C_GET_DESC(R, i, type) (&(((type *)((R)->desc))[i]))
106#define ATL1C_RFD_DESC(R, i) ATL1C_GET_DESC(R, i, struct atl1c_rx_free_desc)
107#define ATL1C_TPD_DESC(R, i) ATL1C_GET_DESC(R, i, struct atl1c_tpd_desc)
108#define ATL1C_RRD_DESC(R, i) ATL1C_GET_DESC(R, i, struct atl1c_recv_ret_status)
109
110/* tpd word 1 bit 0:7 General Checksum task offload */
111#define TPD_L4HDR_OFFSET_MASK 0x00FF
112#define TPD_L4HDR_OFFSET_SHIFT 0
113
114/* tpd word 1 bit 0:7 Large Send task offload (IPv4/IPV6) */
115#define TPD_TCPHDR_OFFSET_MASK 0x00FF
116#define TPD_TCPHDR_OFFSET_SHIFT 0
117
118/* tpd word 1 bit 0:7 Custom Checksum task offload */
119#define TPD_PLOADOFFSET_MASK 0x00FF
120#define TPD_PLOADOFFSET_SHIFT 0
121
122/* tpd word 1 bit 8:17 */
123#define TPD_CCSUM_EN_MASK 0x0001
124#define TPD_CCSUM_EN_SHIFT 8
125#define TPD_IP_CSUM_MASK 0x0001
126#define TPD_IP_CSUM_SHIFT 9
127#define TPD_TCP_CSUM_MASK 0x0001
128#define TPD_TCP_CSUM_SHIFT 10
129#define TPD_UDP_CSUM_MASK 0x0001
130#define TPD_UDP_CSUM_SHIFT 11
131#define TPD_LSO_EN_MASK 0x0001 /* TCP Large Send Offload */
132#define TPD_LSO_EN_SHIFT 12
133#define TPD_LSO_VER_MASK 0x0001
134#define TPD_LSO_VER_SHIFT 13 /* 0 : ipv4; 1 : ipv4/ipv6 */
135#define TPD_CON_VTAG_MASK 0x0001
136#define TPD_CON_VTAG_SHIFT 14
137#define TPD_INS_VTAG_MASK 0x0001
138#define TPD_INS_VTAG_SHIFT 15
139#define TPD_IPV4_PACKET_MASK 0x0001 /* valid when LSO VER is 1 */
140#define TPD_IPV4_PACKET_SHIFT 16
141#define TPD_ETH_TYPE_MASK 0x0001
142#define TPD_ETH_TYPE_SHIFT 17 /* 0 : 802.3 frame; 1 : Ethernet */
143
144/* tpd word 1 bit 18:25 Custom Checksum task offload */
145#define TPD_CCSUM_OFFSET_MASK 0x00FF
146#define TPD_CCSUM_OFFSET_SHIFT 18
147#define TPD_CCSUM_EPAD_MASK 0x0001
148#define TPD_CCSUM_EPAD_SHIFT 30
149
150/* tpd word 1 bit 18:30 Large Send task offload (IPv4/IPv6) */
151#define TPD_MSS_MASK 0x1FFF
152#define TPD_MSS_SHIFT 18
153
154#define TPD_EOP_MASK 0x0001
155#define TPD_EOP_SHIFT 31
156
157struct atl1c_tpd_desc {
158 __le16 buffer_len; /* include 4-byte CRC */
159 __le16 vlan_tag;
160 __le32 word1;
161 __le64 buffer_addr;
162};
163
164struct atl1c_tpd_ext_desc {
165 u32 reservd_0;
166 __le32 word1;
167 __le32 pkt_len;
168 u32 reservd_1;
169};
170/* rrs word 0 bit 0:31 */
171#define RRS_RX_CSUM_MASK 0xFFFF
172#define RRS_RX_CSUM_SHIFT 0
173#define RRS_RX_RFD_CNT_MASK 0x000F
174#define RRS_RX_RFD_CNT_SHIFT 16
175#define RRS_RX_RFD_INDEX_MASK 0x0FFF
176#define RRS_RX_RFD_INDEX_SHIFT 20
177
178/* rrs flag bit 0:16 */
179#define RRS_HEAD_LEN_MASK 0x00FF
180#define RRS_HEAD_LEN_SHIFT 0
181#define RRS_HDS_TYPE_MASK 0x0003
182#define RRS_HDS_TYPE_SHIFT 8
183#define RRS_CPU_NUM_MASK 0x0003
184#define RRS_CPU_NUM_SHIFT 10
185#define RRS_HASH_FLG_MASK 0x000F
186#define RRS_HASH_FLG_SHIFT 12
187
188#define RRS_HDS_TYPE_HEAD 1
189#define RRS_HDS_TYPE_DATA 2
190
191#define RRS_IS_NO_HDS_TYPE(flag) \
192 ((((flag) >> (RRS_HDS_TYPE_SHIFT)) & RRS_HDS_TYPE_MASK) == 0)
193
194#define RRS_IS_HDS_HEAD(flag) \
195 ((((flag) >> (RRS_HDS_TYPE_SHIFT)) & RRS_HDS_TYPE_MASK) == \
196 RRS_HDS_TYPE_HEAD)
197
198#define RRS_IS_HDS_DATA(flag) \
199 ((((flag) >> (RRS_HDS_TYPE_SHIFT)) & RRS_HDS_TYPE_MASK) == \
200 RRS_HDS_TYPE_DATA)
201
202/* rrs word 3 bit 0:31 */
203#define RRS_PKT_SIZE_MASK 0x3FFF
204#define RRS_PKT_SIZE_SHIFT 0
205#define RRS_ERR_L4_CSUM_MASK 0x0001
206#define RRS_ERR_L4_CSUM_SHIFT 14
207#define RRS_ERR_IP_CSUM_MASK 0x0001
208#define RRS_ERR_IP_CSUM_SHIFT 15
209#define RRS_VLAN_INS_MASK 0x0001
210#define RRS_VLAN_INS_SHIFT 16
211#define RRS_PROT_ID_MASK 0x0007
212#define RRS_PROT_ID_SHIFT 17
213#define RRS_RX_ERR_SUM_MASK 0x0001
214#define RRS_RX_ERR_SUM_SHIFT 20
215#define RRS_RX_ERR_CRC_MASK 0x0001
216#define RRS_RX_ERR_CRC_SHIFT 21
217#define RRS_RX_ERR_FAE_MASK 0x0001
218#define RRS_RX_ERR_FAE_SHIFT 22
219#define RRS_RX_ERR_TRUNC_MASK 0x0001
220#define RRS_RX_ERR_TRUNC_SHIFT 23
221#define RRS_RX_ERR_RUNC_MASK 0x0001
222#define RRS_RX_ERR_RUNC_SHIFT 24
223#define RRS_RX_ERR_ICMP_MASK 0x0001
224#define RRS_RX_ERR_ICMP_SHIFT 25
225#define RRS_PACKET_BCAST_MASK 0x0001
226#define RRS_PACKET_BCAST_SHIFT 26
227#define RRS_PACKET_MCAST_MASK 0x0001
228#define RRS_PACKET_MCAST_SHIFT 27
229#define RRS_PACKET_TYPE_MASK 0x0001
230#define RRS_PACKET_TYPE_SHIFT 28
231#define RRS_FIFO_FULL_MASK 0x0001
232#define RRS_FIFO_FULL_SHIFT 29
233#define RRS_802_3_LEN_ERR_MASK 0x0001
234#define RRS_802_3_LEN_ERR_SHIFT 30
235#define RRS_RXD_UPDATED_MASK 0x0001
236#define RRS_RXD_UPDATED_SHIFT 31
237
238#define RRS_ERR_L4_CSUM 0x00004000
239#define RRS_ERR_IP_CSUM 0x00008000
240#define RRS_VLAN_INS 0x00010000
241#define RRS_RX_ERR_SUM 0x00100000
242#define RRS_RX_ERR_CRC 0x00200000
243#define RRS_802_3_LEN_ERR 0x40000000
244#define RRS_RXD_UPDATED 0x80000000
245
246#define RRS_PACKET_TYPE_802_3 1
247#define RRS_PACKET_TYPE_ETH 0
248#define RRS_PACKET_IS_ETH(word) \
249 ((((word) >> RRS_PACKET_TYPE_SHIFT) & RRS_PACKET_TYPE_MASK) == \
250 RRS_PACKET_TYPE_ETH)
251#define RRS_RXD_IS_VALID(word) \
252 ((((word) >> RRS_RXD_UPDATED_SHIFT) & RRS_RXD_UPDATED_MASK) == 1)
253
254#define RRS_PACKET_PROT_IS_IPV4_ONLY(word) \
255 ((((word) >> RRS_PROT_ID_SHIFT) & RRS_PROT_ID_MASK) == 1)
256#define RRS_PACKET_PROT_IS_IPV6_ONLY(word) \
257 ((((word) >> RRS_PROT_ID_SHIFT) & RRS_PROT_ID_MASK) == 6)
258
259struct atl1c_recv_ret_status {
260 __le32 word0;
261 __le32 rss_hash;
262 __le16 vlan_tag;
263 __le16 flag;
264 __le32 word3;
265};
266
267/* RFD descriptor */
268struct atl1c_rx_free_desc {
269 __le64 buffer_addr;
270};
271
272/* DMA Order Settings */
273enum atl1c_dma_order {
274 atl1c_dma_ord_in = 1,
275 atl1c_dma_ord_enh = 2,
276 atl1c_dma_ord_out = 4
277};
278
279enum atl1c_dma_rcb {
280 atl1c_rcb_64 = 0,
281 atl1c_rcb_128 = 1
282};
283
284enum atl1c_mac_speed {
285 atl1c_mac_speed_0 = 0,
286 atl1c_mac_speed_10_100 = 1,
287 atl1c_mac_speed_1000 = 2
288};
289
290enum atl1c_dma_req_block {
291 atl1c_dma_req_128 = 0,
292 atl1c_dma_req_256 = 1,
293 atl1c_dma_req_512 = 2,
294 atl1c_dma_req_1024 = 3,
295 atl1c_dma_req_2048 = 4,
296 atl1c_dma_req_4096 = 5
297};
298
299enum atl1c_rss_mode {
300 atl1c_rss_mode_disable = 0,
301 atl1c_rss_sig_que = 1,
302 atl1c_rss_mul_que_sig_int = 2,
303 atl1c_rss_mul_que_mul_int = 4,
304};
305
306enum atl1c_rss_type {
307 atl1c_rss_disable = 0,
308 atl1c_rss_ipv4 = 1,
309 atl1c_rss_ipv4_tcp = 2,
310 atl1c_rss_ipv6 = 4,
311 atl1c_rss_ipv6_tcp = 8
312};
313
314enum atl1c_nic_type {
315 athr_l1c = 0,
316 athr_l2c = 1,
317};
318
319enum atl1c_trans_queue {
320 atl1c_trans_normal = 0,
321 atl1c_trans_high = 1
322};
323
324struct atl1c_hw_stats {
325 /* rx */
326 unsigned long rx_ok; /* The number of good packet received. */
327 unsigned long rx_bcast; /* The number of good broadcast packet received. */
328 unsigned long rx_mcast; /* The number of good multicast packet received. */
329 unsigned long rx_pause; /* The number of Pause packet received. */
330 unsigned long rx_ctrl; /* The number of Control packet received other than Pause frame. */
331 unsigned long rx_fcs_err; /* The number of packets with bad FCS. */
332 unsigned long rx_len_err; /* The number of packets with mismatch of length field and actual size. */
333 unsigned long rx_byte_cnt; /* The number of bytes of good packet received. FCS is NOT included. */
334 unsigned long rx_runt; /* The number of packets received that are less than 64 byte long and with good FCS. */
335 unsigned long rx_frag; /* The number of packets received that are less than 64 byte long and with bad FCS. */
336 unsigned long rx_sz_64; /* The number of good and bad packets received that are 64 byte long. */
337 unsigned long rx_sz_65_127; /* The number of good and bad packets received that are between 65 and 127-byte long. */
338 unsigned long rx_sz_128_255; /* The number of good and bad packets received that are between 128 and 255-byte long. */
339 unsigned long rx_sz_256_511; /* The number of good and bad packets received that are between 256 and 511-byte long. */
340 unsigned long rx_sz_512_1023; /* The number of good and bad packets received that are between 512 and 1023-byte long. */
341 unsigned long rx_sz_1024_1518; /* The number of good and bad packets received that are between 1024 and 1518-byte long. */
342 unsigned long rx_sz_1519_max; /* The number of good and bad packets received that are between 1519-byte and MTU. */
343 unsigned long rx_sz_ov; /* The number of good and bad packets received that are more than MTU size truncated by Selene. */
344 unsigned long rx_rxf_ov; /* The number of frame dropped due to occurrence of RX FIFO overflow. */
345 unsigned long rx_rrd_ov; /* The number of frame dropped due to occurrence of RRD overflow. */
346 unsigned long rx_align_err; /* Alignment Error */
347 unsigned long rx_bcast_byte_cnt; /* The byte count of broadcast packet received, excluding FCS. */
348 unsigned long rx_mcast_byte_cnt; /* The byte count of multicast packet received, excluding FCS. */
349 unsigned long rx_err_addr; /* The number of packets dropped due to address filtering. */
350
351 /* tx */
352 unsigned long tx_ok; /* The number of good packet transmitted. */
353 unsigned long tx_bcast; /* The number of good broadcast packet transmitted. */
354 unsigned long tx_mcast; /* The number of good multicast packet transmitted. */
355 unsigned long tx_pause; /* The number of Pause packet transmitted. */
356 unsigned long tx_exc_defer; /* The number of packets transmitted with excessive deferral. */
357 unsigned long tx_ctrl; /* The number of control frames transmitted, excluding Pause frames. */
358 unsigned long tx_defer; /* The number of packets transmitted that are deferred. */
359 unsigned long tx_byte_cnt; /* The number of bytes of data transmitted. FCS is NOT included. */
360 unsigned long tx_sz_64; /* The number of good and bad packets transmitted that are 64 byte long. */
361 unsigned long tx_sz_65_127; /* The number of good and bad packets transmitted that are between 65 and 127-byte long. */
362 unsigned long tx_sz_128_255; /* The number of good and bad packets transmitted that are between 128 and 255-byte long. */
363 unsigned long tx_sz_256_511; /* The number of good and bad packets transmitted that are between 256 and 511-byte long. */
364 unsigned long tx_sz_512_1023; /* The number of good and bad packets transmitted that are between 512 and 1023-byte long. */
365 unsigned long tx_sz_1024_1518; /* The number of good and bad packets transmitted that are between 1024 and 1518-byte long. */
366 unsigned long tx_sz_1519_max; /* The number of good and bad packets transmitted that are between 1519-byte and MTU. */
367 unsigned long tx_1_col; /* The number of packets subsequently transmitted successfully with a single prior collision. */
368 unsigned long tx_2_col; /* The number of packets subsequently transmitted successfully with multiple prior collisions. */
369 unsigned long tx_late_col; /* The number of packets transmitted with late collisions. */
370 unsigned long tx_abort_col; /* The number of transmit packets aborted due to excessive collisions. */
371 unsigned long tx_underrun; /* The number of transmit packets aborted due to transmit FIFO underrun, or TRD FIFO underrun */
372 unsigned long tx_rd_eop; /* The number of times a read went beyond the EOP into the next frame area because the TRD was not written in time. */
373 unsigned long tx_len_err; /* The number of transmit packets whose length field does NOT match the actual frame size. */
374 unsigned long tx_trunc; /* The number of transmit packets truncated due to size exceeding MTU. */
375 unsigned long tx_bcast_byte; /* The byte count of broadcast packet transmitted, excluding FCS. */
376 unsigned long tx_mcast_byte; /* The byte count of multicast packet transmitted, excluding FCS. */
377};
378
379struct atl1c_hw {
380 u8 __iomem *hw_addr; /* inner register address */
381 struct atl1c_adapter *adapter;
382 enum atl1c_nic_type nic_type;
383 enum atl1c_dma_order dma_order;
384 enum atl1c_dma_rcb rcb_value;
385 enum atl1c_dma_req_block dmar_block;
386 enum atl1c_dma_req_block dmaw_block;
387
388 u16 device_id;
389 u16 vendor_id;
390 u16 subsystem_id;
391 u16 subsystem_vendor_id;
392 u8 revision_id;
393
394 u32 intr_mask;
395 u8 dmaw_dly_cnt;
396 u8 dmar_dly_cnt;
397
398 u8 preamble_len;
399 u16 max_frame_size;
400 u16 min_frame_size;
401
402 enum atl1c_mac_speed mac_speed;
403 bool mac_duplex;
404 bool hibernate;
405 u16 media_type;
406#define MEDIA_TYPE_AUTO_SENSOR 0
407#define MEDIA_TYPE_100M_FULL 1
408#define MEDIA_TYPE_100M_HALF 2
409#define MEDIA_TYPE_10M_FULL 3
410#define MEDIA_TYPE_10M_HALF 4
411
412 u16 autoneg_advertised;
413 u16 mii_autoneg_adv_reg;
414 u16 mii_1000t_ctrl_reg;
415
416 u16 tx_imt; /* TX Interrupt Moderator timer ( 2us resolution) */
417 u16 rx_imt; /* RX Interrupt Moderator timer ( 2us resolution) */
418 u16 ict; /* Interrupt Clear timer (2us resolution) */
419 u16 ctrl_flags;
420#define ATL1C_INTR_CLEAR_ON_READ 0x0001
421#define ATL1C_INTR_MODRT_ENABLE 0x0002
422#define ATL1C_CMB_ENABLE 0x0004
423#define ATL1C_SMB_ENABLE 0x0010
424#define ATL1C_TXQ_MODE_ENHANCE 0x0020
425#define ATL1C_RX_IPV6_CHKSUM 0x0040
426#define ATL1C_ASPM_L0S_SUPPORT 0x0080
427#define ATL1C_ASPM_L1_SUPPORT 0x0100
428#define ATL1C_ASPM_CTRL_MON 0x0200
429#define ATL1C_HIB_DISABLE 0x0400
430#define ATL1C_LINK_CAP_1000M 0x0800
431#define ATL1C_FPGA_VERSION 0x8000
432 u16 cmb_tpd;
433 u16 cmb_rrd;
434 u16 cmb_rx_timer; /* 2us resolution */
435 u16 cmb_tx_timer;
436 u32 smb_timer;
437
438 u16 rrd_thresh; /* Threshold of number of RRD produced to trigger
439 interrupt request */
440 u16 tpd_thresh;
441 u8 tpd_burst; /* Number of TPD to prefetch in cache-aligned burst. */
442 u8 rfd_burst;
443 enum atl1c_rss_type rss_type;
444 enum atl1c_rss_mode rss_mode;
445 u8 rss_hash_bits;
446 u32 base_cpu;
447 u32 indirect_tab;
448 u8 mac_addr[ETH_ALEN];
449 u8 perm_mac_addr[ETH_ALEN];
450
451 bool phy_configured;
452 bool re_autoneg;
453 bool emi_ca;
454};
455
456/*
457 * atl1c_ring_header represents a single, contiguous block of DMA space
458 * mapped for the three descriptor rings (tpd, rfd, rrd) and the two
459 * message blocks (cmb, smb) described below
460 */
461struct atl1c_ring_header {
462 void *desc; /* virtual address */
463 dma_addr_t dma; /* physical address*/
464 unsigned int size; /* length in bytes */
465};
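/*
 * Illustrative only: one way the single contiguous DMA block mentioned above
 * can be carved into the individual rings. The driver's real ring setup lives
 * in atl1c_main.c, which is not part of this hunk.
 *
 *	ring_header->size = tpd_size + rfd_size + rrd_size + cmb_size + smb_size;
 *	ring_header->desc = pci_alloc_consistent(pdev, ring_header->size,
 *						 &ring_header->dma);
 *	tpd_ring->desc = ring_header->desc;
 *	tpd_ring->dma  = ring_header->dma;
 *	rfd_ring->desc = (u8 *)tpd_ring->desc + tpd_size;
 *	rfd_ring->dma  = tpd_ring->dma + tpd_size;
 *	... and so on for the rrd ring, cmb and smb
 */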
466
467/*
468 * atl1c_buffer is wrapper around a pointer to a socket buffer
469 * so a DMA handle can be stored along with the skb
470 */
471struct atl1c_buffer {
472 struct sk_buff *skb; /* socket buffer */
473 u16 length; /* rx buffer length */
474 u16 state; /* state of buffer */
475#define ATL1_BUFFER_FREE 0
476#define ATL1_BUFFER_BUSY 1
477 dma_addr_t dma;
478};
479
480/* transmit packet descriptor (tpd) ring */
481struct atl1c_tpd_ring {
482 void *desc; /* descriptor ring virtual address */
483 dma_addr_t dma; /* descriptor ring physical address */
484 u16 size; /* descriptor ring length in bytes */
485 u16 count; /* number of descriptors in the ring */
486 u16 next_to_use; /* this is protected by adapter->tx_lock */
487 atomic_t next_to_clean;
488 struct atl1c_buffer *buffer_info;
489};
490
491/* receive free descriptor (rfd) ring */
492struct atl1c_rfd_ring {
493 void *desc; /* descriptor ring virtual address */
494 dma_addr_t dma; /* descriptor ring physical address */
495 u16 size; /* descriptor ring length in bytes */
496 u16 count; /* number of descriptors in the ring */
497 u16 next_to_use;
498 u16 next_to_clean;
499 struct atl1c_buffer *buffer_info;
500};
501
502/* receive return descriptor (rrd) ring */
503struct atl1c_rrd_ring {
504 void *desc; /* descriptor ring virtual address */
505 dma_addr_t dma; /* descriptor ring physical address */
506 u16 size; /* descriptor ring length in bytes */
507 u16 count; /* number of descriptors in the ring */
508 u16 next_to_use;
509 u16 next_to_clean;
510};
511
512struct atl1c_cmb {
513 void *cmb;
514 dma_addr_t dma;
515};
516
517struct atl1c_smb {
518 void *smb;
519 dma_addr_t dma;
520};
521
522/* board specific private data structure */
523struct atl1c_adapter {
524 struct net_device *netdev;
525 struct pci_dev *pdev;
526 struct vlan_group *vlgrp;
527 struct napi_struct napi;
528 struct atl1c_hw hw;
529 struct atl1c_hw_stats hw_stats;
530 struct net_device_stats net_stats;
531 struct mii_if_info mii; /* MII interface info */
532 u16 rx_buffer_len;
533
534 unsigned long flags;
535#define __AT_TESTING 0x0001
536#define __AT_RESETTING 0x0002
537#define __AT_DOWN 0x0003
538 u32 msg_enable;
539
540 bool have_msi;
541 u32 wol;
542 u16 link_speed;
543 u16 link_duplex;
544
545 spinlock_t mdio_lock;
546 spinlock_t tx_lock;
547 atomic_t irq_sem;
548
549 struct work_struct reset_task;
550 struct work_struct link_chg_task;
551 struct timer_list watchdog_timer;
552 struct timer_list phy_config_timer;
553
554 /* All Descriptor memory */
555 struct atl1c_ring_header ring_header;
556 struct atl1c_tpd_ring tpd_ring[AT_MAX_TRANSMIT_QUEUE];
557 struct atl1c_rfd_ring rfd_ring[AT_MAX_RECEIVE_QUEUE];
558 struct atl1c_rrd_ring rrd_ring[AT_MAX_RECEIVE_QUEUE];
559 struct atl1c_cmb cmb;
560 struct atl1c_smb smb;
561 int num_rx_queues;
562 u32 bd_number; /* board number;*/
563};
564
565#define AT_WRITE_REG(a, reg, value) ( \
566 writel((value), ((a)->hw_addr + reg)))
567
568#define AT_WRITE_FLUSH(a) (\
569 readl((a)->hw_addr))
570
571#define AT_READ_REG(a, reg, pdata) do { \
572 if (unlikely((a)->hibernate)) { \
573 readl((a)->hw_addr + reg); \
574 *(u32 *)pdata = readl((a)->hw_addr + reg); \
575 } else { \
576 *(u32 *)pdata = readl((a)->hw_addr + reg); \
577 } \
578 } while (0)
579
580#define AT_WRITE_REGB(a, reg, value) (\
581 writeb((value), ((a)->hw_addr + reg)))
582
583#define AT_READ_REGB(a, reg) (\
584 readb((a)->hw_addr + reg))
585
586#define AT_WRITE_REGW(a, reg, value) (\
587 writew((value), ((a)->hw_addr + reg)))
588
589#define AT_READ_REGW(a, reg) (\
590 readw((a)->hw_addr + reg))
591
592#define AT_WRITE_REG_ARRAY(a, reg, offset, value) ( \
593 writel((value), (((a)->hw_addr + reg) + ((offset) << 2))))
594
595#define AT_READ_REG_ARRAY(a, reg, offset) ( \
596 readl(((a)->hw_addr + reg) + ((offset) << 2)))
597
598extern char atl1c_driver_name[];
599extern char atl1c_driver_version[];
600
601extern int atl1c_up(struct atl1c_adapter *adapter);
602extern void atl1c_down(struct atl1c_adapter *adapter);
603extern void atl1c_reinit_locked(struct atl1c_adapter *adapter);
604extern s32 atl1c_reset_hw(struct atl1c_hw *hw);
605extern void atl1c_set_ethtool_ops(struct net_device *netdev);
606#endif /* _ATL1C_H_ */
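The TPD layout above is expressed entirely as *_MASK/*_SHIFT pairs rather than C bitfields. As a rough illustration of how those pairs compose word1 of a transmit descriptor, here is a sketch for a TSO packet; this is not the driver's transmit path (atl1c_main.c is outside this series of hunks), only an example of the intended use of the macros.

static void example_fill_tso_word1(struct atl1c_tpd_desc *tpd,
				   u8 tcp_hdr_offset, u16 mss)
{
	u32 word1 = 0;

	/* offset of the TCP header within the packet */
	word1 |= (tcp_hdr_offset & TPD_TCPHDR_OFFSET_MASK)
			<< TPD_TCPHDR_OFFSET_SHIFT;
	/* enable large-send offload and advertise the MSS */
	word1 |= (1U & TPD_LSO_EN_MASK) << TPD_LSO_EN_SHIFT;
	word1 |= (mss & TPD_MSS_MASK) << TPD_MSS_SHIFT;
	/* mark this as the last descriptor of the packet */
	word1 |= (1U & TPD_EOP_MASK) << TPD_EOP_SHIFT;

	tpd->word1 = cpu_to_le32(word1);
}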
diff --git a/drivers/net/atl1c/atl1c_ethtool.c b/drivers/net/atl1c/atl1c_ethtool.c
new file mode 100644
index 000000000000..45c5b7332cd3
--- /dev/null
+++ b/drivers/net/atl1c/atl1c_ethtool.c
@@ -0,0 +1,317 @@
1/*
2 * Copyright(c) 2009 - 2009 Atheros Corporation. All rights reserved.
3 *
4 * Derived from Intel e1000 driver
5 * Copyright(c) 1999 - 2005 Intel Corporation. All rights reserved.
6 *
7 * This program is free software; you can redistribute it and/or modify it
8 * under the terms of the GNU General Public License as published by the Free
9 * Software Foundation; either version 2 of the License, or (at your option)
10 * any later version.
11 *
12 * This program is distributed in the hope that it will be useful, but WITHOUT
13 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
14 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
15 * more details.
16 *
17 * You should have received a copy of the GNU General Public License along with
18 * this program; if not, write to the Free Software Foundation, Inc., 59
19 * Temple Place - Suite 330, Boston, MA 02111-1307, USA.
20 *
21 */
22
23#include <linux/netdevice.h>
24#include <linux/ethtool.h>
25
26#include "atl1c.h"
27
28static int atl1c_get_settings(struct net_device *netdev,
29 struct ethtool_cmd *ecmd)
30{
31 struct atl1c_adapter *adapter = netdev_priv(netdev);
32 struct atl1c_hw *hw = &adapter->hw;
33
34 ecmd->supported = (SUPPORTED_10baseT_Half |
35 SUPPORTED_10baseT_Full |
36 SUPPORTED_100baseT_Half |
37 SUPPORTED_100baseT_Full |
38 SUPPORTED_Autoneg |
39 SUPPORTED_TP);
40 if (hw->ctrl_flags & ATL1C_LINK_CAP_1000M)
41 ecmd->supported |= SUPPORTED_1000baseT_Full;
42
43 ecmd->advertising = ADVERTISED_TP;
44
45 ecmd->advertising |= hw->autoneg_advertised;
46
47 ecmd->port = PORT_TP;
48 ecmd->phy_address = 0;
49 ecmd->transceiver = XCVR_INTERNAL;
50
51 if (adapter->link_speed != SPEED_0) {
52 ecmd->speed = adapter->link_speed;
53 if (adapter->link_duplex == FULL_DUPLEX)
54 ecmd->duplex = DUPLEX_FULL;
55 else
56 ecmd->duplex = DUPLEX_HALF;
57 } else {
58 ecmd->speed = -1;
59 ecmd->duplex = -1;
60 }
61
62 ecmd->autoneg = AUTONEG_ENABLE;
63 return 0;
64}
65
66static int atl1c_set_settings(struct net_device *netdev,
67 struct ethtool_cmd *ecmd)
68{
69 struct atl1c_adapter *adapter = netdev_priv(netdev);
70 struct atl1c_hw *hw = &adapter->hw;
71 u16 autoneg_advertised;
72
73 while (test_and_set_bit(__AT_RESETTING, &adapter->flags))
74 msleep(1);
75
76 if (ecmd->autoneg == AUTONEG_ENABLE) {
77 autoneg_advertised = ADVERTISED_Autoneg;
78 } else {
79 if (ecmd->speed == SPEED_1000) {
80 if (ecmd->duplex != DUPLEX_FULL) {
81 if (netif_msg_link(adapter))
82 dev_warn(&adapter->pdev->dev,
83 "1000M half is invalid\n");
84 clear_bit(__AT_RESETTING, &adapter->flags);
85 return -EINVAL;
86 }
87 autoneg_advertised = ADVERTISED_1000baseT_Full;
88 } else if (ecmd->speed == SPEED_100) {
89 if (ecmd->duplex == DUPLEX_FULL)
90 autoneg_advertised = ADVERTISED_100baseT_Full;
91 else
92 autoneg_advertised = ADVERTISED_100baseT_Half;
93 } else {
94 if (ecmd->duplex == DUPLEX_FULL)
95 autoneg_advertised = ADVERTISED_10baseT_Full;
96 else
97 autoneg_advertised = ADVERTISED_10baseT_Half;
98 }
99 }
100
101 if (hw->autoneg_advertised != autoneg_advertised) {
102 hw->autoneg_advertised = autoneg_advertised;
103 if (atl1c_restart_autoneg(hw) != 0) {
104 if (netif_msg_link(adapter))
105 dev_warn(&adapter->pdev->dev,
106 "ethtool speed/duplex setting failed\n");
107 clear_bit(__AT_RESETTING, &adapter->flags);
108 return -EINVAL;
109 }
110 }
111 clear_bit(__AT_RESETTING, &adapter->flags);
112 return 0;
113}
114
115static u32 atl1c_get_tx_csum(struct net_device *netdev)
116{
117 return (netdev->features & NETIF_F_HW_CSUM) != 0;
118}
119
120static u32 atl1c_get_msglevel(struct net_device *netdev)
121{
122 struct atl1c_adapter *adapter = netdev_priv(netdev);
123 return adapter->msg_enable;
124}
125
126static void atl1c_set_msglevel(struct net_device *netdev, u32 data)
127{
128 struct atl1c_adapter *adapter = netdev_priv(netdev);
129 adapter->msg_enable = data;
130}
131
132static int atl1c_get_regs_len(struct net_device *netdev)
133{
134 return AT_REGS_LEN;
135}
136
137static void atl1c_get_regs(struct net_device *netdev,
138 struct ethtool_regs *regs, void *p)
139{
140 struct atl1c_adapter *adapter = netdev_priv(netdev);
141 struct atl1c_hw *hw = &adapter->hw;
142 u32 *regs_buff = p;
143 u16 phy_data;
144
145 memset(p, 0, AT_REGS_LEN);
146
147 regs->version = 0;
148 AT_READ_REG(hw, REG_VPD_CAP, regs_buff++);
149 AT_READ_REG(hw, REG_PM_CTRL, regs_buff++);
150 AT_READ_REG(hw, REG_MAC_HALF_DUPLX_CTRL, regs_buff++);
151 AT_READ_REG(hw, REG_TWSI_CTRL, regs_buff++);
152 AT_READ_REG(hw, REG_PCIE_DEV_MISC_CTRL, regs_buff++);
153 AT_READ_REG(hw, REG_MASTER_CTRL, regs_buff++);
154 AT_READ_REG(hw, REG_MANUAL_TIMER_INIT, regs_buff++);
155 AT_READ_REG(hw, REG_IRQ_MODRT_TIMER_INIT, regs_buff++);
156 AT_READ_REG(hw, REG_GPHY_CTRL, regs_buff++);
157 AT_READ_REG(hw, REG_LINK_CTRL, regs_buff++);
158 AT_READ_REG(hw, REG_IDLE_STATUS, regs_buff++);
159 AT_READ_REG(hw, REG_MDIO_CTRL, regs_buff++);
160 AT_READ_REG(hw, REG_SERDES_LOCK, regs_buff++);
161 AT_READ_REG(hw, REG_MAC_CTRL, regs_buff++);
162 AT_READ_REG(hw, REG_MAC_IPG_IFG, regs_buff++);
163 AT_READ_REG(hw, REG_MAC_STA_ADDR, regs_buff++);
164 AT_READ_REG(hw, REG_MAC_STA_ADDR+4, regs_buff++);
165 AT_READ_REG(hw, REG_RX_HASH_TABLE, regs_buff++);
166 AT_READ_REG(hw, REG_RX_HASH_TABLE+4, regs_buff++);
167 AT_READ_REG(hw, REG_RXQ_CTRL, regs_buff++);
168 AT_READ_REG(hw, REG_TXQ_CTRL, regs_buff++);
169 AT_READ_REG(hw, REG_MTU, regs_buff++);
170 AT_READ_REG(hw, REG_WOL_CTRL, regs_buff++);
171
172 atl1c_read_phy_reg(hw, MII_BMCR, &phy_data);
173 regs_buff[73] = (u32) phy_data;
174 atl1c_read_phy_reg(hw, MII_BMSR, &phy_data);
175 regs_buff[74] = (u32) phy_data;
176}
177
178static int atl1c_get_eeprom_len(struct net_device *netdev)
179{
180 struct atl1c_adapter *adapter = netdev_priv(netdev);
181
182 if (atl1c_check_eeprom_exist(&adapter->hw))
183 return AT_EEPROM_LEN;
184 else
185 return 0;
186}
187
188static int atl1c_get_eeprom(struct net_device *netdev,
189 struct ethtool_eeprom *eeprom, u8 *bytes)
190{
191 struct atl1c_adapter *adapter = netdev_priv(netdev);
192 struct atl1c_hw *hw = &adapter->hw;
193 u32 *eeprom_buff;
194 int first_dword, last_dword;
195 int ret_val = 0;
196 int i;
197
198 if (eeprom->len == 0)
199 return -EINVAL;
200
201 if (!atl1c_check_eeprom_exist(hw)) /* not exist */
202 return -EINVAL;
203
204 eeprom->magic = adapter->pdev->vendor |
205 (adapter->pdev->device << 16);
206
207 first_dword = eeprom->offset >> 2;
208 last_dword = (eeprom->offset + eeprom->len - 1) >> 2;
209
210 eeprom_buff = kmalloc(sizeof(u32) *
211 (last_dword - first_dword + 1), GFP_KERNEL);
212 if (eeprom_buff == NULL)
213 return -ENOMEM;
214
215 for (i = first_dword; i <= last_dword; i++) {
216 if (!atl1c_read_eeprom(hw, i * 4, &(eeprom_buff[i-first_dword]))) {
217 kfree(eeprom_buff);
218 return -EIO;
219 }
220 }
221
222 memcpy(bytes, (u8 *)eeprom_buff + (eeprom->offset & 3),
223 eeprom->len);
224 kfree(eeprom_buff);
225
226 return ret_val;
228}
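/* Worked example of the dword windowing above (editorial illustration, not
 * driver code): a request with eeprom->offset = 6 and eeprom->len = 4 gives
 *	first_dword = 6 >> 2 = 1,   last_dword = (6 + 4 - 1) >> 2 = 2,
 * so dwords 1 and 2 (EEPROM bytes 4..11) are read into eeprom_buff, and the
 * final memcpy skips (offset & 3) = 2 bytes to hand back bytes 6..9.
 */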
229
230static void atl1c_get_drvinfo(struct net_device *netdev,
231 struct ethtool_drvinfo *drvinfo)
232{
233 struct atl1c_adapter *adapter = netdev_priv(netdev);
234
235 strncpy(drvinfo->driver, atl1c_driver_name, sizeof(drvinfo->driver));
236 strncpy(drvinfo->version, atl1c_driver_version,
237 sizeof(drvinfo->version));
238 strncpy(drvinfo->fw_version, "N/A", sizeof(drvinfo->fw_version));
239 strncpy(drvinfo->bus_info, pci_name(adapter->pdev),
240 sizeof(drvinfo->bus_info));
241 drvinfo->n_stats = 0;
242 drvinfo->testinfo_len = 0;
243 drvinfo->regdump_len = atl1c_get_regs_len(netdev);
244 drvinfo->eedump_len = atl1c_get_eeprom_len(netdev);
245}
246
247static void atl1c_get_wol(struct net_device *netdev,
248 struct ethtool_wolinfo *wol)
249{
250 struct atl1c_adapter *adapter = netdev_priv(netdev);
251
252 wol->supported = WAKE_MAGIC | WAKE_PHY;
253 wol->wolopts = 0;
254
255 if (adapter->wol & AT_WUFC_EX)
256 wol->wolopts |= WAKE_UCAST;
257 if (adapter->wol & AT_WUFC_MC)
258 wol->wolopts |= WAKE_MCAST;
259 if (adapter->wol & AT_WUFC_BC)
260 wol->wolopts |= WAKE_BCAST;
261 if (adapter->wol & AT_WUFC_MAG)
262 wol->wolopts |= WAKE_MAGIC;
263 if (adapter->wol & AT_WUFC_LNKC)
264 wol->wolopts |= WAKE_PHY;
265
266 return;
267}
268
269static int atl1c_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
270{
271 struct atl1c_adapter *adapter = netdev_priv(netdev);
272
273 if (wol->wolopts & (WAKE_ARP | WAKE_MAGICSECURE |
274 WAKE_UCAST | WAKE_BCAST | WAKE_MCAST))
275 return -EOPNOTSUPP;
276 /* these settings will always override what we currently have */
277 adapter->wol = 0;
278
279 if (wol->wolopts & WAKE_MAGIC)
280 adapter->wol |= AT_WUFC_MAG;
281 if (wol->wolopts & WAKE_PHY)
282 adapter->wol |= AT_WUFC_LNKC;
283
284 return 0;
285}
286
287static int atl1c_nway_reset(struct net_device *netdev)
288{
289 struct atl1c_adapter *adapter = netdev_priv(netdev);
290 if (netif_running(netdev))
291 atl1c_reinit_locked(adapter);
292 return 0;
293}
294
295static struct ethtool_ops atl1c_ethtool_ops = {
296 .get_settings = atl1c_get_settings,
297 .set_settings = atl1c_set_settings,
298 .get_drvinfo = atl1c_get_drvinfo,
299 .get_regs_len = atl1c_get_regs_len,
300 .get_regs = atl1c_get_regs,
301 .get_wol = atl1c_get_wol,
302 .set_wol = atl1c_set_wol,
303 .get_msglevel = atl1c_get_msglevel,
304 .set_msglevel = atl1c_set_msglevel,
305 .nway_reset = atl1c_nway_reset,
306 .get_link = ethtool_op_get_link,
307 .get_eeprom_len = atl1c_get_eeprom_len,
308 .get_eeprom = atl1c_get_eeprom,
309 .get_tx_csum = atl1c_get_tx_csum,
310 .get_sg = ethtool_op_get_sg,
311 .set_sg = ethtool_op_set_sg,
312};
313
314void atl1c_set_ethtool_ops(struct net_device *netdev)
315{
316 SET_ETHTOOL_OPS(netdev, &atl1c_ethtool_ops);
317}
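atl1c_set_ethtool_ops() is the only export of this file; the probe path would typically hook it up before registering the net_device. A minimal sketch follows; the driver's real probe lives in atl1c_main.c, outside this excerpt, and example_register() is a hypothetical helper, not driver code.

static int example_register(struct net_device *netdev, struct pci_dev *pdev)
{
	SET_NETDEV_DEV(netdev, &pdev->dev);
	atl1c_set_ethtool_ops(netdev);		/* hook the ops defined above */
	return register_netdev(netdev);
}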
diff --git a/drivers/net/atl1c/atl1c_hw.c b/drivers/net/atl1c/atl1c_hw.c
new file mode 100644
index 000000000000..3e69b940b8f7
--- /dev/null
+++ b/drivers/net/atl1c/atl1c_hw.c
@@ -0,0 +1,527 @@
1/*
2 * Copyright(c) 2007 Atheros Corporation. All rights reserved.
3 *
4 * Derived from Intel e1000 driver
5 * Copyright(c) 1999 - 2005 Intel Corporation. All rights reserved.
6 *
7 * This program is free software; you can redistribute it and/or modify it
8 * under the terms of the GNU General Public License as published by the Free
9 * Software Foundation; either version 2 of the License, or (at your option)
10 * any later version.
11 *
12 * This program is distributed in the hope that it will be useful, but WITHOUT
13 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
14 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
15 * more details.
16 *
17 * You should have received a copy of the GNU General Public License along with
18 * this program; if not, write to the Free Software Foundation, Inc., 59
19 * Temple Place - Suite 330, Boston, MA 02111-1307, USA.
20 */
21#include <linux/pci.h>
22#include <linux/delay.h>
23#include <linux/mii.h>
24#include <linux/crc32.h>
25
26#include "atl1c.h"
27
28/*
29 * check_eeprom_exist
30 * return 1 if an eeprom exists
31 */
32int atl1c_check_eeprom_exist(struct atl1c_hw *hw)
33{
34 u32 data;
35
36 AT_READ_REG(hw, REG_TWSI_DEBUG, &data);
37 if (data & TWSI_DEBUG_DEV_EXIST)
38 return 1;
39
40 return 0;
41}
42
43void atl1c_hw_set_mac_addr(struct atl1c_hw *hw)
44{
45 u32 value;
46 /*
47 * 00-0B-6A-F6-00-DC
48 * 0: 6AF600DC 1: 000B
49 * low dword
50 */
51 value = (((u32)hw->mac_addr[2]) << 24) |
52 (((u32)hw->mac_addr[3]) << 16) |
53 (((u32)hw->mac_addr[4]) << 8) |
54 (((u32)hw->mac_addr[5])) ;
55 AT_WRITE_REG_ARRAY(hw, REG_MAC_STA_ADDR, 0, value);
56 /* high dword */
57 value = (((u32)hw->mac_addr[0]) << 8) |
58 (((u32)hw->mac_addr[1])) ;
59 AT_WRITE_REG_ARRAY(hw, REG_MAC_STA_ADDR, 1, value);
60}
61
62/*
63 * atl1c_get_permanent_address
64 * return 0 if a valid mac address is obtained
65 */
66static int atl1c_get_permanent_address(struct atl1c_hw *hw)
67{
68 u32 addr[2];
69 u32 i;
70 u32 otp_ctrl_data;
71 u32 twsi_ctrl_data;
72 u8 eth_addr[ETH_ALEN];
73
74 /* init */
75 addr[0] = addr[1] = 0;
76 AT_READ_REG(hw, REG_OTP_CTRL, &otp_ctrl_data);
77 if (atl1c_check_eeprom_exist(hw)) {
78 /* Enable OTP CLK */
79 if (!(otp_ctrl_data & OTP_CTRL_CLK_EN)) {
80 otp_ctrl_data |= OTP_CTRL_CLK_EN;
81 AT_WRITE_REG(hw, REG_OTP_CTRL, otp_ctrl_data);
82 AT_WRITE_FLUSH(hw);
83 msleep(1);
84 }
85
86 AT_READ_REG(hw, REG_TWSI_CTRL, &twsi_ctrl_data);
87 twsi_ctrl_data |= TWSI_CTRL_SW_LDSTART;
88 AT_WRITE_REG(hw, REG_TWSI_CTRL, twsi_ctrl_data);
89 for (i = 0; i < AT_TWSI_EEPROM_TIMEOUT; i++) {
90 msleep(10);
91 AT_READ_REG(hw, REG_TWSI_CTRL, &twsi_ctrl_data);
92 if ((twsi_ctrl_data & TWSI_CTRL_SW_LDSTART) == 0)
93 break;
94 }
95 if (i >= AT_TWSI_EEPROM_TIMEOUT)
96 return -1;
97 }
98 /* Disable OTP_CLK */
99 if (otp_ctrl_data & OTP_CTRL_CLK_EN) {
100 otp_ctrl_data &= ~OTP_CTRL_CLK_EN;
101 AT_WRITE_REG(hw, REG_OTP_CTRL, otp_ctrl_data);
102 AT_WRITE_FLUSH(hw);
103 msleep(1);
104 }
105
106 /* maybe MAC-address is from BIOS */
107 AT_READ_REG(hw, REG_MAC_STA_ADDR, &addr[0]);
108 AT_READ_REG(hw, REG_MAC_STA_ADDR + 4, &addr[1]);
109 *(u32 *) &eth_addr[2] = swab32(addr[0]);
110 *(u16 *) &eth_addr[0] = swab16(*(u16 *)&addr[1]);
111
112 if (is_valid_ether_addr(eth_addr)) {
113 memcpy(hw->perm_mac_addr, eth_addr, ETH_ALEN);
114 return 0;
115 }
116
117 return -1;
118}
119
120bool atl1c_read_eeprom(struct atl1c_hw *hw, u32 offset, u32 *p_value)
121{
122 int i;
123 int ret = false;
124 u32 otp_ctrl_data;
125 u32 control;
126 u32 data;
127
128 if (offset & 3)
129 return ret; /* address is not dword-aligned */
130
131 AT_READ_REG(hw, REG_OTP_CTRL, &otp_ctrl_data);
132 if (!(otp_ctrl_data & OTP_CTRL_CLK_EN))
133 AT_WRITE_REG(hw, REG_OTP_CTRL,
134 (otp_ctrl_data | OTP_CTRL_CLK_EN));
135
136 AT_WRITE_REG(hw, REG_EEPROM_DATA_LO, 0);
137 control = (offset & EEPROM_CTRL_ADDR_MASK) << EEPROM_CTRL_ADDR_SHIFT;
138 AT_WRITE_REG(hw, REG_EEPROM_CTRL, control);
139
140 for (i = 0; i < 10; i++) {
141 udelay(100);
142 AT_READ_REG(hw, REG_EEPROM_CTRL, &control);
143 if (control & EEPROM_CTRL_RW)
144 break;
145 }
146 if (control & EEPROM_CTRL_RW) {
147 AT_READ_REG(hw, REG_EEPROM_CTRL, &data);
148 AT_READ_REG(hw, REG_EEPROM_DATA_LO, p_value);
149 data = data & 0xFFFF;
150 *p_value = swab32((data << 16) | (*p_value >> 16));
151 ret = true;
152 }
153 if (!(otp_ctrl_data & OTP_CTRL_CLK_EN))
154 AT_WRITE_REG(hw, REG_OTP_CTRL, otp_ctrl_data);
155
156 return ret;
157}
158/*
159 * Reads the adapter's MAC address from the EEPROM
160 *
161 * hw - Struct containing variables accessed by shared code
162 */
163int atl1c_read_mac_addr(struct atl1c_hw *hw)
164{
165 int err = 0;
166
167 err = atl1c_get_permanent_address(hw);
168 if (err)
169 random_ether_addr(hw->perm_mac_addr);
170
171 memcpy(hw->mac_addr, hw->perm_mac_addr, sizeof(hw->perm_mac_addr));
172 return 0;
173}
174
175/*
176 * atl1c_hash_mc_addr
177 * purpose
178 * compute the hash value for a multicast address
179 * hash calculation:
180 * 1. compute the 32-bit CRC of the multicast address
181 * 2. bit-reverse the CRC (MSB to LSB)
182 */
183u32 atl1c_hash_mc_addr(struct atl1c_hw *hw, u8 *mc_addr)
184{
185 u32 crc32;
186 u32 value = 0;
187 int i;
188
189 crc32 = ether_crc_le(6, mc_addr);
190 for (i = 0; i < 32; i++)
191 value |= (((crc32 >> i) & 1) << (31 - i));
192
193 return value;
194}
195
196/*
197 * Sets the bit in the multicast table corresponding to the hash value.
198 * hw - Struct containing variables accessed by shared code
199 * hash_value - Multicast address hash value
200 */
201void atl1c_hash_set(struct atl1c_hw *hw, u32 hash_value)
202{
203 u32 hash_bit, hash_reg;
204 u32 mta;
205
206 /*
207 * The HASH Table is a register array of 2 32-bit registers.
208 * It is treated like an array of 64 bits. We want to set
209 * bit BitArray[hash_value]. So we figure out what register
210 * the bit is in, read it, OR in the new bit, then write
211 * back the new value. The register is selected by bit 31 of
212 * the hash value and the bit within that register is selected
213 * by bits 30:26 of the hash value.
214 */
215 hash_reg = (hash_value >> 31) & 0x1;
216 hash_bit = (hash_value >> 26) & 0x1F;
217
218 mta = AT_READ_REG_ARRAY(hw, REG_RX_HASH_TABLE, hash_reg);
219
220 mta |= (1 << hash_bit);
221
222 AT_WRITE_REG_ARRAY(hw, REG_RX_HASH_TABLE, hash_reg, mta);
223}
224
225/*
226 * Reads the value from a PHY register
227 * hw - Struct containing variables accessed by shared code
228 * reg_addr - address of the PHY register to read
229 */
230int atl1c_read_phy_reg(struct atl1c_hw *hw, u16 reg_addr, u16 *phy_data)
231{
232 u32 val;
233 int i;
234
235 val = ((u32)(reg_addr & MDIO_REG_ADDR_MASK)) << MDIO_REG_ADDR_SHIFT |
236 MDIO_START | MDIO_SUP_PREAMBLE | MDIO_RW |
237 MDIO_CLK_25_4 << MDIO_CLK_SEL_SHIFT;
238
239 AT_WRITE_REG(hw, REG_MDIO_CTRL, val);
240
241 for (i = 0; i < MDIO_WAIT_TIMES; i++) {
242 udelay(2);
243 AT_READ_REG(hw, REG_MDIO_CTRL, &val);
244 if (!(val & (MDIO_START | MDIO_BUSY)))
245 break;
246 }
247 if (!(val & (MDIO_START | MDIO_BUSY))) {
248 *phy_data = (u16)val;
249 return 0;
250 }
251
252 return -1;
253}
254
255/*
256 * Writes a value to a PHY register
257 * hw - Struct containing variables accessed by shared code
258 * reg_addr - address of the PHY register to write
259 * data - data to write to the PHY
260 */
261int atl1c_write_phy_reg(struct atl1c_hw *hw, u32 reg_addr, u16 phy_data)
262{
263 int i;
264 u32 val;
265
266 val = ((u32)(phy_data & MDIO_DATA_MASK)) << MDIO_DATA_SHIFT |
267 (reg_addr & MDIO_REG_ADDR_MASK) << MDIO_REG_ADDR_SHIFT |
268 MDIO_SUP_PREAMBLE | MDIO_START |
269 MDIO_CLK_25_4 << MDIO_CLK_SEL_SHIFT;
270
271 AT_WRITE_REG(hw, REG_MDIO_CTRL, val);
272
273 for (i = 0; i < MDIO_WAIT_TIMES; i++) {
274 udelay(2);
275 AT_READ_REG(hw, REG_MDIO_CTRL, &val);
276 if (!(val & (MDIO_START | MDIO_BUSY)))
277 break;
278 }
279
280 if (!(val & (MDIO_START | MDIO_BUSY)))
281 return 0;
282
283 return -1;
284}
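/* Usage sketch (editorial illustration, not driver code): the MDIO helpers
 * above pair with the generic MII register definitions from <linux/mii.h>,
 * e.g. link state can be polled as
 *
 *	u16 bmsr;
 *
 *	if (atl1c_read_phy_reg(hw, MII_BMSR, &bmsr) == 0 &&
 *	    (bmsr & BMSR_LSTATUS)) {
 *		// link is up; atl1c_get_speed_and_duplex() below resolves
 *		// the negotiated speed and duplex
 *	}
 */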
285
286/*
287 * Configures PHY autoneg and flow control advertisement settings
288 *
289 * hw - Struct containing variables accessed by shared code
290 */
291static int atl1c_phy_setup_adv(struct atl1c_hw *hw)
292{
293 u16 mii_adv_data = ADVERTISE_DEFAULT_CAP & ~ADVERTISE_SPEED_MASK;
294 u16 mii_giga_ctrl_data = GIGA_CR_1000T_DEFAULT_CAP &
295 ~GIGA_CR_1000T_SPEED_MASK;
296
297 if (hw->autoneg_advertised & ADVERTISED_10baseT_Half)
298 mii_adv_data |= ADVERTISE_10HALF;
299 if (hw->autoneg_advertised & ADVERTISED_10baseT_Full)
300 mii_adv_data |= ADVERTISE_10FULL;
301 if (hw->autoneg_advertised & ADVERTISED_100baseT_Half)
302 mii_adv_data |= ADVERTISE_100HALF;
303 if (hw->autoneg_advertised & ADVERTISED_100baseT_Full)
304 mii_adv_data |= ADVERTISE_100FULL;
305
306 if (hw->autoneg_advertised & ADVERTISED_Autoneg)
307 mii_adv_data |= ADVERTISE_10HALF | ADVERTISE_10FULL |
308 ADVERTISE_100HALF | ADVERTISE_100FULL;
309
310 if (hw->ctrl_flags & ATL1C_LINK_CAP_1000M) {
311 if (hw->autoneg_advertised & ADVERTISED_1000baseT_Half)
312 mii_giga_ctrl_data |= ADVERTISE_1000HALF;
313 if (hw->autoneg_advertised & ADVERTISED_1000baseT_Full)
314 mii_giga_ctrl_data |= ADVERTISE_1000FULL;
315 if (hw->autoneg_advertised & ADVERTISED_Autoneg)
316 mii_giga_ctrl_data |= ADVERTISE_1000HALF |
317 ADVERTISE_1000FULL;
318 }
319
320 if (atl1c_write_phy_reg(hw, MII_ADVERTISE, mii_adv_data) != 0 ||
321 atl1c_write_phy_reg(hw, MII_GIGA_CR, mii_giga_ctrl_data) != 0)
322 return -1;
323 return 0;
324}
325
326void atl1c_phy_disable(struct atl1c_hw *hw)
327{
328 AT_WRITE_REGW(hw, REG_GPHY_CTRL,
329 GPHY_CTRL_PW_WOL_DIS | GPHY_CTRL_EXT_RESET);
330}
331
332static void atl1c_phy_magic_data(struct atl1c_hw *hw)
333{
334 u16 data;
335
336 data = ANA_LOOP_SEL_10BT | ANA_EN_MASK_TB | ANA_EN_10BT_IDLE |
337 ((1 & ANA_INTERVAL_SEL_TIMER_MASK) <<
338 ANA_INTERVAL_SEL_TIMER_SHIFT);
339
340 atl1c_write_phy_reg(hw, MII_DBG_ADDR, MII_ANA_CTRL_18);
341 atl1c_write_phy_reg(hw, MII_DBG_DATA, data);
342
343 data = (2 & ANA_SERDES_CDR_BW_MASK) | ANA_MS_PAD_DBG |
344 ANA_SERDES_EN_DEEM | ANA_SERDES_SEL_HSP | ANA_SERDES_EN_PLL |
345 ANA_SERDES_EN_LCKDT;
346
347 atl1c_write_phy_reg(hw, MII_DBG_ADDR, MII_ANA_CTRL_5);
348 atl1c_write_phy_reg(hw, MII_DBG_DATA, data);
349
350 data = (44 & ANA_LONG_CABLE_TH_100_MASK) |
351 ((33 & ANA_SHORT_CABLE_TH_100_MASK) <<
352 ANA_SHORT_CABLE_TH_100_SHIFT) | ANA_BP_BAD_LINK_ACCUM |
353 ANA_BP_SMALL_BW;
354
355 atl1c_write_phy_reg(hw, MII_DBG_ADDR, MII_ANA_CTRL_54);
356 atl1c_write_phy_reg(hw, MII_DBG_DATA, data);
357
358 data = (11 & ANA_IECHO_ADJ_MASK) | ((11 & ANA_IECHO_ADJ_MASK) <<
359 ANA_IECHO_ADJ_2_SHIFT) | ((8 & ANA_IECHO_ADJ_MASK) <<
360 ANA_IECHO_ADJ_1_SHIFT) | ((8 & ANA_IECHO_ADJ_MASK) <<
361 ANA_IECHO_ADJ_0_SHIFT);
362
363 atl1c_write_phy_reg(hw, MII_DBG_ADDR, MII_ANA_CTRL_4);
364 atl1c_write_phy_reg(hw, MII_DBG_DATA, data);
365
366 data = ANA_RESTART_CAL | ((7 & ANA_MANUL_SWICH_ON_MASK) <<
367 ANA_MANUL_SWICH_ON_SHIFT) | ANA_MAN_ENABLE |
368 ANA_SEL_HSP | ANA_EN_HB | ANA_OEN_125M;
369
370 atl1c_write_phy_reg(hw, MII_DBG_ADDR, MII_ANA_CTRL_0);
371 atl1c_write_phy_reg(hw, MII_DBG_DATA, data);
372
373 if (hw->ctrl_flags & ATL1C_HIB_DISABLE) {
374 atl1c_write_phy_reg(hw, MII_DBG_ADDR, MII_ANA_CTRL_41);
375 if (atl1c_read_phy_reg(hw, MII_DBG_DATA, &data) != 0)
376 return;
377 data &= ~ANA_TOP_PS_EN;
378 atl1c_write_phy_reg(hw, MII_DBG_DATA, data);
379
380 atl1c_write_phy_reg(hw, MII_DBG_ADDR, MII_ANA_CTRL_11);
381 if (atl1c_read_phy_reg(hw, MII_DBG_DATA, &data) != 0)
382 return;
383 data &= ~ANA_PS_HIB_EN;
384 atl1c_write_phy_reg(hw, MII_DBG_DATA, data);
385 }
386}
387
388int atl1c_phy_reset(struct atl1c_hw *hw)
389{
390 struct atl1c_adapter *adapter = hw->adapter;
391 struct pci_dev *pdev = adapter->pdev;
392 u32 phy_ctrl_data = GPHY_CTRL_DEFAULT;
393 u32 mii_ier_data = IER_LINK_UP | IER_LINK_DOWN;
394 int err;
395
396 if (hw->ctrl_flags & ATL1C_HIB_DISABLE)
397 phy_ctrl_data &= ~GPHY_CTRL_HIB_EN;
398
399 AT_WRITE_REG(hw, REG_GPHY_CTRL, phy_ctrl_data);
400 AT_WRITE_FLUSH(hw);
401 msleep(40);
402 phy_ctrl_data |= GPHY_CTRL_EXT_RESET;
403 AT_WRITE_REG(hw, REG_GPHY_CTRL, phy_ctrl_data);
404 AT_WRITE_FLUSH(hw);
405 msleep(10);
406
407 /* Enable PHY link-change interrupt */
408 err = atl1c_write_phy_reg(hw, MII_IER, mii_ier_data);
409 if (err) {
410 if (netif_msg_hw(adapter))
411 dev_err(&pdev->dev,
412 "Error enable PHY linkChange Interrupt\n");
413 return err;
414 }
415 if (!(hw->ctrl_flags & ATL1C_FPGA_VERSION))
416 atl1c_phy_magic_data(hw);
417 return 0;
418}
419
420int atl1c_phy_init(struct atl1c_hw *hw)
421{
422 struct atl1c_adapter *adapter = (struct atl1c_adapter *)hw->adapter;
423 struct pci_dev *pdev = adapter->pdev;
424 int ret_val;
425 u16 mii_bmcr_data = BMCR_RESET;
426 u16 phy_id1, phy_id2;
427
428 if ((atl1c_read_phy_reg(hw, MII_PHYSID1, &phy_id1) != 0) ||
429 (atl1c_read_phy_reg(hw, MII_PHYSID2, &phy_id2) != 0)) {
430 if (netif_msg_link(adapter))
431 dev_err(&pdev->dev, "Error get phy ID\n");
432 return -1;
433 }
434 switch (hw->media_type) {
435 case MEDIA_TYPE_AUTO_SENSOR:
436 ret_val = atl1c_phy_setup_adv(hw);
437 if (ret_val) {
438 if (netif_msg_link(adapter))
439 dev_err(&pdev->dev,
440 "Error Setting up Auto-Negotiation\n");
441 return ret_val;
442 }
443 mii_bmcr_data |= BMCR_AUTO_NEG_EN | BMCR_RESTART_AUTO_NEG;
444 break;
445 case MEDIA_TYPE_100M_FULL:
446 mii_bmcr_data |= BMCR_SPEED_100 | BMCR_FULL_DUPLEX;
447 break;
448 case MEDIA_TYPE_100M_HALF:
449 mii_bmcr_data |= BMCR_SPEED_100;
450 break;
451 case MEDIA_TYPE_10M_FULL:
452 mii_bmcr_data |= BMCR_SPEED_10 | BMCR_FULL_DUPLEX;
453 break;
454 case MEDIA_TYPE_10M_HALF:
455 mii_bmcr_data |= BMCR_SPEED_10;
456 break;
457 default:
458 if (netif_msg_link(adapter))
459 dev_err(&pdev->dev, "Wrong Media type %d\n",
460 hw->media_type);
461 return -1;
462 break;
463 }
464
465 ret_val = atl1c_write_phy_reg(hw, MII_BMCR, mii_bmcr_data);
466 if (ret_val)
467 return ret_val;
468 hw->phy_configured = true;
469
470 return 0;
471}
472
473/*
474 * Detects the current speed and duplex settings of the hardware.
475 *
476 * hw - Struct containing variables accessed by shared code
477 * speed - Speed of the connection
478 * duplex - Duplex setting of the connection
479 */
480int atl1c_get_speed_and_duplex(struct atl1c_hw *hw, u16 *speed, u16 *duplex)
481{
482 int err;
483 u16 phy_data;
484
485 /* Read PHY Specific Status Register (17) */
486 err = atl1c_read_phy_reg(hw, MII_GIGA_PSSR, &phy_data);
487 if (err)
488 return err;
489
490 if (!(phy_data & GIGA_PSSR_SPD_DPLX_RESOLVED))
491 return -1;
492
493 switch (phy_data & GIGA_PSSR_SPEED) {
494 case GIGA_PSSR_1000MBS:
495 *speed = SPEED_1000;
496 break;
497 case GIGA_PSSR_100MBS:
498 *speed = SPEED_100;
499 break;
500 case GIGA_PSSR_10MBS:
501 *speed = SPEED_10;
502 break;
503 default:
504 return -1;
505 break;
506 }
507
508 if (phy_data & GIGA_PSSR_DPLX)
509 *duplex = FULL_DUPLEX;
510 else
511 *duplex = HALF_DUPLEX;
512
513 return 0;
514}
515
516int atl1c_restart_autoneg(struct atl1c_hw *hw)
517{
518 int err = 0;
519 u16 mii_bmcr_data = BMCR_RESET;
520
521 err = atl1c_phy_setup_adv(hw);
522 if (err)
523 return err;
524 mii_bmcr_data |= BMCR_AUTO_NEG_EN | BMCR_RESTART_AUTO_NEG;
525
526 return atl1c_write_phy_reg(hw, MII_BMCR, mii_bmcr_data);
527}
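The two hashing helpers above are meant to be used together when the receive multicast filter is programmed. The driver's actual set_multicast_list path is in atl1c_main.c and is not part of this excerpt, so the following is only a sketch of how they combine with the REG_RX_HASH_TABLE accessors from atl1c.h; example_program_mc_hash() is a hypothetical name.

static void example_program_mc_hash(struct atl1c_hw *hw,
				    u8 mc_addrs[][ETH_ALEN], int count)
{
	int i;

	/* clear both 32-bit halves of the 64-bit hash table */
	AT_WRITE_REG_ARRAY(hw, REG_RX_HASH_TABLE, 0, 0);
	AT_WRITE_REG_ARRAY(hw, REG_RX_HASH_TABLE, 1, 0);

	/* hash each address and set the corresponding filter bit */
	for (i = 0; i < count; i++)
		atl1c_hash_set(hw, atl1c_hash_mc_addr(hw, mc_addrs[i]));
}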
diff --git a/drivers/net/atl1c/atl1c_hw.h b/drivers/net/atl1c/atl1c_hw.h
new file mode 100644
index 000000000000..c2c738df5c63
--- /dev/null
+++ b/drivers/net/atl1c/atl1c_hw.h
@@ -0,0 +1,859 @@
1/*
2 * Copyright(c) 2008 - 2009 Atheros Corporation. All rights reserved.
3 *
4 * Derived from Intel e1000 driver
5 * Copyright(c) 1999 - 2005 Intel Corporation. All rights reserved.
6 *
7 * This program is free software; you can redistribute it and/or modify it
8 * under the terms of the GNU General Public License as published by the Free
9 * Software Foundation; either version 2 of the License, or (at your option)
10 * any later version.
11 *
12 * This program is distributed in the hope that it will be useful, but WITHOUT
13 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
14 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
15 * more details.
16 *
17 * You should have received a copy of the GNU General Public License along with
18 * this program; if not, write to the Free Software Foundation, Inc., 59
19 * Temple Place - Suite 330, Boston, MA 02111-1307, USA.
20 */
21
22#ifndef _ATL1C_HW_H_
23#define _ATL1C_HW_H_
24
25#include <linux/types.h>
26#include <linux/mii.h>
27
28struct atl1c_adapter;
29struct atl1c_hw;
30
31/* function prototype */
32void atl1c_phy_disable(struct atl1c_hw *hw);
33void atl1c_hw_set_mac_addr(struct atl1c_hw *hw);
34int atl1c_phy_reset(struct atl1c_hw *hw);
35int atl1c_read_mac_addr(struct atl1c_hw *hw);
36int atl1c_get_speed_and_duplex(struct atl1c_hw *hw, u16 *speed, u16 *duplex);
37u32 atl1c_hash_mc_addr(struct atl1c_hw *hw, u8 *mc_addr);
38void atl1c_hash_set(struct atl1c_hw *hw, u32 hash_value);
39int atl1c_read_phy_reg(struct atl1c_hw *hw, u16 reg_addr, u16 *phy_data);
40int atl1c_write_phy_reg(struct atl1c_hw *hw, u32 reg_addr, u16 phy_data);
41bool atl1c_read_eeprom(struct atl1c_hw *hw, u32 offset, u32 *p_value);
42int atl1c_phy_init(struct atl1c_hw *hw);
43int atl1c_check_eeprom_exist(struct atl1c_hw *hw);
44int atl1c_restart_autoneg(struct atl1c_hw *hw);
45
46/* register definition */
47#define REG_DEVICE_CAP 0x5C
48#define DEVICE_CAP_MAX_PAYLOAD_MASK 0x7
49#define DEVICE_CAP_MAX_PAYLOAD_SHIFT 0
50
51#define REG_DEVICE_CTRL 0x60
52#define DEVICE_CTRL_MAX_PAYLOAD_MASK 0x7
53#define DEVICE_CTRL_MAX_PAYLOAD_SHIFT 5
54#define DEVICE_CTRL_MAX_RREQ_SZ_MASK 0x7
55#define DEVICE_CTRL_MAX_RREQ_SZ_SHIFT 12
56
57#define REG_LINK_CTRL 0x68
58#define LINK_CTRL_L0S_EN 0x01
59#define LINK_CTRL_L1_EN 0x02
60
61#define REG_VPD_CAP 0x6C
62#define VPD_CAP_ID_MASK 0xff
63#define VPD_CAP_ID_SHIFT 0
64#define VPD_CAP_NEXT_PTR_MASK 0xFF
65#define VPD_CAP_NEXT_PTR_SHIFT 8
66#define VPD_CAP_VPD_ADDR_MASK 0x7FFF
67#define VPD_CAP_VPD_ADDR_SHIFT 16
68#define VPD_CAP_VPD_FLAG 0x80000000
69
70#define REG_VPD_DATA 0x70
71
72#define REG_PCIE_UC_SEVERITY 0x10C
73#define PCIE_UC_SERVRITY_TRN 0x00000001
74#define PCIE_UC_SERVRITY_DLP 0x00000010
75#define PCIE_UC_SERVRITY_PSN_TLP 0x00001000
76#define PCIE_UC_SERVRITY_FCP 0x00002000
77#define PCIE_UC_SERVRITY_CPL_TO 0x00004000
78#define PCIE_UC_SERVRITY_CA 0x00008000
79#define PCIE_UC_SERVRITY_UC 0x00010000
80#define PCIE_UC_SERVRITY_ROV 0x00020000
81#define PCIE_UC_SERVRITY_MLFP 0x00040000
82#define PCIE_UC_SERVRITY_ECRC 0x00080000
83#define PCIE_UC_SERVRITY_UR 0x00100000
84
85#define REG_DEV_SERIALNUM_CTRL 0x200
86#define REG_DEV_MAC_SEL_MASK 0x0 /* 0:EUI; 1:MAC */
87#define REG_DEV_MAC_SEL_SHIFT 0
88#define REG_DEV_SERIAL_NUM_EN_MASK 0x1
89#define REG_DEV_SERIAL_NUM_EN_SHIFT 1
90
91#define REG_TWSI_CTRL 0x218
92#define TWSI_CTRL_LD_OFFSET_MASK 0xFF
93#define TWSI_CTRL_LD_OFFSET_SHIFT 0
94#define TWSI_CTRL_LD_SLV_ADDR_MASK 0x7
95#define TWSI_CTRL_LD_SLV_ADDR_SHIFT 8
96#define TWSI_CTRL_SW_LDSTART 0x800
97#define TWSI_CTRL_HW_LDSTART 0x1000
98#define TWSI_CTRL_SMB_SLV_ADDR_MASK 0x7F
99#define TWSI_CTRL_SMB_SLV_ADDR_SHIFT 15
100#define TWSI_CTRL_LD_EXIST 0x400000
101#define TWSI_CTRL_READ_FREQ_SEL_MASK 0x3
102#define TWSI_CTRL_READ_FREQ_SEL_SHIFT 23
103#define TWSI_CTRL_FREQ_SEL_100K 0
104#define TWSI_CTRL_FREQ_SEL_200K 1
105#define TWSI_CTRL_FREQ_SEL_300K 2
106#define TWSI_CTRL_FREQ_SEL_400K 3
107#define TWSI_CTRL_SMB_SLV_ADDR
108#define TWSI_CTRL_WRITE_FREQ_SEL_MASK 0x3
109#define TWSI_CTRL_WRITE_FREQ_SEL_SHIFT 24
110
111
112#define REG_PCIE_DEV_MISC_CTRL 0x21C
113#define PCIE_DEV_MISC_EXT_PIPE 0x2
114#define PCIE_DEV_MISC_RETRY_BUFDIS 0x1
115#define PCIE_DEV_MISC_SPIROM_EXIST 0x4
116#define PCIE_DEV_MISC_SERDES_ENDIAN 0x8
117#define PCIE_DEV_MISC_SERDES_SEL_DIN 0x10
118
119#define REG_PCIE_PHYMISC 0x1000
120#define PCIE_PHYMISC_FORCE_RCV_DET 0x4
121
122#define REG_TWSI_DEBUG 0x1108
123#define TWSI_DEBUG_DEV_EXIST 0x20000000
124
125#define REG_EEPROM_CTRL 0x12C0
126#define EEPROM_CTRL_DATA_HI_MASK 0xFFFF
127#define EEPROM_CTRL_DATA_HI_SHIFT 0
128#define EEPROM_CTRL_ADDR_MASK 0x3FF
129#define EEPROM_CTRL_ADDR_SHIFT 16
130#define EEPROM_CTRL_ACK 0x40000000
131#define EEPROM_CTRL_RW 0x80000000
132
133#define REG_EEPROM_DATA_LO 0x12C4
134
135#define REG_OTP_CTRL 0x12F0
136#define OTP_CTRL_CLK_EN 0x0002
137
138#define REG_PM_CTRL 0x12F8
139#define PM_CTRL_SDES_EN 0x00000001
140#define PM_CTRL_RBER_EN 0x00000002
141#define PM_CTRL_CLK_REQ_EN 0x00000004
142#define PM_CTRL_ASPM_L1_EN 0x00000008
143#define PM_CTRL_SERDES_L1_EN 0x00000010
144#define PM_CTRL_SERDES_PLL_L1_EN 0x00000020
145#define PM_CTRL_SERDES_PD_EX_L1 0x00000040
146#define PM_CTRL_SERDES_BUDS_RX_L1_EN 0x00000080
147#define PM_CTRL_L0S_ENTRY_TIMER_MASK 0xF
148#define PM_CTRL_L0S_ENTRY_TIMER_SHIFT 8
149#define PM_CTRL_ASPM_L0S_EN 0x00001000
150#define PM_CTRL_CLK_SWH_L1 0x00002000
151#define PM_CTRL_CLK_PWM_VER1_1 0x00004000
152#define PM_CTRL_PCIE_RECV 0x00008000
153#define PM_CTRL_L1_ENTRY_TIMER_MASK 0xF
154#define PM_CTRL_L1_ENTRY_TIMER_SHIFT 16
155#define PM_CTRL_PM_REQ_TIMER_MASK 0xF
156#define PM_CTRL_PM_REQ_TIMER_SHIFT 20
157#define PM_CTRL_LCKDET_TIMER_MASK 0x3F
158#define PM_CTRL_LCKDET_TIMER_SHIFT 24
159#define PM_CTRL_MAC_ASPM_CHK 0x40000000
160#define PM_CTRL_HOTRST 0x80000000
161
162/* Selene Master Control Register */
163#define REG_MASTER_CTRL 0x1400
164#define MASTER_CTRL_SOFT_RST 0x1
165#define MASTER_CTRL_TEST_MODE_MASK 0x3
166#define MASTER_CTRL_TEST_MODE_SHIFT 2
167#define MASTER_CTRL_BERT_START 0x10
168#define MASTER_CTRL_MTIMER_EN 0x100
169#define MASTER_CTRL_MANUAL_INT 0x200
170#define MASTER_CTRL_TX_ITIMER_EN 0x400
171#define MASTER_CTRL_RX_ITIMER_EN 0x800
172#define MASTER_CTRL_CLK_SEL_DIS 0x1000
173#define MASTER_CTRL_CLK_SWH_MODE 0x2000
174#define MASTER_CTRL_INT_RDCLR 0x4000
175#define MASTER_CTRL_REV_NUM_SHIFT 16
176#define MASTER_CTRL_REV_NUM_MASK 0xff
177#define MASTER_CTRL_DEV_ID_SHIFT 24
178#define MASTER_CTRL_DEV_ID_MASK 0x7f
179#define MASTER_CTRL_OTP_SEL 0x80000000
180
181/* Timer Initial Value Register */
182#define REG_MANUAL_TIMER_INIT 0x1404
183
184/* IRQ ModeratorTimer Initial Value Register */
185#define REG_IRQ_MODRT_TIMER_INIT 0x1408
186#define IRQ_MODRT_TIMER_MASK 0xffff
187#define IRQ_MODRT_TX_TIMER_SHIFT 0
188#define IRQ_MODRT_RX_TIMER_SHIFT 16
189
190#define REG_GPHY_CTRL 0x140C
191#define GPHY_CTRL_EXT_RESET 0x1
192#define GPHY_CTRL_RTL_MODE 0x2
193#define GPHY_CTRL_LED_MODE 0x4
194#define GPHY_CTRL_ANEG_NOW 0x8
195#define GPHY_CTRL_REV_ANEG 0x10
196#define GPHY_CTRL_GATE_25M_EN 0x20
197#define GPHY_CTRL_LPW_EXIT 0x40
198#define GPHY_CTRL_PHY_IDDQ 0x80
199#define GPHY_CTRL_PHY_IDDQ_DIS 0x100
200#define GPHY_CTRL_GIGA_DIS 0x200
201#define GPHY_CTRL_HIB_EN 0x400
202#define GPHY_CTRL_HIB_PULSE 0x800
203#define GPHY_CTRL_SEL_ANA_RST 0x1000
204#define GPHY_CTRL_PHY_PLL_ON 0x2000
205#define GPHY_CTRL_PWDOWN_HW 0x4000
206#define GPHY_CTRL_PHY_PLL_BYPASS 0x8000
207
208#define GPHY_CTRL_DEFAULT ( \
209 GPHY_CTRL_SEL_ANA_RST |\
210 GPHY_CTRL_HIB_PULSE |\
211 GPHY_CTRL_HIB_EN)
212
213#define GPHY_CTRL_PW_WOL_DIS ( \
214 GPHY_CTRL_SEL_ANA_RST |\
215 GPHY_CTRL_HIB_PULSE |\
216 GPHY_CTRL_HIB_EN |\
217 GPHY_CTRL_PWDOWN_HW |\
218 GPHY_CTRL_PHY_IDDQ)
219
220/* Block IDLE Status Register */
221#define REG_IDLE_STATUS 0x1410
222#define IDLE_STATUS_MASK 0x00FF
223#define IDLE_STATUS_RXMAC_NO_IDLE 0x1
224#define IDLE_STATUS_TXMAC_NO_IDLE 0x2
225#define IDLE_STATUS_RXQ_NO_IDLE 0x4
226#define IDLE_STATUS_TXQ_NO_IDLE 0x8
227#define IDLE_STATUS_DMAR_NO_IDLE 0x10
228#define IDLE_STATUS_DMAW_NO_IDLE 0x20
229#define IDLE_STATUS_SMB_NO_IDLE 0x40
230#define IDLE_STATUS_CMB_NO_IDLE 0x80
231
232/* MDIO Control Register */
233#define REG_MDIO_CTRL 0x1414
234#define MDIO_DATA_MASK 0xffff /* On MDIO write, the 16-bit
235 * control data to write to PHY
236 * MII management register */
237#define MDIO_DATA_SHIFT 0 /* On MDIO read, the 16-bit
238 * status data that was read
239 * from the PHY MII management register */
240#define MDIO_REG_ADDR_MASK 0x1f /* MDIO register address */
241#define MDIO_REG_ADDR_SHIFT 16
242#define MDIO_RW 0x200000 /* 1: read, 0: write */
243#define MDIO_SUP_PREAMBLE 0x400000 /* Suppress preamble */
244#define MDIO_START 0x800000 /* Write 1 to initiate the MDIO
245 * master. And this bit is self
246 * cleared after one cycle */
247#define MDIO_CLK_SEL_SHIFT 24
248#define MDIO_CLK_25_4 0
249#define MDIO_CLK_25_6 2
250#define MDIO_CLK_25_8 3
251#define MDIO_CLK_25_10 4
252#define MDIO_CLK_25_14 5
253#define MDIO_CLK_25_20 6
254#define MDIO_CLK_25_28 7
255#define MDIO_BUSY 0x8000000
256#define MDIO_AP_EN 0x10000000
257#define MDIO_WAIT_TIMES 10
258
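/*
 * Editor's sketch (not part of the patch): how the MDIO_* fields above are
 * combined for one PHY register read.  The function name is illustrative
 * only; it assumes struct atl1c_hw and the AT_READ_REG()/AT_WRITE_REG()
 * helpers used throughout this driver.  The driver's own atl1c_read_phy_reg()
 * additionally selects an MDIO clock via MDIO_CLK_SEL_SHIFT.
 */
static inline int example_mdio_read(struct atl1c_hw *hw, u16 reg, u16 *val)
{
	u32 ctrl = ((u32)(reg & MDIO_REG_ADDR_MASK) << MDIO_REG_ADDR_SHIFT) |
		   MDIO_SUP_PREAMBLE |
		   MDIO_RW |		/* 1 = read */
		   MDIO_START;		/* self-clearing start bit */
	int i;

	AT_WRITE_REG(hw, REG_MDIO_CTRL, ctrl);
	for (i = 0; i < MDIO_WAIT_TIMES; i++) {
		AT_READ_REG(hw, REG_MDIO_CTRL, &ctrl);
		if (!(ctrl & (MDIO_START | MDIO_BUSY))) {
			*val = ctrl & MDIO_DATA_MASK; /* low 16 bits hold the data */
			return 0;
		}
		udelay(2);
	}
	return -1;	/* PHY did not complete the cycle in time */
}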
259/* MII PHY Status Register */
260#define REG_PHY_STATUS 0x1418
261#define PHY_GENERAL_STATUS_MASK 0xFFFF
262#define PHY_STATUS_RECV_ENABLE 0x0001
263#define PHY_OE_PWSP_STATUS_MASK 0x07FF
264#define PHY_OE_PWSP_STATUS_SHIFT 16
265#define PHY_STATUS_LPW_STATE 0x80000000
266/* BIST Control and Status Register0 (for the Packet Memory) */
267#define REG_BIST0_CTRL 0x141c
268#define BIST0_NOW 0x1
269#define BIST0_SRAM_FAIL 0x2 /* 1: The SRAM failure is
270 * un-repairable because
271 * it has address decoder
272 * failure or more than 1 cell
273 * stuck-to-x failure */
274#define BIST0_FUSE_FLAG 0x4
275
276/* BIST Control and Status Register1 (for the retry buffer of PCI Express) */
277#define REG_BIST1_CTRL 0x1420
278#define BIST1_NOW 0x1
279#define BIST1_SRAM_FAIL 0x2
280#define BIST1_FUSE_FLAG 0x4
281
282/* SerDes Lock Detect Control and Status Register */
283#define REG_SERDES_LOCK 0x1424
284#define SERDES_LOCK_DETECT 0x1 /* SerDes lock detected. This signal
285 * comes from Analog SerDes */
286#define SERDES_LOCK_DETECT_EN 0x2 /* 1: Enable SerDes Lock detect function */
287
288/* MAC Control Register */
289#define REG_MAC_CTRL 0x1480
290#define MAC_CTRL_TX_EN 0x1
291#define MAC_CTRL_RX_EN 0x2
292#define MAC_CTRL_TX_FLOW 0x4
293#define MAC_CTRL_RX_FLOW 0x8
294#define MAC_CTRL_LOOPBACK 0x10
295#define MAC_CTRL_DUPLX 0x20
296#define MAC_CTRL_ADD_CRC 0x40
297#define MAC_CTRL_PAD 0x80
298#define MAC_CTRL_LENCHK 0x100
299#define MAC_CTRL_HUGE_EN 0x200
300#define MAC_CTRL_PRMLEN_SHIFT 10
301#define MAC_CTRL_PRMLEN_MASK 0xf
302#define MAC_CTRL_RMV_VLAN 0x4000
303#define MAC_CTRL_PROMIS_EN 0x8000
304#define MAC_CTRL_TX_PAUSE 0x10000
305#define MAC_CTRL_SCNT 0x20000
306#define MAC_CTRL_SRST_TX 0x40000
307#define MAC_CTRL_TX_SIMURST 0x80000
308#define MAC_CTRL_SPEED_SHIFT 20
309#define MAC_CTRL_SPEED_MASK 0x3
310#define MAC_CTRL_DBG_TX_BKPRESURE 0x400000
311#define MAC_CTRL_TX_HUGE 0x800000
312#define MAC_CTRL_RX_CHKSUM_EN 0x1000000
313#define MAC_CTRL_MC_ALL_EN 0x2000000
314#define MAC_CTRL_BC_EN 0x4000000
315#define MAC_CTRL_DBG 0x8000000
316#define MAC_CTRL_SINGLE_PAUSE_EN 0x10000000
317
318/* MAC IPG/IFG Control Register */
319#define REG_MAC_IPG_IFG 0x1484
320#define MAC_IPG_IFG_IPGT_SHIFT 0 /* Desired back to back
321 * inter-packet gap. The
322 * default is 96-bit time */
323#define MAC_IPG_IFG_IPGT_MASK 0x7f
324#define MAC_IPG_IFG_MIFG_SHIFT 8 /* Minimum number of IFG to
325 * enforce in between RX frames */
326#define MAC_IPG_IFG_MIFG_MASK 0xff /* frames whose gap is below this IFG are dropped */
327#define MAC_IPG_IFG_IPGR1_SHIFT 16 /* 64bit Carrier-Sense window */
328#define MAC_IPG_IFG_IPGR1_MASK 0x7f
329#define MAC_IPG_IFG_IPGR2_SHIFT 24 /* 96-bit IPG window */
330#define MAC_IPG_IFG_IPGR2_MASK 0x7f
331
332/* MAC STATION ADDRESS */
333#define REG_MAC_STA_ADDR 0x1488
334
335/* Hash table for multicast address */
336#define REG_RX_HASH_TABLE 0x1490
337
338/* MAC Half-Duplex Control Register */
339#define REG_MAC_HALF_DUPLX_CTRL 0x1498
340#define MAC_HALF_DUPLX_CTRL_LCOL_SHIFT 0 /* Collision Window */
341#define MAC_HALF_DUPLX_CTRL_LCOL_MASK 0x3ff
342#define MAC_HALF_DUPLX_CTRL_RETRY_SHIFT 12
343#define MAC_HALF_DUPLX_CTRL_RETRY_MASK 0xf
344#define MAC_HALF_DUPLX_CTRL_EXC_DEF_EN 0x10000
345#define MAC_HALF_DUPLX_CTRL_NO_BACK_C 0x20000
346#define MAC_HALF_DUPLX_CTRL_NO_BACK_P 0x40000 /* No back-off on backpressure,
347 * immediately start the
348 * transmission after back pressure */
349#define MAC_HALF_DUPLX_CTRL_ABEBE 0x80000 /* 1: Alternative Binary Exponential Back-off Enabled */
350#define MAC_HALF_DUPLX_CTRL_ABEBT_SHIFT 20 /* Maximum binary exponential number */
351#define MAC_HALF_DUPLX_CTRL_ABEBT_MASK 0xf
352#define MAC_HALF_DUPLX_CTRL_JAMIPG_SHIFT 24 /* IPG to start JAM for collision-based flow
353#define MAC_HALF_DUPLX_CTRL_JAMIPG_MASK 0xf  * control in half-duplex mode, in units of 8-bit time */
354
355/* Maximum Frame Length Control Register */
356#define REG_MTU 0x149c
357
358/* Wake-On-Lan control register */
359#define REG_WOL_CTRL 0x14a0
360#define WOL_PATTERN_EN 0x00000001
361#define WOL_PATTERN_PME_EN 0x00000002
362#define WOL_MAGIC_EN 0x00000004
363#define WOL_MAGIC_PME_EN 0x00000008
364#define WOL_LINK_CHG_EN 0x00000010
365#define WOL_LINK_CHG_PME_EN 0x00000020
366#define WOL_PATTERN_ST 0x00000100
367#define WOL_MAGIC_ST 0x00000200
368#define WOL_LINKCHG_ST 0x00000400
369#define WOL_CLK_SWITCH_EN 0x00008000
370#define WOL_PT0_EN 0x00010000
371#define WOL_PT1_EN 0x00020000
372#define WOL_PT2_EN 0x00040000
373#define WOL_PT3_EN 0x00080000
374#define WOL_PT4_EN 0x00100000
375#define WOL_PT5_EN 0x00200000
376#define WOL_PT6_EN 0x00400000
377
378/* WOL Pattern Length (2 DWORDs) */
379#define REG_WOL_PATTERN_LEN 0x14a4
380#define WOL_PT_LEN_MASK 0x7f
381#define WOL_PT0_LEN_SHIFT 0
382#define WOL_PT1_LEN_SHIFT 8
383#define WOL_PT2_LEN_SHIFT 16
384#define WOL_PT3_LEN_SHIFT 24
385#define WOL_PT4_LEN_SHIFT 0
386#define WOL_PT5_LEN_SHIFT 8
387#define WOL_PT6_LEN_SHIFT 16
388
389/* Internal SRAM Partition Register */
390#define RFDX_HEAD_ADDR_MASK 0x03FF
391#define RFDX_HARD_ADDR_SHIFT 0
392#define RFDX_TAIL_ADDR_MASK 0x03FF
393#define RFDX_TAIL_ADDR_SHIFT 16
394
395#define REG_SRAM_RFD0_INFO 0x1500
396#define REG_SRAM_RFD1_INFO 0x1504
397#define REG_SRAM_RFD2_INFO 0x1508
398#define REG_SRAM_RFD3_INFO 0x150C
399
400#define REG_RFD_NIC_LEN 0x1510 /* In 8-bytes */
401#define RFD_NIC_LEN_MASK 0x03FF
402
403#define REG_SRAM_TRD_ADDR 0x1518
404#define TPD_HEAD_ADDR_MASK 0x03FF
405#define TPD_HEAD_ADDR_SHIFT 0
406#define TPD_TAIL_ADDR_MASK 0x03FF
407#define TPD_TAIL_ADDR_SHIFT 16
408
409#define REG_SRAM_TRD_LEN 0x151C /* In 8-bytes */
410#define TPD_NIC_LEN_MASK 0x03FF
411
412#define REG_SRAM_RXF_ADDR 0x1520
413#define REG_SRAM_RXF_LEN 0x1524
414#define REG_SRAM_TXF_ADDR 0x1528
415#define REG_SRAM_TXF_LEN 0x152C
416#define REG_SRAM_TCPH_ADDR 0x1530
417#define REG_SRAM_PKTH_ADDR 0x1532
418
419/*
420 * Load Ptr Register
421 * Software sets this bit after the initialization of the head and tail
422 * addresses of all descriptors, as well as the following descriptor
423 * control register, which triggers each function block to load the head
424 * pointer to prepare for the operation. This bit is then self-cleared
425 * after one cycle.
426 */
427#define REG_LOAD_PTR 0x1534
428
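/*
 * Editor's sketch of the load-pointer handshake: program every descriptor
 * base-address and ring-size register first, then kick REG_LOAD_PTR (the bit
 * self-clears).  atl1c_configure_des_ring() later in this patch does exactly
 * this; rfd_dma and count below are illustrative locals only:
 *
 *	AT_WRITE_REG(hw, REG_RFD0_HEAD_ADDR_LO, rfd_dma & AT_DMA_LO_ADDR_MASK);
 *	AT_WRITE_REG(hw, REG_RFD_RING_SIZE, count & RFD_RING_SIZE_MASK);
 *	...
 *	AT_WRITE_REG(hw, REG_LOAD_PTR, 1);
 */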
430#define REG_RX_BASE_ADDR_HI 0x1540
431#define REG_TX_BASE_ADDR_HI 0x1544
432#define REG_SMB_BASE_ADDR_HI 0x1548
433#define REG_SMB_BASE_ADDR_LO 0x154C
434#define REG_RFD0_HEAD_ADDR_LO 0x1550
435#define REG_RFD1_HEAD_ADDR_LO 0x1554
436#define REG_RFD2_HEAD_ADDR_LO 0x1558
437#define REG_RFD3_HEAD_ADDR_LO 0x155C
438#define REG_RFD_RING_SIZE 0x1560
439#define RFD_RING_SIZE_MASK 0x0FFF
440#define REG_RX_BUF_SIZE 0x1564
441#define RX_BUF_SIZE_MASK 0xFFFF
442#define REG_RRD0_HEAD_ADDR_LO 0x1568
443#define REG_RRD1_HEAD_ADDR_LO 0x156C
444#define REG_RRD2_HEAD_ADDR_LO 0x1570
445#define REG_RRD3_HEAD_ADDR_LO 0x1574
446#define REG_RRD_RING_SIZE 0x1578
447#define RRD_RING_SIZE_MASK 0x0FFF
448#define REG_HTPD_HEAD_ADDR_LO 0x157C
449#define REG_NTPD_HEAD_ADDR_LO 0x1580
450#define REG_TPD_RING_SIZE 0x1584
451#define TPD_RING_SIZE_MASK 0xFFFF
452#define REG_CMB_BASE_ADDR_LO 0x1588
453
454/* RSS registers */
455#define REG_RSS_KEY0 0x14B0
456#define REG_RSS_KEY1 0x14B4
457#define REG_RSS_KEY2 0x14B8
458#define REG_RSS_KEY3 0x14BC
459#define REG_RSS_KEY4 0x14C0
460#define REG_RSS_KEY5 0x14C4
461#define REG_RSS_KEY6 0x14C8
462#define REG_RSS_KEY7 0x14CC
463#define REG_RSS_KEY8 0x14D0
464#define REG_RSS_KEY9 0x14D4
465#define REG_IDT_TABLE0 0x14E0
466#define REG_IDT_TABLE1 0x14E4
467#define REG_IDT_TABLE2 0x14E8
468#define REG_IDT_TABLE3 0x14EC
469#define REG_IDT_TABLE4 0x14F0
470#define REG_IDT_TABLE5 0x14F4
471#define REG_IDT_TABLE6 0x14F8
472#define REG_IDT_TABLE7 0x14FC
473#define REG_IDT_TABLE REG_IDT_TABLE0
474#define REG_RSS_HASH_VALUE 0x15B0
475#define REG_RSS_HASH_FLAG 0x15B4
476#define REG_BASE_CPU_NUMBER 0x15B8
477
478/* TXQ Control Register */
479#define REG_TXQ_CTRL 0x1590
480#define TXQ_NUM_TPD_BURST_MASK 0xF
481#define TXQ_NUM_TPD_BURST_SHIFT 0
482#define TXQ_CTRL_IP_OPTION_EN 0x10
483#define TXQ_CTRL_EN 0x20
484#define TXQ_CTRL_ENH_MODE 0x40
485#define TXQ_CTRL_LS_8023_EN 0x80
486#define TXQ_TXF_BURST_NUM_SHIFT 16
487#define TXQ_TXF_BURST_NUM_MASK 0xFFFF
488
489/* Jumbo packet Threshold for task offload */
490#define REG_TX_TSO_OFFLOAD_THRESH 0x1594 /* In 8-bytes */
491#define TX_TSO_OFFLOAD_THRESH_MASK 0x07FF
492
493#define REG_TXF_WATER_MARK 0x1598 /* In 8-bytes */
494#define TXF_WATER_MARK_MASK 0x0FFF
495#define TXF_LOW_WATER_MARK_SHIFT 0
496#define TXF_HIGH_WATER_MARK_SHIFT 16
497#define TXQ_CTRL_BURST_MODE_EN 0x80000000
498
499#define REG_THRUPUT_MON_CTRL 0x159C
500#define THRUPUT_MON_RATE_MASK 0x3
501#define THRUPUT_MON_RATE_SHIFT 0
502#define THRUPUT_MON_EN 0x80
503
504/* RXQ Control Register */
505#define REG_RXQ_CTRL 0x15A0
506#define ASPM_THRUPUT_LIMIT_MASK 0x3
507#define ASPM_THRUPUT_LIMIT_SHIFT 0
508#define ASPM_THRUPUT_LIMIT_NO 0x00
509#define ASPM_THRUPUT_LIMIT_1M 0x01
510#define ASPM_THRUPUT_LIMIT_10M 0x02
511#define ASPM_THRUPUT_LIMIT_100M 0x04
512#define RXQ1_CTRL_EN 0x10
513#define RXQ2_CTRL_EN 0x20
514#define RXQ3_CTRL_EN 0x40
515#define IPV6_CHKSUM_CTRL_EN 0x80
516#define RSS_HASH_BITS_MASK 0x00FF
517#define RSS_HASH_BITS_SHIFT 8
518#define RSS_HASH_IPV4 0x10000
519#define RSS_HASH_IPV4_TCP 0x20000
520#define RSS_HASH_IPV6 0x40000
521#define RSS_HASH_IPV6_TCP 0x80000
522#define RXQ_RFD_BURST_NUM_MASK 0x003F
523#define RXQ_RFD_BURST_NUM_SHIFT 20
524#define RSS_MODE_MASK 0x0003
525#define RSS_MODE_SHIFT 26
526#define RSS_NIP_QUEUE_SEL_MASK 0x1
527#define RSS_NIP_QUEUE_SEL_SHIFT 28
528#define RRS_HASH_CTRL_EN 0x20000000
529#define RX_CUT_THRU_EN 0x40000000
530#define RXQ_CTRL_EN 0x80000000
531
532#define REG_RFD_FREE_THRESH 0x15A4
533#define RFD_FREE_THRESH_MASK 0x003F
534#define RFD_FREE_HI_THRESH_SHIFT 0
535#define RFD_FREE_LO_THRESH_SHIFT 6
536
537/* RXF flow control register */
538#define REG_RXQ_RXF_PAUSE_THRESH 0x15A8
539#define RXQ_RXF_PAUSE_TH_HI_SHIFT 0
540#define RXQ_RXF_PAUSE_TH_HI_MASK 0x0FFF
541#define RXQ_RXF_PAUSE_TH_LO_SHIFT 16
542#define RXQ_RXF_PAUSE_TH_LO_MASK 0x0FFF
543
544#define REG_RXD_DMA_CTRL 0x15AC
545#define RXD_DMA_THRESH_MASK 0x0FFF /* In 8-bytes */
546#define RXD_DMA_THRESH_SHIFT 0
547#define RXD_DMA_DOWN_TIMER_MASK 0xFFFF
548#define RXD_DMA_DOWN_TIMER_SHIFT 16
549
550/* DMA Engine Control Register */
551#define REG_DMA_CTRL 0x15C0
552#define DMA_CTRL_DMAR_IN_ORDER 0x1
553#define DMA_CTRL_DMAR_ENH_ORDER 0x2
554#define DMA_CTRL_DMAR_OUT_ORDER 0x4
555#define DMA_CTRL_RCB_VALUE 0x8
556#define DMA_CTRL_DMAR_BURST_LEN_MASK 0x0007
557#define DMA_CTRL_DMAR_BURST_LEN_SHIFT 4
558#define DMA_CTRL_DMAW_BURST_LEN_MASK 0x0007
559#define DMA_CTRL_DMAW_BURST_LEN_SHIFT 7
560#define DMA_CTRL_DMAR_REQ_PRI 0x400
561#define DMA_CTRL_DMAR_DLY_CNT_MASK 0x001F
562#define DMA_CTRL_DMAR_DLY_CNT_SHIFT 11
563#define DMA_CTRL_DMAW_DLY_CNT_MASK 0x000F
564#define DMA_CTRL_DMAW_DLY_CNT_SHIFT 16
565#define DMA_CTRL_CMB_EN 0x100000
566#define DMA_CTRL_SMB_EN 0x200000
567#define DMA_CTRL_CMB_NOW 0x400000
568#define MAC_CTRL_SMB_DIS 0x1000000
569#define DMA_CTRL_SMB_NOW 0x80000000
570
571/* CMB/SMB Control Register */
572#define REG_SMB_STAT_TIMER 0x15C4 /* 2us resolution */
573#define SMB_STAT_TIMER_MASK 0xFFFFFF
574#define REG_CMB_TPD_THRESH 0x15C8
575#define CMB_TPD_THRESH_MASK 0xFFFF
576#define REG_CMB_TX_TIMER 0x15CC /* 2us resolution */
577#define CMB_TX_TIMER_MASK 0xFFFF
578
579/* Mail box */
580#define MB_RFDX_PROD_IDX_MASK 0xFFFF
581#define REG_MB_RFD0_PROD_IDX 0x15E0
582#define REG_MB_RFD1_PROD_IDX 0x15E4
583#define REG_MB_RFD2_PROD_IDX 0x15E8
584#define REG_MB_RFD3_PROD_IDX 0x15EC
585
586#define MB_PRIO_PROD_IDX_MASK 0xFFFF
587#define REG_MB_PRIO_PROD_IDX 0x15F0
588#define MB_HTPD_PROD_IDX_SHIFT 0
589#define MB_NTPD_PROD_IDX_SHIFT 16
590
591#define MB_PRIO_CONS_IDX_MASK 0xFFFF
592#define REG_MB_PRIO_CONS_IDX 0x15F4
593#define MB_HTPD_CONS_IDX_SHIFT 0
594#define MB_NTPD_CONS_IDX_SHIFT 16
595
596#define REG_MB_RFD01_CONS_IDX 0x15F8
597#define MB_RFD0_CONS_IDX_MASK 0x0000FFFF
598#define MB_RFD1_CONS_IDX_MASK 0xFFFF0000
599#define REG_MB_RFD23_CONS_IDX 0x15FC
600#define MB_RFD2_CONS_IDX_MASK 0x0000FFFF
601#define MB_RFD3_CONS_IDX_MASK 0xFFFF0000
602
603/* Interrupt Status Register */
604#define REG_ISR 0x1600
605#define ISR_SMB 0x00000001
606#define ISR_TIMER 0x00000002
607/*
608 * Software manual interrupt, for debug. Set when SW_MAN_INT_EN is set
609 * in Table 51 Selene Master Control Register (Offset 0x1400).
610 */
611#define ISR_MANUAL 0x00000004
612#define ISR_HW_RXF_OV 0x00000008 /* RXF overflow interrupt */
613#define ISR_RFD0_UR 0x00000010 /* RFD0 under run */
614#define ISR_RFD1_UR 0x00000020
615#define ISR_RFD2_UR 0x00000040
616#define ISR_RFD3_UR 0x00000080
617#define ISR_TXF_UR 0x00000100
618#define ISR_DMAR_TO_RST 0x00000200
619#define ISR_DMAW_TO_RST 0x00000400
620#define ISR_TX_CREDIT 0x00000800
621#define ISR_GPHY 0x00001000
622/* GPHY low power state interrupt */
623#define ISR_GPHY_LPW 0x00002000
624#define ISR_TXQ_TO_RST 0x00004000
625#define ISR_TX_PKT 0x00008000
626#define ISR_RX_PKT_0 0x00010000
627#define ISR_RX_PKT_1 0x00020000
628#define ISR_RX_PKT_2 0x00040000
629#define ISR_RX_PKT_3 0x00080000
630#define ISR_MAC_RX 0x00100000
631#define ISR_MAC_TX 0x00200000
632#define ISR_UR_DETECTED 0x00400000
633#define ISR_FERR_DETECTED 0x00800000
634#define ISR_NFERR_DETECTED 0x01000000
635#define ISR_CERR_DETECTED 0x02000000
636#define ISR_PHY_LINKDOWN 0x04000000
637#define ISR_DIS_INT 0x80000000
638
639/* Interrupt Mask Register */
640#define REG_IMR 0x1604
641
642#define IMR_NORMAL_MASK (\
643 ISR_MANUAL |\
644 ISR_HW_RXF_OV |\
645 ISR_RFD0_UR |\
646 ISR_TXF_UR |\
647 ISR_DMAR_TO_RST |\
648 ISR_TXQ_TO_RST |\
649 ISR_DMAW_TO_RST |\
650 ISR_GPHY |\
651 ISR_TX_PKT |\
652 ISR_RX_PKT_0 |\
653 ISR_GPHY_LPW |\
654 ISR_PHY_LINKDOWN)
655
656#define ISR_RX_PKT (\
657 ISR_RX_PKT_0 |\
658 ISR_RX_PKT_1 |\
659 ISR_RX_PKT_2 |\
660 ISR_RX_PKT_3)
661
662#define ISR_OVER (\
663 ISR_RFD0_UR |\
664 ISR_RFD1_UR |\
665 ISR_RFD2_UR |\
666 ISR_RFD3_UR |\
667 ISR_HW_RXF_OV |\
668 ISR_TXF_UR)
669
670#define ISR_ERROR (\
671 ISR_DMAR_TO_RST |\
672 ISR_TXQ_TO_RST |\
673 ISR_DMAW_TO_RST |\
674 ISR_PHY_LINKDOWN)
675
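/*
 * Editor's sketch (illustrative only; the driver's real interrupt handler
 * appears further down in atl1c_main.c, beyond this hunk): how a status word
 * read from REG_ISR can be classified with the groups above.
 */
static inline void example_classify_isr(u32 status)
{
	if (status & ISR_ERROR) {
		/* DMA read/write or TXQ timeout, or PHY link down:
		 * serious enough that the driver resets the MAC */
	}
	if (status & ISR_OVER) {
		/* an RFD ring under-ran or the RX/TX FIFO over/under-flowed */
	}
	if (status & ISR_RX_PKT) {
		/* at least one of the four RX return rings has packets */
	}
}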
676#define REG_INT_RETRIG_TIMER 0x1608
677#define INT_RETRIG_TIMER_MASK 0xFFFF
678
679#define REG_HDS_CTRL 0x160C
680#define HDS_CTRL_EN 0x0001
681#define HDS_CTRL_BACKFILLSIZE_SHIFT 8
682#define HDS_CTRL_BACKFILLSIZE_MASK 0x0FFF
683#define HDS_CTRL_MAX_HDRSIZE_SHIFT 20
684#define HDS_CTRL_MAC_HDRSIZE_MASK 0x0FFF
685
686#define REG_MAC_RX_STATUS_BIN 0x1700
687#define REG_MAC_RX_STATUS_END 0x175c
688#define REG_MAC_TX_STATUS_BIN 0x1760
689#define REG_MAC_TX_STATUS_END 0x17c0
690
691/* DEBUG ADDR */
692#define REG_DEBUG_DATA0 0x1900
693#define REG_DEBUG_DATA1 0x1904
694
695/* PHY Control Register */
696#define MII_BMCR 0x00
697#define BMCR_SPEED_SELECT_MSB 0x0040 /* bits 6,13: 10=1000, 01=100, 00=10 */
698#define BMCR_COLL_TEST_ENABLE 0x0080 /* Collision test enable */
699#define BMCR_FULL_DUPLEX 0x0100 /* FDX =1, half duplex =0 */
700#define BMCR_RESTART_AUTO_NEG 0x0200 /* Restart auto negotiation */
701#define BMCR_ISOLATE 0x0400 /* Isolate PHY from MII */
702#define BMCR_POWER_DOWN 0x0800 /* Power down */
703#define BMCR_AUTO_NEG_EN 0x1000 /* Auto Neg Enable */
704#define BMCR_SPEED_SELECT_LSB 0x2000 /* bits 6,13: 10=1000, 01=100, 00=10 */
705#define BMCR_LOOPBACK 0x4000 /* 0 = normal, 1 = loopback */
706#define BMCR_RESET 0x8000 /* 0 = normal, 1 = PHY reset */
707#define BMCR_SPEED_MASK 0x2040
708#define BMCR_SPEED_1000 0x0040
709#define BMCR_SPEED_100 0x2000
710#define BMCR_SPEED_10 0x0000
711
712/* PHY Status Register */
713#define MII_BMSR 0x01
714#define BMMSR_EXTENDED_CAPS 0x0001 /* Extended register capabilities */
715#define BMSR_JABBER_DETECT 0x0002 /* Jabber Detected */
716#define BMSR_LINK_STATUS 0x0004 /* Link Status 1 = link */
717#define BMSR_AUTONEG_CAPS 0x0008 /* Auto Neg Capable */
718#define BMSR_REMOTE_FAULT 0x0010 /* Remote Fault Detect */
719#define BMSR_AUTONEG_COMPLETE 0x0020 /* Auto Neg Complete */
720#define BMSR_PREAMBLE_SUPPRESS 0x0040 /* Preamble may be suppressed */
721#define BMSR_EXTENDED_STATUS 0x0100 /* Ext. status info in Reg 0x0F */
722#define BMSR_100T2_HD_CAPS 0x0200 /* 100T2 Half Duplex Capable */
723#define BMSR_100T2_FD_CAPS 0x0400 /* 100T2 Full Duplex Capable */
724#define BMSR_10T_HD_CAPS 0x0800 /* 10T Half Duplex Capable */
725#define BMSR_10T_FD_CAPS 0x1000 /* 10T Full Duplex Capable */
726#define BMSR_100X_HD_CAPS 0x2000 /* 100X Half Duplex Capable */
727#define BMMII_SR_100X_FD_CAPS 0x4000 /* 100X Full Duplex Capable */
728#define BMMII_SR_100T4_CAPS 0x8000 /* 100T4 Capable */
729
730#define MII_PHYSID1 0x02
731#define MII_PHYSID2 0x03
732
733/* Autoneg Advertisement Register */
734#define MII_ADVERTISE 0x04
735#define ADVERTISE_SPEED_MASK 0x01E0
736#define ADVERTISE_DEFAULT_CAP 0x0DE0
737
738/* 1000BASE-T Control Register */
739#define MII_GIGA_CR 0x09
740#define GIGA_CR_1000T_REPEATER_DTE 0x0400 /* 1=Repeater/switch device port 0=DTE device */
741
742#define GIGA_CR_1000T_MS_VALUE 0x0800 /* 1=Configure PHY as Master 0=Configure PHY as Slave */
743#define GIGA_CR_1000T_MS_ENABLE 0x1000 /* 1=Master/Slave manual config value 0=Automatic Master/Slave config */
744#define GIGA_CR_1000T_TEST_MODE_NORMAL 0x0000 /* Normal Operation */
745#define GIGA_CR_1000T_TEST_MODE_1 0x2000 /* Transmit Waveform test */
746#define GIGA_CR_1000T_TEST_MODE_2 0x4000 /* Master Transmit Jitter test */
747#define GIGA_CR_1000T_TEST_MODE_3 0x6000 /* Slave Transmit Jitter test */
748#define GIGA_CR_1000T_TEST_MODE_4 0x8000 /* Transmitter Distortion test */
749#define GIGA_CR_1000T_SPEED_MASK 0x0300
750#define GIGA_CR_1000T_DEFAULT_CAP 0x0300
751
752/* PHY Specific Status Register */
753#define MII_GIGA_PSSR 0x11
754#define GIGA_PSSR_SPD_DPLX_RESOLVED 0x0800 /* 1=Speed & Duplex resolved */
755#define GIGA_PSSR_DPLX 0x2000 /* 1=Duplex 0=Half Duplex */
756#define GIGA_PSSR_SPEED 0xC000 /* Speed, bits 14:15 */
757#define GIGA_PSSR_10MBS 0x0000 /* 00=10Mbs */
758#define GIGA_PSSR_100MBS 0x4000 /* 01=100Mbs */
759#define GIGA_PSSR_1000MBS 0x8000 /* 10=1000Mbs */
760
761/* PHY Interrupt Enable Register */
762#define MII_IER 0x12
763#define IER_LINK_UP 0x0400
764#define IER_LINK_DOWN 0x0800
765
766/* PHY Interrupt Status Register */
767#define MII_ISR 0x13
768#define ISR_LINK_UP 0x0400
769#define ISR_LINK_DOWN 0x0800
770
771/* Cable-Detect-Test Control Register */
772#define MII_CDTC 0x16
773#define CDTC_EN_OFF 0 /* sc */
774#define CDTC_EN_BITS 1
775#define CDTC_PAIR_OFF 8
776#define CDTC_PAIR_BIT 2
777
778/* Cable-Detect-Test Status Register */
779#define MII_CDTS 0x1C
780#define CDTS_STATUS_OFF 8
781#define CDTS_STATUS_BITS 2
782#define CDTS_STATUS_NORMAL 0
783#define CDTS_STATUS_SHORT 1
784#define CDTS_STATUS_OPEN 2
785#define CDTS_STATUS_INVALID 3
786
787#define MII_DBG_ADDR 0x1D
788#define MII_DBG_DATA 0x1E
789
790#define MII_ANA_CTRL_0 0x0
791#define ANA_RESTART_CAL 0x0001
792#define ANA_MANUL_SWICH_ON_SHIFT 0x1
793#define ANA_MANUL_SWICH_ON_MASK 0xF
794#define ANA_MAN_ENABLE 0x0020
795#define ANA_SEL_HSP 0x0040
796#define ANA_EN_HB 0x0080
797#define ANA_EN_HBIAS 0x0100
798#define ANA_OEN_125M 0x0200
799#define ANA_EN_LCKDT 0x0400
800#define ANA_LCKDT_PHY 0x0800
801#define ANA_AFE_MODE 0x1000
802#define ANA_VCO_SLOW 0x2000
803#define ANA_VCO_FAST 0x4000
804#define ANA_SEL_CLK125M_DSP 0x8000
805
806#define MII_ANA_CTRL_4 0x4
807#define ANA_IECHO_ADJ_MASK 0xF
808#define ANA_IECHO_ADJ_3_SHIFT 0
809#define ANA_IECHO_ADJ_2_SHIFT 4
810#define ANA_IECHO_ADJ_1_SHIFT 8
811#define ANA_IECHO_ADJ_0_SHIFT 12
812
813#define MII_ANA_CTRL_5 0x5
814#define ANA_SERDES_CDR_BW_SHIFT 0
815#define ANA_SERDES_CDR_BW_MASK 0x3
816#define ANA_MS_PAD_DBG 0x0004
817#define ANA_SPEEDUP_DBG 0x0008
818#define ANA_SERDES_TH_LOS_SHIFT 4
819#define ANA_SERDES_TH_LOS_MASK 0x3
820#define ANA_SERDES_EN_DEEM 0x0040
821#define ANA_SERDES_TXELECIDLE 0x0080
822#define ANA_SERDES_BEACON 0x0100
823#define ANA_SERDES_HALFTXDR 0x0200
824#define ANA_SERDES_SEL_HSP 0x0400
825#define ANA_SERDES_EN_PLL 0x0800
826#define ANA_SERDES_EN 0x1000
827#define ANA_SERDES_EN_LCKDT 0x2000
828
829#define MII_ANA_CTRL_11 0xB
830#define ANA_PS_HIB_EN 0x8000
831
832#define MII_ANA_CTRL_18 0x12
833#define ANA_TEST_MODE_10BT_01SHIFT 0
834#define ANA_TEST_MODE_10BT_01MASK 0x3
835#define ANA_LOOP_SEL_10BT 0x0004
836#define ANA_RGMII_MODE_SW 0x0008
837#define ANA_EN_LONGECABLE 0x0010
838#define ANA_TEST_MODE_10BT_2 0x0020
839#define ANA_EN_10BT_IDLE 0x0400
840#define ANA_EN_MASK_TB 0x0800
841#define ANA_TRIGGER_SEL_TIMER_SHIFT 12
842#define ANA_TRIGGER_SEL_TIMER_MASK 0x3
843#define ANA_INTERVAL_SEL_TIMER_SHIFT 14
844#define ANA_INTERVAL_SEL_TIMER_MASK 0x3
845
846#define MII_ANA_CTRL_41 0x29
847#define ANA_TOP_PS_EN 0x8000
848
849#define MII_ANA_CTRL_54 0x36
850#define ANA_LONG_CABLE_TH_100_SHIFT 0
851#define ANA_LONG_CABLE_TH_100_MASK 0x3F
852#define ANA_DESERVED 0x0040
853#define ANA_EN_LIT_CH 0x0080
854#define ANA_SHORT_CABLE_TH_100_SHIFT 8
855#define ANA_SHORT_CABLE_TH_100_MASK 0x3F
856#define ANA_BP_BAD_LINK_ACCUM 0x4000
857#define ANA_BP_SMALL_BW 0x8000
858
859#endif /*_ATL1C_HW_H_*/
diff --git a/drivers/net/atl1c/atl1c_main.c b/drivers/net/atl1c/atl1c_main.c
new file mode 100644
index 000000000000..deb7b53167ee
--- /dev/null
+++ b/drivers/net/atl1c/atl1c_main.c
@@ -0,0 +1,2797 @@
1/*
2 * Copyright(c) 2008 - 2009 Atheros Corporation. All rights reserved.
3 *
4 * Derived from Intel e1000 driver
5 * Copyright(c) 1999 - 2005 Intel Corporation. All rights reserved.
6 *
7 * This program is free software; you can redistribute it and/or modify it
8 * under the terms of the GNU General Public License as published by the Free
9 * Software Foundation; either version 2 of the License, or (at your option)
10 * any later version.
11 *
12 * This program is distributed in the hope that it will be useful, but WITHOUT
13 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
14 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
15 * more details.
16 *
17 * You should have received a copy of the GNU General Public License along with
18 * this program; if not, write to the Free Software Foundation, Inc., 59
19 * Temple Place - Suite 330, Boston, MA 02111-1307, USA.
20 */
21
22#include "atl1c.h"
23
24#define ATL1C_DRV_VERSION "1.0.0.1-NAPI"
25char atl1c_driver_name[] = "atl1c";
26char atl1c_driver_version[] = ATL1C_DRV_VERSION;
27#define PCI_DEVICE_ID_ATTANSIC_L2C 0x1062
28#define PCI_DEVICE_ID_ATTANSIC_L1C 0x1063
29/*
30 * atl1c_pci_tbl - PCI Device ID Table
31 *
32 * Wildcard entries (PCI_ANY_ID) should come last
33 * Last entry must be all 0s
34 *
35 * { Vendor ID, Device ID, SubVendor ID, SubDevice ID,
36 * Class, Class Mask, private data (not used) }
37 */
38static struct pci_device_id atl1c_pci_tbl[] = {
39 {PCI_DEVICE(PCI_VENDOR_ID_ATTANSIC, PCI_DEVICE_ID_ATTANSIC_L1C)},
40 {PCI_DEVICE(PCI_VENDOR_ID_ATTANSIC, PCI_DEVICE_ID_ATTANSIC_L2C)},
41 /* required last entry */
42 { 0 }
43};
44MODULE_DEVICE_TABLE(pci, atl1c_pci_tbl);
45
46MODULE_AUTHOR("Jie Yang <jie.yang@atheros.com>");
47MODULE_DESCRIPTION("Atheros 1000M Ethernet Network Driver");
48MODULE_LICENSE("GPL");
49MODULE_VERSION(ATL1C_DRV_VERSION);
50
51static int atl1c_stop_mac(struct atl1c_hw *hw);
52static void atl1c_enable_rx_ctrl(struct atl1c_hw *hw);
53static void atl1c_enable_tx_ctrl(struct atl1c_hw *hw);
54static void atl1c_disable_l0s_l1(struct atl1c_hw *hw);
55static void atl1c_set_aspm(struct atl1c_hw *hw, bool linkup);
56static void atl1c_setup_mac_ctrl(struct atl1c_adapter *adapter);
57static void atl1c_clean_rx_irq(struct atl1c_adapter *adapter, u8 que,
58 int *work_done, int work_to_do);
59
60static const u16 atl1c_pay_load_size[] = {
61 128, 256, 512, 1024, 2048, 4096,
62};
63
64static const u16 atl1c_rfd_prod_idx_regs[AT_MAX_RECEIVE_QUEUE] =
65{
66 REG_MB_RFD0_PROD_IDX,
67 REG_MB_RFD1_PROD_IDX,
68 REG_MB_RFD2_PROD_IDX,
69 REG_MB_RFD3_PROD_IDX
70};
71
72static const u16 atl1c_rfd_addr_lo_regs[AT_MAX_RECEIVE_QUEUE] =
73{
74 REG_RFD0_HEAD_ADDR_LO,
75 REG_RFD1_HEAD_ADDR_LO,
76 REG_RFD2_HEAD_ADDR_LO,
77 REG_RFD3_HEAD_ADDR_LO
78};
79
80static const u16 atl1c_rrd_addr_lo_regs[AT_MAX_RECEIVE_QUEUE] =
81{
82 REG_RRD0_HEAD_ADDR_LO,
83 REG_RRD1_HEAD_ADDR_LO,
84 REG_RRD2_HEAD_ADDR_LO,
85 REG_RRD3_HEAD_ADDR_LO
86};
87
88static const u32 atl1c_default_msg = NETIF_MSG_DRV | NETIF_MSG_PROBE |
89 NETIF_MSG_LINK | NETIF_MSG_TIMER | NETIF_MSG_IFDOWN | NETIF_MSG_IFUP;
90
91/*
92 * atl1c_reset_pcie - reset the PCIe module
93 */
94static void atl1c_reset_pcie(struct atl1c_hw *hw, u32 flag)
95{
96 u32 data;
97 u32 pci_cmd;
98 struct pci_dev *pdev = hw->adapter->pdev;
99
100 AT_READ_REG(hw, PCI_COMMAND, &pci_cmd);
101 pci_cmd &= ~PCI_COMMAND_INTX_DISABLE;
102 pci_cmd |= (PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER |
103 PCI_COMMAND_IO);
104 AT_WRITE_REG(hw, PCI_COMMAND, pci_cmd);
105
106 /*
107	 * Clear any power-saving settings
108 */
109 pci_enable_wake(pdev, PCI_D3hot, 0);
110 pci_enable_wake(pdev, PCI_D3cold, 0);
111
112 /*
113	 * Mask some PCIe error bits
114 */
115 AT_READ_REG(hw, REG_PCIE_UC_SEVERITY, &data);
116 data &= ~PCIE_UC_SERVRITY_DLP;
117 data &= ~PCIE_UC_SERVRITY_FCP;
118 AT_WRITE_REG(hw, REG_PCIE_UC_SEVERITY, data);
119
120 if (flag & ATL1C_PCIE_L0S_L1_DISABLE)
121 atl1c_disable_l0s_l1(hw);
122 if (flag & ATL1C_PCIE_PHY_RESET)
123 AT_WRITE_REG(hw, REG_GPHY_CTRL, GPHY_CTRL_DEFAULT);
124 else
125 AT_WRITE_REG(hw, REG_GPHY_CTRL,
126 GPHY_CTRL_DEFAULT | GPHY_CTRL_EXT_RESET);
127
128 msleep(1);
129}
130
131/*
132 * atl1c_irq_enable - Enable default interrupt generation settings
133 * @adapter: board private structure
134 */
135static inline void atl1c_irq_enable(struct atl1c_adapter *adapter)
136{
137 if (likely(atomic_dec_and_test(&adapter->irq_sem))) {
138 AT_WRITE_REG(&adapter->hw, REG_ISR, 0x7FFFFFFF);
139 AT_WRITE_REG(&adapter->hw, REG_IMR, adapter->hw.intr_mask);
140 AT_WRITE_FLUSH(&adapter->hw);
141 }
142}
143
144/*
145 * atl1c_irq_disable - Mask off interrupt generation on the NIC
146 * @adapter: board private structure
147 */
148static inline void atl1c_irq_disable(struct atl1c_adapter *adapter)
149{
150 atomic_inc(&adapter->irq_sem);
151 AT_WRITE_REG(&adapter->hw, REG_IMR, 0);
152 AT_WRITE_FLUSH(&adapter->hw);
153 synchronize_irq(adapter->pdev->irq);
154}
155
156/*
157 * atl1c_irq_reset - reset interrupt configuration on the NIC
158 * @adapter: board private structure
159 */
160static inline void atl1c_irq_reset(struct atl1c_adapter *adapter)
161{
162 atomic_set(&adapter->irq_sem, 1);
163 atl1c_irq_enable(adapter);
164}
165
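/*
 * Editor's note: irq_sem is a nesting counter, so disable/enable calls must
 * balance -- the IMR is only rewritten when atomic_dec_and_test() brings the
 * counter back to zero.  Starting from the enabled state (counter == 0):
 *
 *	atl1c_irq_disable(adapter);	counter 0 -> 1, IMR cleared
 *	atl1c_irq_disable(adapter);	counter 1 -> 2
 *	atl1c_irq_enable(adapter);	counter 2 -> 1, still masked
 *	atl1c_irq_enable(adapter);	counter 1 -> 0, IMR restored
 */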
166/*
167 * atl1c_phy_config - Timer Call-back
168 * @data: pointer to the adapter structure cast into an unsigned long
169 */
170static void atl1c_phy_config(unsigned long data)
171{
172 struct atl1c_adapter *adapter = (struct atl1c_adapter *) data;
173 struct atl1c_hw *hw = &adapter->hw;
174 unsigned long flags;
175
176 spin_lock_irqsave(&adapter->mdio_lock, flags);
177 atl1c_restart_autoneg(hw);
178 spin_unlock_irqrestore(&adapter->mdio_lock, flags);
179}
180
181void atl1c_reinit_locked(struct atl1c_adapter *adapter)
182{
183
184 WARN_ON(in_interrupt());
185 atl1c_down(adapter);
186 atl1c_up(adapter);
187 clear_bit(__AT_RESETTING, &adapter->flags);
188}
189
190static void atl1c_reset_task(struct work_struct *work)
191{
192 struct atl1c_adapter *adapter;
193 struct net_device *netdev;
194
195 adapter = container_of(work, struct atl1c_adapter, reset_task);
196 netdev = adapter->netdev;
197
198 netif_device_detach(netdev);
199 atl1c_down(adapter);
200 atl1c_up(adapter);
201 netif_device_attach(netdev);
202}
203
204static void atl1c_check_link_status(struct atl1c_adapter *adapter)
205{
206 struct atl1c_hw *hw = &adapter->hw;
207 struct net_device *netdev = adapter->netdev;
208 struct pci_dev *pdev = adapter->pdev;
209 int err;
210 unsigned long flags;
211 u16 speed, duplex, phy_data;
212
213 spin_lock_irqsave(&adapter->mdio_lock, flags);
214	/* MII_BMSR must be read twice */
215 atl1c_read_phy_reg(hw, MII_BMSR, &phy_data);
216 atl1c_read_phy_reg(hw, MII_BMSR, &phy_data);
217 spin_unlock_irqrestore(&adapter->mdio_lock, flags);
218
219 if ((phy_data & BMSR_LSTATUS) == 0) {
220 /* link down */
221 if (netif_carrier_ok(netdev)) {
222 hw->hibernate = true;
223 atl1c_set_aspm(hw, false);
224 if (atl1c_stop_mac(hw) != 0)
225 if (netif_msg_hw(adapter))
226 dev_warn(&pdev->dev,
227 "stop mac failed\n");
228 }
229 netif_carrier_off(netdev);
230 } else {
231 /* Link Up */
232 hw->hibernate = false;
233 spin_lock_irqsave(&adapter->mdio_lock, flags);
234 err = atl1c_get_speed_and_duplex(hw, &speed, &duplex);
235 spin_unlock_irqrestore(&adapter->mdio_lock, flags);
236 if (unlikely(err))
237 return;
238 /* link result is our setting */
239 if (adapter->link_speed != speed ||
240 adapter->link_duplex != duplex) {
241 adapter->link_speed = speed;
242 adapter->link_duplex = duplex;
243 atl1c_enable_tx_ctrl(hw);
244 atl1c_enable_rx_ctrl(hw);
245 atl1c_setup_mac_ctrl(adapter);
246 atl1c_set_aspm(hw, true);
247 if (netif_msg_link(adapter))
248 dev_info(&pdev->dev,
249 "%s: %s NIC Link is Up<%d Mbps %s>\n",
250 atl1c_driver_name, netdev->name,
251 adapter->link_speed,
252 adapter->link_duplex == FULL_DUPLEX ?
253 "Full Duplex" : "Half Duplex");
254 }
255 if (!netif_carrier_ok(netdev))
256 netif_carrier_on(netdev);
257 }
258}
259
260/*
261 * atl1c_link_chg_task - deal with link change events out of interrupt context
262 * @netdev: network interface device structure
263 */
264static void atl1c_link_chg_task(struct work_struct *work)
265{
266 struct atl1c_adapter *adapter;
267
268 adapter = container_of(work, struct atl1c_adapter, link_chg_task);
269 atl1c_check_link_status(adapter);
270}
271
272static void atl1c_link_chg_event(struct atl1c_adapter *adapter)
273{
274 struct net_device *netdev = adapter->netdev;
275 struct pci_dev *pdev = adapter->pdev;
276 u16 phy_data;
277 u16 link_up;
278
279 spin_lock(&adapter->mdio_lock);
280 atl1c_read_phy_reg(&adapter->hw, MII_BMSR, &phy_data);
281 atl1c_read_phy_reg(&adapter->hw, MII_BMSR, &phy_data);
282 spin_unlock(&adapter->mdio_lock);
283 link_up = phy_data & BMSR_LSTATUS;
284 /* notify upper layer link down ASAP */
285 if (!link_up) {
286 if (netif_carrier_ok(netdev)) {
287 /* old link state: Up */
288 netif_carrier_off(netdev);
289 if (netif_msg_link(adapter))
290 dev_info(&pdev->dev,
291 "%s: %s NIC Link is Down\n",
292 atl1c_driver_name, netdev->name);
293 adapter->link_speed = SPEED_0;
294 }
295 }
296 schedule_work(&adapter->link_chg_task);
297}
298
299static void atl1c_del_timer(struct atl1c_adapter *adapter)
300{
301 del_timer_sync(&adapter->phy_config_timer);
302}
303
304static void atl1c_cancel_work(struct atl1c_adapter *adapter)
305{
306 cancel_work_sync(&adapter->reset_task);
307 cancel_work_sync(&adapter->link_chg_task);
308}
309
310/*
311 * atl1c_tx_timeout - Respond to a Tx Hang
312 * @netdev: network interface device structure
313 */
314static void atl1c_tx_timeout(struct net_device *netdev)
315{
316 struct atl1c_adapter *adapter = netdev_priv(netdev);
317
318 /* Do the reset outside of interrupt context */
319 schedule_work(&adapter->reset_task);
320}
321
322/*
323 * atl1c_set_multi - Multicast and Promiscuous mode set
324 * @netdev: network interface device structure
325 *
326 * The set_multi entry point is called whenever the multicast address
327 * list or the network interface flags are updated. This routine is
328 * responsible for configuring the hardware for proper multicast,
329 * promiscuous mode, and all-multi behavior.
330 */
331static void atl1c_set_multi(struct net_device *netdev)
332{
333 struct atl1c_adapter *adapter = netdev_priv(netdev);
334 struct atl1c_hw *hw = &adapter->hw;
335 struct dev_mc_list *mc_ptr;
336 u32 mac_ctrl_data;
337 u32 hash_value;
338
339 /* Check for Promiscuous and All Multicast modes */
340 AT_READ_REG(hw, REG_MAC_CTRL, &mac_ctrl_data);
341
342 if (netdev->flags & IFF_PROMISC) {
343 mac_ctrl_data |= MAC_CTRL_PROMIS_EN;
344 } else if (netdev->flags & IFF_ALLMULTI) {
345 mac_ctrl_data |= MAC_CTRL_MC_ALL_EN;
346 mac_ctrl_data &= ~MAC_CTRL_PROMIS_EN;
347 } else {
348 mac_ctrl_data &= ~(MAC_CTRL_PROMIS_EN | MAC_CTRL_MC_ALL_EN);
349 }
350
351 AT_WRITE_REG(hw, REG_MAC_CTRL, mac_ctrl_data);
352
353 /* clear the old settings from the multicast hash table */
354 AT_WRITE_REG(hw, REG_RX_HASH_TABLE, 0);
355 AT_WRITE_REG_ARRAY(hw, REG_RX_HASH_TABLE, 1, 0);
356
357	/* compute each multicast address's hash value and put it into the hash table */
358 for (mc_ptr = netdev->mc_list; mc_ptr; mc_ptr = mc_ptr->next) {
359 hash_value = atl1c_hash_mc_addr(hw, mc_ptr->dmi_addr);
360 atl1c_hash_set(hw, hash_value);
361 }
362}
363
364static void atl1c_vlan_rx_register(struct net_device *netdev,
365 struct vlan_group *grp)
366{
367 struct atl1c_adapter *adapter = netdev_priv(netdev);
368 struct pci_dev *pdev = adapter->pdev;
369 u32 mac_ctrl_data = 0;
370
371 if (netif_msg_pktdata(adapter))
372 dev_dbg(&pdev->dev, "atl1c_vlan_rx_register\n");
373
374 atl1c_irq_disable(adapter);
375
376 adapter->vlgrp = grp;
377 AT_READ_REG(&adapter->hw, REG_MAC_CTRL, &mac_ctrl_data);
378
379 if (grp) {
380 /* enable VLAN tag insert/strip */
381 mac_ctrl_data |= MAC_CTRL_RMV_VLAN;
382 } else {
383 /* disable VLAN tag insert/strip */
384 mac_ctrl_data &= ~MAC_CTRL_RMV_VLAN;
385 }
386
387 AT_WRITE_REG(&adapter->hw, REG_MAC_CTRL, mac_ctrl_data);
388 atl1c_irq_enable(adapter);
389}
390
391static void atl1c_restore_vlan(struct atl1c_adapter *adapter)
392{
393 struct pci_dev *pdev = adapter->pdev;
394
395 if (netif_msg_pktdata(adapter))
396		dev_dbg(&pdev->dev, "atl1c_restore_vlan\n");
397 atl1c_vlan_rx_register(adapter->netdev, adapter->vlgrp);
398}
399/*
400 * atl1c_set_mac_addr - Change the Ethernet Address of the NIC
401 * @netdev: network interface device structure
402 * @p: pointer to an address structure
403 *
404 * Returns 0 on success, negative on failure
405 */
406static int atl1c_set_mac_addr(struct net_device *netdev, void *p)
407{
408 struct atl1c_adapter *adapter = netdev_priv(netdev);
409 struct sockaddr *addr = p;
410
411 if (!is_valid_ether_addr(addr->sa_data))
412 return -EADDRNOTAVAIL;
413
414 if (netif_running(netdev))
415 return -EBUSY;
416
417 memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
418 memcpy(adapter->hw.mac_addr, addr->sa_data, netdev->addr_len);
419
420 atl1c_hw_set_mac_addr(&adapter->hw);
421
422 return 0;
423}
424
425static void atl1c_set_rxbufsize(struct atl1c_adapter *adapter,
426 struct net_device *dev)
427{
428 int mtu = dev->mtu;
429
430 adapter->rx_buffer_len = mtu > AT_RX_BUF_SIZE ?
431 roundup(mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN, 8) : AT_RX_BUF_SIZE;
432}
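/*
 * Worked example (editor's addition): for a hypothetical MTU of 4000, which
 * exceeds AT_RX_BUF_SIZE, the buffer length becomes
 * roundup(4000 + ETH_HLEN(14) + ETH_FCS_LEN(4) + VLAN_HLEN(4), 8)
 * = roundup(4022, 8) = 4024 bytes; MTUs at or below AT_RX_BUF_SIZE keep the
 * default AT_RX_BUF_SIZE buffer.
 */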
433/*
434 * atl1c_change_mtu - Change the Maximum Transfer Unit
435 * @netdev: network interface device structure
436 * @new_mtu: new value for maximum frame size
437 *
438 * Returns 0 on success, negative on failure
439 */
440static int atl1c_change_mtu(struct net_device *netdev, int new_mtu)
441{
442 struct atl1c_adapter *adapter = netdev_priv(netdev);
443 int old_mtu = netdev->mtu;
444 int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN;
445
446 if ((max_frame < ETH_ZLEN + ETH_FCS_LEN) ||
447 (max_frame > MAX_JUMBO_FRAME_SIZE)) {
448 if (netif_msg_link(adapter))
449 dev_warn(&adapter->pdev->dev, "invalid MTU setting\n");
450 return -EINVAL;
451 }
452 /* set MTU */
453 if (old_mtu != new_mtu && netif_running(netdev)) {
454 while (test_and_set_bit(__AT_RESETTING, &adapter->flags))
455 msleep(1);
456 netdev->mtu = new_mtu;
457 adapter->hw.max_frame_size = new_mtu;
458 atl1c_set_rxbufsize(adapter, netdev);
459 atl1c_down(adapter);
460 atl1c_up(adapter);
461 clear_bit(__AT_RESETTING, &adapter->flags);
462 if (adapter->hw.ctrl_flags & ATL1C_FPGA_VERSION) {
463 u32 phy_data;
464
465 AT_READ_REG(&adapter->hw, 0x1414, &phy_data);
466 phy_data |= 0x10000000;
467 AT_WRITE_REG(&adapter->hw, 0x1414, phy_data);
468 }
469
470 }
471 return 0;
472}
473
474/*
475 * caller should hold mdio_lock
476 */
477static int atl1c_mdio_read(struct net_device *netdev, int phy_id, int reg_num)
478{
479 struct atl1c_adapter *adapter = netdev_priv(netdev);
480 u16 result;
481
482 atl1c_read_phy_reg(&adapter->hw, reg_num & MDIO_REG_ADDR_MASK, &result);
483 return result;
484}
485
486static void atl1c_mdio_write(struct net_device *netdev, int phy_id,
487 int reg_num, int val)
488{
489 struct atl1c_adapter *adapter = netdev_priv(netdev);
490
491 atl1c_write_phy_reg(&adapter->hw, reg_num & MDIO_REG_ADDR_MASK, val);
492}
493
494/*
495 * atl1c_mii_ioctl -
496 * @netdev:
497 * @ifreq:
498 * @cmd:
499 */
500static int atl1c_mii_ioctl(struct net_device *netdev,
501 struct ifreq *ifr, int cmd)
502{
503 struct atl1c_adapter *adapter = netdev_priv(netdev);
504 struct pci_dev *pdev = adapter->pdev;
505 struct mii_ioctl_data *data = if_mii(ifr);
506 unsigned long flags;
507 int retval = 0;
508
509 if (!netif_running(netdev))
510 return -EINVAL;
511
512 spin_lock_irqsave(&adapter->mdio_lock, flags);
513 switch (cmd) {
514 case SIOCGMIIPHY:
515 data->phy_id = 0;
516 break;
517
518 case SIOCGMIIREG:
519 if (!capable(CAP_NET_ADMIN)) {
520 retval = -EPERM;
521 goto out;
522 }
523 if (atl1c_read_phy_reg(&adapter->hw, data->reg_num & 0x1F,
524 &data->val_out)) {
525 retval = -EIO;
526 goto out;
527 }
528 break;
529
530 case SIOCSMIIREG:
531 if (!capable(CAP_NET_ADMIN)) {
532 retval = -EPERM;
533 goto out;
534 }
535 if (data->reg_num & ~(0x1F)) {
536 retval = -EFAULT;
537 goto out;
538 }
539
540 dev_dbg(&pdev->dev, "<atl1c_mii_ioctl> write %x %x",
541 data->reg_num, data->val_in);
542 if (atl1c_write_phy_reg(&adapter->hw,
543 data->reg_num, data->val_in)) {
544 retval = -EIO;
545 goto out;
546 }
547 break;
548
549 default:
550 retval = -EOPNOTSUPP;
551 break;
552 }
553out:
554 spin_unlock_irqrestore(&adapter->mdio_lock, flags);
555 return retval;
556}
557
558/*
559 * atl1c_ioctl -
560 * @netdev:
561 * @ifreq:
562 * @cmd:
563 */
564static int atl1c_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
565{
566 switch (cmd) {
567 case SIOCGMIIPHY:
568 case SIOCGMIIREG:
569 case SIOCSMIIREG:
570 return atl1c_mii_ioctl(netdev, ifr, cmd);
571 default:
572 return -EOPNOTSUPP;
573 }
574}
575
576/*
577 * atl1c_alloc_queues - Allocate memory for all rings
578 * @adapter: board private structure to initialize
579 *
580 */
581static int __devinit atl1c_alloc_queues(struct atl1c_adapter *adapter)
582{
583 return 0;
584}
585
586static void atl1c_set_mac_type(struct atl1c_hw *hw)
587{
588 switch (hw->device_id) {
589 case PCI_DEVICE_ID_ATTANSIC_L2C:
590 hw->nic_type = athr_l2c;
591 break;
592
593 case PCI_DEVICE_ID_ATTANSIC_L1C:
594 hw->nic_type = athr_l1c;
595 break;
596
597 default:
598 break;
599 }
600}
601
602static int atl1c_setup_mac_funcs(struct atl1c_hw *hw)
603{
604 u32 phy_status_data;
605 u32 link_ctrl_data;
606
607 atl1c_set_mac_type(hw);
608 AT_READ_REG(hw, REG_PHY_STATUS, &phy_status_data);
609 AT_READ_REG(hw, REG_LINK_CTRL, &link_ctrl_data);
610
611 hw->ctrl_flags = ATL1C_INTR_CLEAR_ON_READ |
612 ATL1C_INTR_MODRT_ENABLE |
613 ATL1C_RX_IPV6_CHKSUM |
614 ATL1C_TXQ_MODE_ENHANCE;
615 if (link_ctrl_data & LINK_CTRL_L0S_EN)
616 hw->ctrl_flags |= ATL1C_ASPM_L0S_SUPPORT;
617 if (link_ctrl_data & LINK_CTRL_L1_EN)
618 hw->ctrl_flags |= ATL1C_ASPM_L1_SUPPORT;
619
620 if (hw->nic_type == athr_l1c) {
621 hw->ctrl_flags |= ATL1C_ASPM_CTRL_MON;
622 hw->ctrl_flags |= ATL1C_LINK_CAP_1000M;
623 }
624 return 0;
625}
626/*
627 * atl1c_sw_init - Initialize general software structures (struct atl1c_adapter)
628 * @adapter: board private structure to initialize
629 *
630 * atl1c_sw_init initializes the Adapter private data structure.
631 * Fields are initialized based on PCI device information and
632 * OS network device settings (MTU size).
633 */
634static int __devinit atl1c_sw_init(struct atl1c_adapter *adapter)
635{
636 struct atl1c_hw *hw = &adapter->hw;
637 struct pci_dev *pdev = adapter->pdev;
638
639 adapter->wol = 0;
640 adapter->link_speed = SPEED_0;
641 adapter->link_duplex = FULL_DUPLEX;
642 adapter->num_rx_queues = AT_DEF_RECEIVE_QUEUE;
643 adapter->tpd_ring[0].count = 1024;
644 adapter->rfd_ring[0].count = 512;
645
646 hw->vendor_id = pdev->vendor;
647 hw->device_id = pdev->device;
648 hw->subsystem_vendor_id = pdev->subsystem_vendor;
649 hw->subsystem_id = pdev->subsystem_device;
650
651 /* before link up, we assume hibernate is true */
652 hw->hibernate = true;
653 hw->media_type = MEDIA_TYPE_AUTO_SENSOR;
654 if (atl1c_setup_mac_funcs(hw) != 0) {
655 dev_err(&pdev->dev, "set mac function pointers failed\n");
656 return -1;
657 }
658 hw->intr_mask = IMR_NORMAL_MASK;
659 hw->phy_configured = false;
660 hw->preamble_len = 7;
661 hw->max_frame_size = adapter->netdev->mtu;
662 if (adapter->num_rx_queues < 2) {
663 hw->rss_type = atl1c_rss_disable;
664 hw->rss_mode = atl1c_rss_mode_disable;
665 } else {
666 hw->rss_type = atl1c_rss_ipv4;
667 hw->rss_mode = atl1c_rss_mul_que_mul_int;
668 hw->rss_hash_bits = 16;
669 }
670 hw->autoneg_advertised = ADVERTISED_Autoneg;
671 hw->indirect_tab = 0xE4E4E4E4;
672 hw->base_cpu = 0;
673
674 hw->ict = 50000; /* 100ms */
675 hw->smb_timer = 200000; /* 400ms */
676 hw->cmb_tpd = 4;
677 hw->cmb_tx_timer = 1; /* 2 us */
678 hw->rx_imt = 200;
679 hw->tx_imt = 1000;
680
681 hw->tpd_burst = 5;
682 hw->rfd_burst = 8;
683 hw->dma_order = atl1c_dma_ord_out;
684 hw->dmar_block = atl1c_dma_req_1024;
685 hw->dmaw_block = atl1c_dma_req_1024;
686 hw->dmar_dly_cnt = 15;
687 hw->dmaw_dly_cnt = 4;
688
689 if (atl1c_alloc_queues(adapter)) {
690 dev_err(&pdev->dev, "Unable to allocate memory for queues\n");
691 return -ENOMEM;
692 }
693 /* TODO */
694 atl1c_set_rxbufsize(adapter, adapter->netdev);
695 atomic_set(&adapter->irq_sem, 1);
696 spin_lock_init(&adapter->mdio_lock);
697 spin_lock_init(&adapter->tx_lock);
698 set_bit(__AT_DOWN, &adapter->flags);
699
700 return 0;
701}
702
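/*
 * Editor's note: the timer values above are expressed in the hardware's
 * 2 us ticks (the resolution documented for REG_SMB_STAT_TIMER and
 * REG_CMB_TX_TIMER in atl1c_hw.h), hence 50000 ticks = 100 ms,
 * 200000 ticks = 400 ms and cmb_tx_timer = 1 tick = 2 us, matching the
 * inline comments.
 */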
703/*
704 * atl1c_clean_tx_ring - Free Tx-skb
705 * @adapter: board private structure
706 */
707static void atl1c_clean_tx_ring(struct atl1c_adapter *adapter,
708 enum atl1c_trans_queue type)
709{
710 struct atl1c_tpd_ring *tpd_ring = &adapter->tpd_ring[type];
711 struct atl1c_buffer *buffer_info;
712 struct pci_dev *pdev = adapter->pdev;
713 u16 index, ring_count;
714
715 ring_count = tpd_ring->count;
716 for (index = 0; index < ring_count; index++) {
717 buffer_info = &tpd_ring->buffer_info[index];
718 if (buffer_info->state == ATL1_BUFFER_FREE)
719 continue;
720 if (buffer_info->dma)
721 pci_unmap_single(pdev, buffer_info->dma,
722 buffer_info->length,
723 PCI_DMA_TODEVICE);
724 if (buffer_info->skb)
725 dev_kfree_skb(buffer_info->skb);
726 buffer_info->dma = 0;
727 buffer_info->skb = NULL;
728 buffer_info->state = ATL1_BUFFER_FREE;
729 }
730
731 /* Zero out Tx-buffers */
732 memset(tpd_ring->desc, 0, sizeof(struct atl1c_tpd_desc) *
733 ring_count);
734 atomic_set(&tpd_ring->next_to_clean, 0);
735 tpd_ring->next_to_use = 0;
736}
737
738/*
739 * atl1c_clean_rx_ring - Free rx-reservation skbs
740 * @adapter: board private structure
741 */
742static void atl1c_clean_rx_ring(struct atl1c_adapter *adapter)
743{
744 struct atl1c_rfd_ring *rfd_ring = adapter->rfd_ring;
745 struct atl1c_rrd_ring *rrd_ring = adapter->rrd_ring;
746 struct atl1c_buffer *buffer_info;
747 struct pci_dev *pdev = adapter->pdev;
748 int i, j;
749
750 for (i = 0; i < adapter->num_rx_queues; i++) {
751 for (j = 0; j < rfd_ring[i].count; j++) {
752 buffer_info = &rfd_ring[i].buffer_info[j];
753 if (buffer_info->state == ATL1_BUFFER_FREE)
754 continue;
755 if (buffer_info->dma)
756 pci_unmap_single(pdev, buffer_info->dma,
757 buffer_info->length,
758 PCI_DMA_FROMDEVICE);
759 if (buffer_info->skb)
760 dev_kfree_skb(buffer_info->skb);
761 buffer_info->state = ATL1_BUFFER_FREE;
762 buffer_info->skb = NULL;
763 }
764 /* zero out the descriptor ring */
765 memset(rfd_ring[i].desc, 0, rfd_ring[i].size);
766 rfd_ring[i].next_to_clean = 0;
767 rfd_ring[i].next_to_use = 0;
768 rrd_ring[i].next_to_use = 0;
769 rrd_ring[i].next_to_clean = 0;
770 }
771}
772
773/*
774 * Initialize the read / write pointers of all rings
775 */
776static void atl1c_init_ring_ptrs(struct atl1c_adapter *adapter)
777{
778 struct atl1c_tpd_ring *tpd_ring = adapter->tpd_ring;
779 struct atl1c_rfd_ring *rfd_ring = adapter->rfd_ring;
780 struct atl1c_rrd_ring *rrd_ring = adapter->rrd_ring;
781 struct atl1c_buffer *buffer_info;
782 int i, j;
783
784 for (i = 0; i < AT_MAX_TRANSMIT_QUEUE; i++) {
785 tpd_ring[i].next_to_use = 0;
786 atomic_set(&tpd_ring[i].next_to_clean, 0);
787 buffer_info = tpd_ring[i].buffer_info;
788 for (j = 0; j < tpd_ring->count; j++)
789			buffer_info[j].state = ATL1_BUFFER_FREE;
790 }
791 for (i = 0; i < adapter->num_rx_queues; i++) {
792 rfd_ring[i].next_to_use = 0;
793 rfd_ring[i].next_to_clean = 0;
794 rrd_ring[i].next_to_use = 0;
795 rrd_ring[i].next_to_clean = 0;
796 for (j = 0; j < rfd_ring[i].count; j++) {
797 buffer_info = &rfd_ring[i].buffer_info[j];
798 buffer_info->state = ATL1_BUFFER_FREE;
799 }
800 }
801}
802
803/*
804 * atl1c_free_ring_resources - Free Tx / RX descriptor Resources
805 * @adapter: board private structure
806 *
807 * Free all transmit and receive software resources
808 */
809static void atl1c_free_ring_resources(struct atl1c_adapter *adapter)
810{
811 struct pci_dev *pdev = adapter->pdev;
812
813 pci_free_consistent(pdev, adapter->ring_header.size,
814 adapter->ring_header.desc,
815 adapter->ring_header.dma);
816 adapter->ring_header.desc = NULL;
817
818	/* Note: just free tpd_ring.buffer_info;
819	 * it contains rfd_ring.buffer_info, so do not free that separately */
820 if (adapter->tpd_ring[0].buffer_info) {
821 kfree(adapter->tpd_ring[0].buffer_info);
822 adapter->tpd_ring[0].buffer_info = NULL;
823 }
824}
825
826/*
827 * atl1c_setup_mem_resources - allocate Tx / RX descriptor resources
828 * @adapter: board private structure
829 *
830 * Return 0 on success, negative on failure
831 */
832static int atl1c_setup_ring_resources(struct atl1c_adapter *adapter)
833{
834 struct pci_dev *pdev = adapter->pdev;
835 struct atl1c_tpd_ring *tpd_ring = adapter->tpd_ring;
836 struct atl1c_rfd_ring *rfd_ring = adapter->rfd_ring;
837 struct atl1c_rrd_ring *rrd_ring = adapter->rrd_ring;
838 struct atl1c_ring_header *ring_header = &adapter->ring_header;
839 int num_rx_queues = adapter->num_rx_queues;
840 int size;
841 int i;
842 int count = 0;
843 int rx_desc_count = 0;
844 u32 offset = 0;
845
846 rrd_ring[0].count = rfd_ring[0].count;
847 for (i = 1; i < AT_MAX_TRANSMIT_QUEUE; i++)
848 tpd_ring[i].count = tpd_ring[0].count;
849
850 for (i = 1; i < adapter->num_rx_queues; i++)
851 rfd_ring[i].count = rrd_ring[i].count = rfd_ring[0].count;
852
853 /* 2 tpd queue, one high priority queue,
854 * another normal priority queue */
855 size = sizeof(struct atl1c_buffer) * (tpd_ring->count * 2 +
856 rfd_ring->count * num_rx_queues);
857 tpd_ring->buffer_info = kzalloc(size, GFP_KERNEL);
858 if (unlikely(!tpd_ring->buffer_info)) {
859 dev_err(&pdev->dev, "kzalloc failed, size = %d\n",
860 size);
861 goto err_nomem;
862 }
863 for (i = 0; i < AT_MAX_TRANSMIT_QUEUE; i++) {
864 tpd_ring[i].buffer_info =
865 (struct atl1c_buffer *) (tpd_ring->buffer_info + count);
866 count += tpd_ring[i].count;
867 }
868
869 for (i = 0; i < num_rx_queues; i++) {
870 rfd_ring[i].buffer_info =
871 (struct atl1c_buffer *) (tpd_ring->buffer_info + count);
872 count += rfd_ring[i].count;
873 rx_desc_count += rfd_ring[i].count;
874 }
875 /*
876 * real ring DMA buffer
877 * each ring/block may need up to 8 bytes for alignment, hence the
878 * additional bytes tacked onto the end.
879 */
880 ring_header->size = size =
881 sizeof(struct atl1c_tpd_desc) * tpd_ring->count * 2 +
882 sizeof(struct atl1c_rx_free_desc) * rx_desc_count +
883 sizeof(struct atl1c_recv_ret_status) * rx_desc_count +
884 sizeof(struct atl1c_hw_stats) +
885 8 * 4 + 8 * 2 * num_rx_queues;
886
887 ring_header->desc = pci_alloc_consistent(pdev, ring_header->size,
888 &ring_header->dma);
889 if (unlikely(!ring_header->desc)) {
890		dev_err(&pdev->dev, "pci_alloc_consistent failed\n");
891 goto err_nomem;
892 }
893 memset(ring_header->desc, 0, ring_header->size);
894 /* init TPD ring */
895
896 tpd_ring[0].dma = roundup(ring_header->dma, 8);
897 offset = tpd_ring[0].dma - ring_header->dma;
898 for (i = 0; i < AT_MAX_TRANSMIT_QUEUE; i++) {
899 tpd_ring[i].dma = ring_header->dma + offset;
900 tpd_ring[i].desc = (u8 *) ring_header->desc + offset;
901 tpd_ring[i].size =
902 sizeof(struct atl1c_tpd_desc) * tpd_ring[i].count;
903 offset += roundup(tpd_ring[i].size, 8);
904 }
905 /* init RFD ring */
906 for (i = 0; i < num_rx_queues; i++) {
907 rfd_ring[i].dma = ring_header->dma + offset;
908 rfd_ring[i].desc = (u8 *) ring_header->desc + offset;
909 rfd_ring[i].size = sizeof(struct atl1c_rx_free_desc) *
910 rfd_ring[i].count;
911 offset += roundup(rfd_ring[i].size, 8);
912 }
913
914 /* init RRD ring */
915 for (i = 0; i < num_rx_queues; i++) {
916 rrd_ring[i].dma = ring_header->dma + offset;
917 rrd_ring[i].desc = (u8 *) ring_header->desc + offset;
918 rrd_ring[i].size = sizeof(struct atl1c_recv_ret_status) *
919 rrd_ring[i].count;
920 offset += roundup(rrd_ring[i].size, 8);
921 }
922
923 adapter->smb.dma = ring_header->dma + offset;
924 adapter->smb.smb = (u8 *)ring_header->desc + offset;
925 return 0;
926
927err_nomem:
928 kfree(tpd_ring->buffer_info);
929 return -ENOMEM;
930}
931
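/*
 * Layout sketch (editor's addition): with the atl1c_sw_init() defaults of
 * 1024 TPDs and 512 RFDs and, for simplicity, a single RX queue, the one
 * coherent allocation made above is carved up as
 *
 *	[ TPD ring 0 | TPD ring 1 | RFD ring | RRD ring | SMB stats ]
 *	  1024 descs   1024 descs   512 descs  512 descs
 *
 * with each region's start rounded up to an 8-byte boundary -- that rounding
 * is what the extra "8 * 4 + 8 * 2 * num_rx_queues" bytes in
 * ring_header->size pay for.
 */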
932static void atl1c_configure_des_ring(struct atl1c_adapter *adapter)
933{
934 struct atl1c_hw *hw = &adapter->hw;
935 struct atl1c_rfd_ring *rfd_ring = (struct atl1c_rfd_ring *)
936 adapter->rfd_ring;
937 struct atl1c_rrd_ring *rrd_ring = (struct atl1c_rrd_ring *)
938 adapter->rrd_ring;
939 struct atl1c_tpd_ring *tpd_ring = (struct atl1c_tpd_ring *)
940 adapter->tpd_ring;
941 struct atl1c_cmb *cmb = (struct atl1c_cmb *) &adapter->cmb;
942 struct atl1c_smb *smb = (struct atl1c_smb *) &adapter->smb;
943 int i;
944
945 /* TPD */
946 AT_WRITE_REG(hw, REG_TX_BASE_ADDR_HI,
947 (u32)((tpd_ring[atl1c_trans_normal].dma &
948 AT_DMA_HI_ADDR_MASK) >> 32));
949 /* just enable normal priority TX queue */
950 AT_WRITE_REG(hw, REG_NTPD_HEAD_ADDR_LO,
951 (u32)(tpd_ring[atl1c_trans_normal].dma &
952 AT_DMA_LO_ADDR_MASK));
953 AT_WRITE_REG(hw, REG_HTPD_HEAD_ADDR_LO,
954 (u32)(tpd_ring[atl1c_trans_high].dma &
955 AT_DMA_LO_ADDR_MASK));
956 AT_WRITE_REG(hw, REG_TPD_RING_SIZE,
957 (u32)(tpd_ring[0].count & TPD_RING_SIZE_MASK));
958
959
960 /* RFD */
961 AT_WRITE_REG(hw, REG_RX_BASE_ADDR_HI,
962 (u32)((rfd_ring[0].dma & AT_DMA_HI_ADDR_MASK) >> 32));
963 for (i = 0; i < adapter->num_rx_queues; i++)
964 AT_WRITE_REG(hw, atl1c_rfd_addr_lo_regs[i],
965 (u32)(rfd_ring[i].dma & AT_DMA_LO_ADDR_MASK));
966
967 AT_WRITE_REG(hw, REG_RFD_RING_SIZE,
968 rfd_ring[0].count & RFD_RING_SIZE_MASK);
969 AT_WRITE_REG(hw, REG_RX_BUF_SIZE,
970 adapter->rx_buffer_len & RX_BUF_SIZE_MASK);
971
972 /* RRD */
973 for (i = 0; i < adapter->num_rx_queues; i++)
974 AT_WRITE_REG(hw, atl1c_rrd_addr_lo_regs[i],
975 (u32)(rrd_ring[i].dma & AT_DMA_LO_ADDR_MASK));
976 AT_WRITE_REG(hw, REG_RRD_RING_SIZE,
977 (rrd_ring[0].count & RRD_RING_SIZE_MASK));
978
979 /* CMB */
980 AT_WRITE_REG(hw, REG_CMB_BASE_ADDR_LO, cmb->dma & AT_DMA_LO_ADDR_MASK);
981
982 /* SMB */
983 AT_WRITE_REG(hw, REG_SMB_BASE_ADDR_HI,
984 (u32)((smb->dma & AT_DMA_HI_ADDR_MASK) >> 32));
985 AT_WRITE_REG(hw, REG_SMB_BASE_ADDR_LO,
986 (u32)(smb->dma & AT_DMA_LO_ADDR_MASK));
987 /* Load all of base address above */
988 AT_WRITE_REG(hw, REG_LOAD_PTR, 1);
989}
990
991static void atl1c_configure_tx(struct atl1c_adapter *adapter)
992{
993 struct atl1c_hw *hw = &adapter->hw;
994 u32 dev_ctrl_data;
995 u32 max_pay_load;
996 u16 tx_offload_thresh;
997 u32 txq_ctrl_data;
998 u32 extra_size = 0; /* Jumbo frame threshold in QWORD unit */
999
1000 extra_size = ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN;
1001 tx_offload_thresh = MAX_TX_OFFLOAD_THRESH;
1002 AT_WRITE_REG(hw, REG_TX_TSO_OFFLOAD_THRESH,
1003 (tx_offload_thresh >> 3) & TX_TSO_OFFLOAD_THRESH_MASK);
1004 AT_READ_REG(hw, REG_DEVICE_CTRL, &dev_ctrl_data);
1005 max_pay_load = (dev_ctrl_data >> DEVICE_CTRL_MAX_PAYLOAD_SHIFT) &
1006 DEVICE_CTRL_MAX_PAYLOAD_MASK;
1007 hw->dmaw_block = min(max_pay_load, hw->dmaw_block);
1008 max_pay_load = (dev_ctrl_data >> DEVICE_CTRL_MAX_RREQ_SZ_SHIFT) &
1009 DEVICE_CTRL_MAX_RREQ_SZ_MASK;
1010 hw->dmar_block = min(max_pay_load, hw->dmar_block);
1011
1012 txq_ctrl_data = (hw->tpd_burst & TXQ_NUM_TPD_BURST_MASK) <<
1013 TXQ_NUM_TPD_BURST_SHIFT;
1014 if (hw->ctrl_flags & ATL1C_TXQ_MODE_ENHANCE)
1015 txq_ctrl_data |= TXQ_CTRL_ENH_MODE;
1016 txq_ctrl_data |= (atl1c_pay_load_size[hw->dmar_block] &
1017 TXQ_TXF_BURST_NUM_MASK) << TXQ_TXF_BURST_NUM_SHIFT;
1018
1019 AT_WRITE_REG(hw, REG_TXQ_CTRL, txq_ctrl_data);
1020}
1021
1022static void atl1c_configure_rx(struct atl1c_adapter *adapter)
1023{
1024 struct atl1c_hw *hw = &adapter->hw;
1025 u32 rxq_ctrl_data;
1026
1027 rxq_ctrl_data = (hw->rfd_burst & RXQ_RFD_BURST_NUM_MASK) <<
1028 RXQ_RFD_BURST_NUM_SHIFT;
1029
1030 if (hw->ctrl_flags & ATL1C_RX_IPV6_CHKSUM)
1031 rxq_ctrl_data |= IPV6_CHKSUM_CTRL_EN;
1032 if (hw->rss_type == atl1c_rss_ipv4)
1033 rxq_ctrl_data |= RSS_HASH_IPV4;
1034 if (hw->rss_type == atl1c_rss_ipv4_tcp)
1035 rxq_ctrl_data |= RSS_HASH_IPV4_TCP;
1036 if (hw->rss_type == atl1c_rss_ipv6)
1037 rxq_ctrl_data |= RSS_HASH_IPV6;
1038 if (hw->rss_type == atl1c_rss_ipv6_tcp)
1039 rxq_ctrl_data |= RSS_HASH_IPV6_TCP;
1040 if (hw->rss_type != atl1c_rss_disable)
1041 rxq_ctrl_data |= RRS_HASH_CTRL_EN;
1042
1043 rxq_ctrl_data |= (hw->rss_mode & RSS_MODE_MASK) <<
1044 RSS_MODE_SHIFT;
1045 rxq_ctrl_data |= (hw->rss_hash_bits & RSS_HASH_BITS_MASK) <<
1046 RSS_HASH_BITS_SHIFT;
1047 if (hw->ctrl_flags & ATL1C_ASPM_CTRL_MON)
1048 rxq_ctrl_data |= (ASPM_THRUPUT_LIMIT_100M &
1049 ASPM_THRUPUT_LIMIT_MASK) << ASPM_THRUPUT_LIMIT_SHIFT;
1050
1051 AT_WRITE_REG(hw, REG_RXQ_CTRL, rxq_ctrl_data);
1052}
1053
1054static void atl1c_configure_rss(struct atl1c_adapter *adapter)
1055{
1056 struct atl1c_hw *hw = &adapter->hw;
1057
1058 AT_WRITE_REG(hw, REG_IDT_TABLE, hw->indirect_tab);
1059 AT_WRITE_REG(hw, REG_BASE_CPU_NUMBER, hw->base_cpu);
1060}
1061
1062static void atl1c_configure_dma(struct atl1c_adapter *adapter)
1063{
1064 struct atl1c_hw *hw = &adapter->hw;
1065 u32 dma_ctrl_data;
1066
1067 dma_ctrl_data = DMA_CTRL_DMAR_REQ_PRI;
1068 if (hw->ctrl_flags & ATL1C_CMB_ENABLE)
1069 dma_ctrl_data |= DMA_CTRL_CMB_EN;
1070 if (hw->ctrl_flags & ATL1C_SMB_ENABLE)
1071 dma_ctrl_data |= DMA_CTRL_SMB_EN;
1072 else
1073 dma_ctrl_data |= MAC_CTRL_SMB_DIS;
1074
1075 switch (hw->dma_order) {
1076 case atl1c_dma_ord_in:
1077 dma_ctrl_data |= DMA_CTRL_DMAR_IN_ORDER;
1078 break;
1079 case atl1c_dma_ord_enh:
1080 dma_ctrl_data |= DMA_CTRL_DMAR_ENH_ORDER;
1081 break;
1082 case atl1c_dma_ord_out:
1083 dma_ctrl_data |= DMA_CTRL_DMAR_OUT_ORDER;
1084 break;
1085 default:
1086 break;
1087 }
1088
1089 dma_ctrl_data |= (((u32)hw->dmar_block) & DMA_CTRL_DMAR_BURST_LEN_MASK)
1090 << DMA_CTRL_DMAR_BURST_LEN_SHIFT;
1091 dma_ctrl_data |= (((u32)hw->dmaw_block) & DMA_CTRL_DMAW_BURST_LEN_MASK)
1092 << DMA_CTRL_DMAW_BURST_LEN_SHIFT;
1093 dma_ctrl_data |= (((u32)hw->dmar_dly_cnt) & DMA_CTRL_DMAR_DLY_CNT_MASK)
1094 << DMA_CTRL_DMAR_DLY_CNT_SHIFT;
1095 dma_ctrl_data |= (((u32)hw->dmaw_dly_cnt) & DMA_CTRL_DMAW_DLY_CNT_MASK)
1096 << DMA_CTRL_DMAW_DLY_CNT_SHIFT;
1097
1098 AT_WRITE_REG(hw, REG_DMA_CTRL, dma_ctrl_data);
1099}
1100
1101/*
1102 * Stop the mac, transmit and receive units
1103 * hw - Struct containing variables accessed by shared code
1104 * return : 0 or idle status (if error)
1105 */
1106static int atl1c_stop_mac(struct atl1c_hw *hw)
1107{
1108 u32 data;
1109 int timeout;
1110
1111 AT_READ_REG(hw, REG_RXQ_CTRL, &data);
1112 data &= ~(RXQ1_CTRL_EN | RXQ2_CTRL_EN |
1113 RXQ3_CTRL_EN | RXQ_CTRL_EN);
1114 AT_WRITE_REG(hw, REG_RXQ_CTRL, data);
1115
1116 AT_READ_REG(hw, REG_TXQ_CTRL, &data);
1117 data &= ~TXQ_CTRL_EN;
1118	AT_WRITE_REG(hw, REG_TXQ_CTRL, data);
1119
1120 for (timeout = 0; timeout < AT_HW_MAX_IDLE_DELAY; timeout++) {
1121 AT_READ_REG(hw, REG_IDLE_STATUS, &data);
1122 if ((data & (IDLE_STATUS_RXQ_NO_IDLE |
1123 IDLE_STATUS_TXQ_NO_IDLE)) == 0)
1124 break;
1125 msleep(1);
1126 }
1127
1128 AT_READ_REG(hw, REG_MAC_CTRL, &data);
1129 data &= ~(MAC_CTRL_TX_EN | MAC_CTRL_RX_EN);
1130 AT_WRITE_REG(hw, REG_MAC_CTRL, data);
1131
1132 for (timeout = 0; timeout < AT_HW_MAX_IDLE_DELAY; timeout++) {
1133 AT_READ_REG(hw, REG_IDLE_STATUS, &data);
1134 if ((data & IDLE_STATUS_MASK) == 0)
1135 return 0;
1136 msleep(1);
1137 }
1138 return data;
1139}
1140
1141static void atl1c_enable_rx_ctrl(struct atl1c_hw *hw)
1142{
1143 u32 data;
1144
1145 AT_READ_REG(hw, REG_RXQ_CTRL, &data);
1146 switch (hw->adapter->num_rx_queues) {
1147 case 4:
1148 data |= (RXQ3_CTRL_EN | RXQ2_CTRL_EN | RXQ1_CTRL_EN);
1149 break;
1150 case 3:
1151 data |= (RXQ2_CTRL_EN | RXQ1_CTRL_EN);
1152 break;
1153 case 2:
1154 data |= RXQ1_CTRL_EN;
1155 break;
1156 default:
1157 break;
1158 }
1159 data |= RXQ_CTRL_EN;
1160 AT_WRITE_REG(hw, REG_RXQ_CTRL, data);
1161}
1162
1163static void atl1c_enable_tx_ctrl(struct atl1c_hw *hw)
1164{
1165 u32 data;
1166
1167 AT_READ_REG(hw, REG_TXQ_CTRL, &data);
1168 data |= TXQ_CTRL_EN;
1169 AT_WRITE_REG(hw, REG_TXQ_CTRL, data);
1170}
1171
1172/*
1173 * Reset the transmit and receive units; mask and clear all interrupts.
1174 * hw - Struct containing variables accessed by shared code
1175 * return : 0 or idle status (if error)
1176 */
1177static int atl1c_reset_mac(struct atl1c_hw *hw)
1178{
1179 struct atl1c_adapter *adapter = (struct atl1c_adapter *)hw->adapter;
1180 struct pci_dev *pdev = adapter->pdev;
1181 u32 idle_status_data = 0;
1182 int timeout = 0;
1183 int ret;
1184
1185 AT_WRITE_REG(hw, REG_IMR, 0);
1186 AT_WRITE_REG(hw, REG_ISR, ISR_DIS_INT);
1187
1188 ret = atl1c_stop_mac(hw);
1189 if (ret)
1190 return ret;
1191 /*
1192 * Issue Soft Reset to the MAC. This will reset the chip's
1193 * transmit, receive and DMA engines. It will not affect
1194 * the current PCI configuration. The global reset bit is self-
1195 * clearing, and should clear within a microsecond.
1196 */
1197 AT_WRITE_REGW(hw, REG_MASTER_CTRL, MASTER_CTRL_SOFT_RST);
1198 AT_WRITE_FLUSH(hw);
1199 msleep(10);
1200 /* Wait at least 10ms for All module to be Idle */
1201 for (timeout = 0; timeout < AT_HW_MAX_IDLE_DELAY; timeout++) {
1202 AT_READ_REG(hw, REG_IDLE_STATUS, &idle_status_data);
1203 if ((idle_status_data & IDLE_STATUS_MASK) == 0)
1204 break;
1205 msleep(1);
1206 }
1207 if (timeout >= AT_HW_MAX_IDLE_DELAY) {
1208 dev_err(&pdev->dev,
1209			"MAC state machine can't become idle within"
1210			" 10ms of being disabled\n");
1211 return -1;
1212 }
1213 return 0;
1214}
1215
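/*
 * atl1c_disable_l0s_l1 - Disable ASPM L0s/L1 power states
 * @hw: Struct containing variables accessed by shared code
 *
 * Clears the ASPM L0s/L1 enable bits and the L1 entry timer in REG_PM_CTRL
 * and sets the SerDes buffer/PLL/L1 enable bits, so the link never enters
 * the L0s/L1 low power states.
 */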
1216static void atl1c_disable_l0s_l1(struct atl1c_hw *hw)
1217{
1218 u32 pm_ctrl_data;
1219
1220 AT_READ_REG(hw, REG_PM_CTRL, &pm_ctrl_data);
1221 pm_ctrl_data &= ~(PM_CTRL_L1_ENTRY_TIMER_MASK <<
1222 PM_CTRL_L1_ENTRY_TIMER_SHIFT);
1223 pm_ctrl_data &= ~PM_CTRL_CLK_SWH_L1;
1224 pm_ctrl_data &= ~PM_CTRL_ASPM_L0S_EN;
1225 pm_ctrl_data &= ~PM_CTRL_ASPM_L1_EN;
1226 pm_ctrl_data &= ~PM_CTRL_MAC_ASPM_CHK;
1227 pm_ctrl_data &= ~PM_CTRL_SERDES_PD_EX_L1;
1228
1229 pm_ctrl_data |= PM_CTRL_SERDES_BUDS_RX_L1_EN;
1230 pm_ctrl_data |= PM_CTRL_SERDES_PLL_L1_EN;
1231 pm_ctrl_data |= PM_CTRL_SERDES_L1_EN;
1232 AT_WRITE_REG(hw, REG_PM_CTRL, pm_ctrl_data);
1233}
1234
1235/*
1236 * Set ASPM state.
1237 * Enable/disable L0s/L1 depending on the link state.
1238 */
1239static void atl1c_set_aspm(struct atl1c_hw *hw, bool linkup)
1240{
1241 u32 pm_ctrl_data;
1242
1243 AT_READ_REG(hw, REG_PM_CTRL, &pm_ctrl_data);
1244
1245 pm_ctrl_data &= PM_CTRL_SERDES_PD_EX_L1;
1246 pm_ctrl_data |= ~PM_CTRL_SERDES_BUDS_RX_L1_EN;
1247 pm_ctrl_data |= ~PM_CTRL_SERDES_L1_EN;
1248 pm_ctrl_data &= ~(PM_CTRL_L1_ENTRY_TIMER_MASK <<
1249 PM_CTRL_L1_ENTRY_TIMER_SHIFT);
1250
1251 pm_ctrl_data |= PM_CTRL_MAC_ASPM_CHK;
1252
1253 if (linkup) {
1254 pm_ctrl_data |= PM_CTRL_SERDES_PLL_L1_EN;
1255 pm_ctrl_data &= ~PM_CTRL_CLK_SWH_L1;
1256
1257 if (hw->ctrl_flags & ATL1C_ASPM_L1_SUPPORT) {
1258 pm_ctrl_data |= AT_ASPM_L1_TIMER <<
1259 PM_CTRL_L1_ENTRY_TIMER_SHIFT;
1260 pm_ctrl_data |= PM_CTRL_ASPM_L1_EN;
1261 } else
1262 pm_ctrl_data &= ~PM_CTRL_ASPM_L1_EN;
1263
1264 if (hw->ctrl_flags & ATL1C_ASPM_L0S_SUPPORT)
1265 pm_ctrl_data |= PM_CTRL_ASPM_L0S_EN;
1266 else
1267 pm_ctrl_data &= ~PM_CTRL_ASPM_L0S_EN;
1268
1269 } else {
1270 pm_ctrl_data &= ~PM_CTRL_ASPM_L0S_EN;
1271 pm_ctrl_data &= ~PM_CTRL_SERDES_PLL_L1_EN;
1272
1273 pm_ctrl_data |= PM_CTRL_CLK_SWH_L1;
1274
1275 if (hw->ctrl_flags & ATL1C_ASPM_L1_SUPPORT)
1276 pm_ctrl_data |= PM_CTRL_ASPM_L1_EN;
1277 else
1278 pm_ctrl_data &= ~PM_CTRL_ASPM_L1_EN;
1279 }
1280
1281 AT_WRITE_REG(hw, REG_PM_CTRL, pm_ctrl_data);
1282}
1283
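/*
 * atl1c_setup_mac_ctrl - Program the MAC control register for the current link
 * @adapter: board private structure
 *
 * Enables TX/RX and flow control, sets duplex and speed from the negotiated
 * link parameters, and applies the CRC/padding, VLAN stripping and
 * promiscuous/all-multicast filtering flags taken from the netdev.
 */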
1284static void atl1c_setup_mac_ctrl(struct atl1c_adapter *adapter)
1285{
1286 struct atl1c_hw *hw = &adapter->hw;
1287 struct net_device *netdev = adapter->netdev;
1288 u32 mac_ctrl_data;
1289
1290 mac_ctrl_data = MAC_CTRL_TX_EN | MAC_CTRL_RX_EN;
1291 mac_ctrl_data |= (MAC_CTRL_TX_FLOW | MAC_CTRL_RX_FLOW);
1292
1293 if (adapter->link_duplex == FULL_DUPLEX) {
1294 hw->mac_duplex = true;
1295 mac_ctrl_data |= MAC_CTRL_DUPLX;
1296 }
1297
1298 if (adapter->link_speed == SPEED_1000)
1299 hw->mac_speed = atl1c_mac_speed_1000;
1300 else
1301 hw->mac_speed = atl1c_mac_speed_10_100;
1302
1303 mac_ctrl_data |= (hw->mac_speed & MAC_CTRL_SPEED_MASK) <<
1304 MAC_CTRL_SPEED_SHIFT;
1305
1306 mac_ctrl_data |= (MAC_CTRL_ADD_CRC | MAC_CTRL_PAD);
1307 mac_ctrl_data |= ((hw->preamble_len & MAC_CTRL_PRMLEN_MASK) <<
1308 MAC_CTRL_PRMLEN_SHIFT);
1309
1310 if (adapter->vlgrp)
1311 mac_ctrl_data |= MAC_CTRL_RMV_VLAN;
1312
1313 mac_ctrl_data |= MAC_CTRL_BC_EN;
1314 if (netdev->flags & IFF_PROMISC)
1315 mac_ctrl_data |= MAC_CTRL_PROMIS_EN;
1316 if (netdev->flags & IFF_ALLMULTI)
1317 mac_ctrl_data |= MAC_CTRL_MC_ALL_EN;
1318
1319 mac_ctrl_data |= MAC_CTRL_SINGLE_PAUSE_EN;
1320 AT_WRITE_REG(hw, REG_MAC_CTRL, mac_ctrl_data);
1321}
1322
1323/*
1324 * atl1c_configure - Configure Transmit & Receive Units after Reset
1325 * @adapter: board private structure
1326 *
1327 * Configure the Tx/Rx unit of the MAC after a reset.
1328 */
1329static int atl1c_configure(struct atl1c_adapter *adapter)
1330{
1331 struct atl1c_hw *hw = &adapter->hw;
1332 u32 master_ctrl_data = 0;
1333 u32 intr_modrt_data;
1334
1335 /* clear interrupt status */
1336 AT_WRITE_REG(hw, REG_ISR, 0xFFFFFFFF);
1337 /* Clear any WOL status */
1338 AT_WRITE_REG(hw, REG_WOL_CTRL, 0);
1339	/* set the interrupt retrigger timer
1340	 * HW re-asserts the interrupt to the system if software has not
1341	 * acknowledged the previous interrupt within this interval.
1342	 */
1343 AT_WRITE_REG(hw, REG_INT_RETRIG_TIMER,
1344 hw->ict & INT_RETRIG_TIMER_MASK);
1345
1346 atl1c_configure_des_ring(adapter);
1347
1348 if (hw->ctrl_flags & ATL1C_INTR_MODRT_ENABLE) {
1349 intr_modrt_data = (hw->tx_imt & IRQ_MODRT_TIMER_MASK) <<
1350 IRQ_MODRT_TX_TIMER_SHIFT;
1351 intr_modrt_data |= (hw->rx_imt & IRQ_MODRT_TIMER_MASK) <<
1352 IRQ_MODRT_RX_TIMER_SHIFT;
1353 AT_WRITE_REG(hw, REG_IRQ_MODRT_TIMER_INIT, intr_modrt_data);
1354 master_ctrl_data |=
1355 MASTER_CTRL_TX_ITIMER_EN | MASTER_CTRL_RX_ITIMER_EN;
1356 }
1357
1358 if (hw->ctrl_flags & ATL1C_INTR_CLEAR_ON_READ)
1359 master_ctrl_data |= MASTER_CTRL_INT_RDCLR;
1360
1361 AT_WRITE_REG(hw, REG_MASTER_CTRL, master_ctrl_data);
1362
1363 if (hw->ctrl_flags & ATL1C_CMB_ENABLE) {
1364 AT_WRITE_REG(hw, REG_CMB_TPD_THRESH,
1365 hw->cmb_tpd & CMB_TPD_THRESH_MASK);
1366 AT_WRITE_REG(hw, REG_CMB_TX_TIMER,
1367 hw->cmb_tx_timer & CMB_TX_TIMER_MASK);
1368 }
1369
1370 if (hw->ctrl_flags & ATL1C_SMB_ENABLE)
1371 AT_WRITE_REG(hw, REG_SMB_STAT_TIMER,
1372 hw->smb_timer & SMB_STAT_TIMER_MASK);
1373 /* set MTU */
1374 AT_WRITE_REG(hw, REG_MTU, hw->max_frame_size + ETH_HLEN +
1375 VLAN_HLEN + ETH_FCS_LEN);
1376 /* HDS, disable */
1377 AT_WRITE_REG(hw, REG_HDS_CTRL, 0);
1378
1379 atl1c_configure_tx(adapter);
1380 atl1c_configure_rx(adapter);
1381 atl1c_configure_rss(adapter);
1382 atl1c_configure_dma(adapter);
1383
1384 return 0;
1385}
1386
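/*
 * atl1c_update_hw_stats - Read the MAC statistics registers
 * @adapter: board private structure
 *
 * Walks the RX and TX statistics register blocks and accumulates each
 * counter into the matching field of adapter->hw_stats; this relies on the
 * hw_stats fields being laid out in the same order as the registers.
 */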
1387static void atl1c_update_hw_stats(struct atl1c_adapter *adapter)
1388{
1389 u16 hw_reg_addr = 0;
1390 unsigned long *stats_item = NULL;
1391 u32 data;
1392
1393 /* update rx status */
1394 hw_reg_addr = REG_MAC_RX_STATUS_BIN;
1395 stats_item = &adapter->hw_stats.rx_ok;
1396 while (hw_reg_addr <= REG_MAC_RX_STATUS_END) {
1397 AT_READ_REG(&adapter->hw, hw_reg_addr, &data);
1398 *stats_item += data;
1399 stats_item++;
1400 hw_reg_addr += 4;
1401 }
1402	/* update tx status */
1403 hw_reg_addr = REG_MAC_TX_STATUS_BIN;
1404 stats_item = &adapter->hw_stats.tx_ok;
1405 while (hw_reg_addr <= REG_MAC_TX_STATUS_END) {
1406 AT_READ_REG(&adapter->hw, hw_reg_addr, &data);
1407 *stats_item += data;
1408 stats_item++;
1409 hw_reg_addr += 4;
1410 }
1411}
1412
1413/*
1414 * atl1c_get_stats - Get System Network Statistics
1415 * @netdev: network interface device structure
1416 *
1417 * Returns the address of the device statistics structure.
1418 * The statistics are actually updated from the timer callback.
1419 */
1420static struct net_device_stats *atl1c_get_stats(struct net_device *netdev)
1421{
1422 struct atl1c_adapter *adapter = netdev_priv(netdev);
1423 struct atl1c_hw_stats *hw_stats = &adapter->hw_stats;
1424 struct net_device_stats *net_stats = &adapter->net_stats;
1425
1426 atl1c_update_hw_stats(adapter);
1427 net_stats->rx_packets = hw_stats->rx_ok;
1428 net_stats->tx_packets = hw_stats->tx_ok;
1429 net_stats->rx_bytes = hw_stats->rx_byte_cnt;
1430 net_stats->tx_bytes = hw_stats->tx_byte_cnt;
1431 net_stats->multicast = hw_stats->rx_mcast;
1432 net_stats->collisions = hw_stats->tx_1_col +
1433 hw_stats->tx_2_col * 2 +
1434 hw_stats->tx_late_col + hw_stats->tx_abort_col;
1435 net_stats->rx_errors = hw_stats->rx_frag + hw_stats->rx_fcs_err +
1436 hw_stats->rx_len_err + hw_stats->rx_sz_ov +
1437 hw_stats->rx_rrd_ov + hw_stats->rx_align_err;
1438 net_stats->rx_fifo_errors = hw_stats->rx_rxf_ov;
1439 net_stats->rx_length_errors = hw_stats->rx_len_err;
1440 net_stats->rx_crc_errors = hw_stats->rx_fcs_err;
1441 net_stats->rx_frame_errors = hw_stats->rx_align_err;
1442 net_stats->rx_over_errors = hw_stats->rx_rrd_ov + hw_stats->rx_rxf_ov;
1443
1444 net_stats->rx_missed_errors = hw_stats->rx_rrd_ov + hw_stats->rx_rxf_ov;
1445
1446 net_stats->tx_errors = hw_stats->tx_late_col + hw_stats->tx_abort_col +
1447 hw_stats->tx_underrun + hw_stats->tx_trunc;
1448 net_stats->tx_fifo_errors = hw_stats->tx_underrun;
1449 net_stats->tx_aborted_errors = hw_stats->tx_abort_col;
1450 net_stats->tx_window_errors = hw_stats->tx_late_col;
1451
1452 return &adapter->net_stats;
1453}
1454
1455static inline void atl1c_clear_phy_int(struct atl1c_adapter *adapter)
1456{
1457 u16 phy_data;
1458
1459 spin_lock(&adapter->mdio_lock);
1460 atl1c_read_phy_reg(&adapter->hw, MII_ISR, &phy_data);
1461 spin_unlock(&adapter->mdio_lock);
1462}
1463
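/*
 * atl1c_clean_tx_irq - Reclaim completed transmit descriptors
 * @adapter: board private structure
 * @type: which transmit ring (normal or high priority) to clean
 *
 * Reads the hardware consumer index from REG_MB_PRIO_CONS_IDX, unmaps and
 * frees every buffer up to that index, and wakes the transmit queue if it
 * was stopped while the link is still up.
 */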
1464static bool atl1c_clean_tx_irq(struct atl1c_adapter *adapter,
1465 enum atl1c_trans_queue type)
1466{
1467 struct atl1c_tpd_ring *tpd_ring = (struct atl1c_tpd_ring *)
1468 &adapter->tpd_ring[type];
1469 struct atl1c_buffer *buffer_info;
1470 u16 next_to_clean = atomic_read(&tpd_ring->next_to_clean);
1471 u16 hw_next_to_clean;
1472 u16 shift;
1473 u32 data;
1474
1475 if (type == atl1c_trans_high)
1476 shift = MB_HTPD_CONS_IDX_SHIFT;
1477 else
1478 shift = MB_NTPD_CONS_IDX_SHIFT;
1479
1480 AT_READ_REG(&adapter->hw, REG_MB_PRIO_CONS_IDX, &data);
1481 hw_next_to_clean = (data >> shift) & MB_PRIO_PROD_IDX_MASK;
1482
1483 while (next_to_clean != hw_next_to_clean) {
1484 buffer_info = &tpd_ring->buffer_info[next_to_clean];
1485 if (buffer_info->state == ATL1_BUFFER_BUSY) {
1486 pci_unmap_page(adapter->pdev, buffer_info->dma,
1487 buffer_info->length, PCI_DMA_TODEVICE);
1488 buffer_info->dma = 0;
1489 if (buffer_info->skb) {
1490 dev_kfree_skb_irq(buffer_info->skb);
1491 buffer_info->skb = NULL;
1492 }
1493 buffer_info->state = ATL1_BUFFER_FREE;
1494 }
1495 if (++next_to_clean == tpd_ring->count)
1496 next_to_clean = 0;
1497 atomic_set(&tpd_ring->next_to_clean, next_to_clean);
1498 }
1499
1500 if (netif_queue_stopped(adapter->netdev) &&
1501 netif_carrier_ok(adapter->netdev)) {
1502 netif_wake_queue(adapter->netdev);
1503 }
1504
1505 return true;
1506}
1507
1508/*
1509 * atl1c_intr - Interrupt Handler
1510 * @irq: interrupt number
1511 * @data: pointer to a network interface device structure
1513 */
1514static irqreturn_t atl1c_intr(int irq, void *data)
1515{
1516 struct net_device *netdev = data;
1517 struct atl1c_adapter *adapter = netdev_priv(netdev);
1518 struct pci_dev *pdev = adapter->pdev;
1519 struct atl1c_hw *hw = &adapter->hw;
1520 int max_ints = AT_MAX_INT_WORK;
1521 int handled = IRQ_NONE;
1522 u32 status;
1523 u32 reg_data;
1524
1525 do {
1526 AT_READ_REG(hw, REG_ISR, &reg_data);
1527 status = reg_data & hw->intr_mask;
1528
1529 if (status == 0 || (status & ISR_DIS_INT) != 0) {
1530 if (max_ints != AT_MAX_INT_WORK)
1531 handled = IRQ_HANDLED;
1532 break;
1533 }
1534 /* link event */
1535 if (status & ISR_GPHY)
1536 atl1c_clear_phy_int(adapter);
1537 /* Ack ISR */
1538 AT_WRITE_REG(hw, REG_ISR, status | ISR_DIS_INT);
1539 if (status & ISR_RX_PKT) {
1540 if (likely(napi_schedule_prep(&adapter->napi))) {
1541 hw->intr_mask &= ~ISR_RX_PKT;
1542 AT_WRITE_REG(hw, REG_IMR, hw->intr_mask);
1543 __napi_schedule(&adapter->napi);
1544 }
1545 }
1546 if (status & ISR_TX_PKT)
1547 atl1c_clean_tx_irq(adapter, atl1c_trans_normal);
1548
1549 handled = IRQ_HANDLED;
1550 /* check if PCIE PHY Link down */
1551 if (status & ISR_ERROR) {
1552 if (netif_msg_hw(adapter))
1553 dev_err(&pdev->dev,
1554 "atl1c hardware error (status = 0x%x)\n",
1555 status & ISR_ERROR);
1556 /* reset MAC */
1557 hw->intr_mask &= ~ISR_ERROR;
1558 AT_WRITE_REG(hw, REG_IMR, hw->intr_mask);
1559 schedule_work(&adapter->reset_task);
1560 break;
1561 }
1562
1563 if (status & ISR_OVER)
1564 if (netif_msg_intr(adapter))
1565 dev_warn(&pdev->dev,
1566 "TX/RX over flow (status = 0x%x)\n",
1567 status & ISR_OVER);
1568
1569 /* link event */
1570 if (status & (ISR_GPHY | ISR_MANUAL)) {
1571 adapter->net_stats.tx_carrier_errors++;
1572 atl1c_link_chg_event(adapter);
1573 break;
1574 }
1575
1576 } while (--max_ints > 0);
1577	/* re-enable interrupts */
1578 AT_WRITE_REG(&adapter->hw, REG_ISR, 0);
1579 return handled;
1580}
1581
1582static inline void atl1c_rx_checksum(struct atl1c_adapter *adapter,
1583 struct sk_buff *skb, struct atl1c_recv_ret_status *prrs)
1584{
1585 /*
1586	 * The pid field in the RRS is not always correct, so we cannot
1587	 * tell whether the packet is fragmented; report CHECKSUM_NONE
1588	 * to the kernel and let the stack verify the checksum.
1589 */
1590 skb->ip_summed = CHECKSUM_NONE;
1591}
1592
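/*
 * atl1c_alloc_rx_buffer - Refill free receive descriptors with fresh skbs
 * @adapter: board private structure
 * @ringid: index of the RFD ring to refill
 *
 * Allocates and DMA-maps an skb for every free slot in the RFD ring, then
 * writes the new producer index to the hardware. Returns the number of
 * buffers that were actually allocated.
 */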
1593static int atl1c_alloc_rx_buffer(struct atl1c_adapter *adapter, const int ringid)
1594{
1595 struct atl1c_rfd_ring *rfd_ring = &adapter->rfd_ring[ringid];
1596 struct pci_dev *pdev = adapter->pdev;
1597 struct atl1c_buffer *buffer_info, *next_info;
1598 struct sk_buff *skb;
1599 void *vir_addr = NULL;
1600 u16 num_alloc = 0;
1601 u16 rfd_next_to_use, next_next;
1602 struct atl1c_rx_free_desc *rfd_desc;
1603
1604 next_next = rfd_next_to_use = rfd_ring->next_to_use;
1605 if (++next_next == rfd_ring->count)
1606 next_next = 0;
1607 buffer_info = &rfd_ring->buffer_info[rfd_next_to_use];
1608 next_info = &rfd_ring->buffer_info[next_next];
1609
1610 while (next_info->state == ATL1_BUFFER_FREE) {
1611 rfd_desc = ATL1C_RFD_DESC(rfd_ring, rfd_next_to_use);
1612
1613 skb = dev_alloc_skb(adapter->rx_buffer_len);
1614 if (unlikely(!skb)) {
1615 if (netif_msg_rx_err(adapter))
1616 dev_warn(&pdev->dev, "alloc rx buffer failed\n");
1617 break;
1618 }
1619
1620 /*
1621		 * Make buffer alignment 2 beyond a 16 byte boundary;
1622		 * this will result in a 16 byte aligned IP header after
1623		 * the 14 byte MAC header is removed.
1624 */
1625 vir_addr = skb->data;
1626 buffer_info->state = ATL1_BUFFER_BUSY;
1627 buffer_info->skb = skb;
1628 buffer_info->length = adapter->rx_buffer_len;
1629 buffer_info->dma = pci_map_single(pdev, vir_addr,
1630 buffer_info->length,
1631 PCI_DMA_FROMDEVICE);
1632 rfd_desc->buffer_addr = cpu_to_le64(buffer_info->dma);
1633 rfd_next_to_use = next_next;
1634 if (++next_next == rfd_ring->count)
1635 next_next = 0;
1636 buffer_info = &rfd_ring->buffer_info[rfd_next_to_use];
1637 next_info = &rfd_ring->buffer_info[next_next];
1638 num_alloc++;
1639 }
1640
1641 if (num_alloc) {
1642 /* TODO: update mailbox here */
1643 wmb();
1644 rfd_ring->next_to_use = rfd_next_to_use;
1645 AT_WRITE_REG(&adapter->hw, atl1c_rfd_prod_idx_regs[ringid],
1646 rfd_ring->next_to_use & MB_RFDX_PROD_IDX_MASK);
1647 }
1648
1649 return num_alloc;
1650}
1651
1652static void atl1c_clean_rrd(struct atl1c_rrd_ring *rrd_ring,
1653 struct atl1c_recv_ret_status *rrs, u16 num)
1654{
1655 u16 i;
1656	/* the relationship between RRD and RFD is one-to-one */
1657 for (i = 0; i < num; i++, rrs = ATL1C_RRD_DESC(rrd_ring,
1658 rrd_ring->next_to_clean)) {
1659 rrs->word3 &= ~RRS_RXD_UPDATED;
1660 if (++rrd_ring->next_to_clean == rrd_ring->count)
1661 rrd_ring->next_to_clean = 0;
1662 }
1663}
1664
1665static void atl1c_clean_rfd(struct atl1c_rfd_ring *rfd_ring,
1666 struct atl1c_recv_ret_status *rrs, u16 num)
1667{
1668 u16 i;
1669 u16 rfd_index;
1670 struct atl1c_buffer *buffer_info = rfd_ring->buffer_info;
1671
1672 rfd_index = (rrs->word0 >> RRS_RX_RFD_INDEX_SHIFT) &
1673 RRS_RX_RFD_INDEX_MASK;
1674 for (i = 0; i < num; i++) {
1675 buffer_info[rfd_index].skb = NULL;
1676 buffer_info[rfd_index].state = ATL1_BUFFER_FREE;
1677 if (++rfd_index == rfd_ring->count)
1678 rfd_index = 0;
1679 }
1680 rfd_ring->next_to_clean = rfd_index;
1681}
1682
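/*
 * atl1c_clean_rx_irq - Process received packets for one queue
 * @adapter: board private structure
 * @que: receive queue index
 * @work_done: incremented for every packet handed to the stack
 * @work_to_do: NAPI budget limit
 *
 * Walks the receive return ring, drops errored packets, passes good ones to
 * the network stack (with VLAN acceleration when a VLAN group is registered)
 * and refills the free descriptor ring afterwards.
 */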
1683static void atl1c_clean_rx_irq(struct atl1c_adapter *adapter, u8 que,
1684 int *work_done, int work_to_do)
1685{
1686 u16 rfd_num, rfd_index;
1687 u16 count = 0;
1688 u16 length;
1689 struct pci_dev *pdev = adapter->pdev;
1690 struct net_device *netdev = adapter->netdev;
1691 struct atl1c_rfd_ring *rfd_ring = &adapter->rfd_ring[que];
1692 struct atl1c_rrd_ring *rrd_ring = &adapter->rrd_ring[que];
1693 struct sk_buff *skb;
1694 struct atl1c_recv_ret_status *rrs;
1695 struct atl1c_buffer *buffer_info;
1696
1697 while (1) {
1698 if (*work_done >= work_to_do)
1699 break;
1700 rrs = ATL1C_RRD_DESC(rrd_ring, rrd_ring->next_to_clean);
1701 if (likely(RRS_RXD_IS_VALID(rrs->word3))) {
1702 rfd_num = (rrs->word0 >> RRS_RX_RFD_CNT_SHIFT) &
1703 RRS_RX_RFD_CNT_MASK;
1704			if (unlikely(rfd_num != 1))
1705				/* TODO: multiple RFDs are not supported yet */
1706				if (netif_msg_rx_err(adapter))
1707					dev_warn(&pdev->dev,
1708						"Multiple RFDs are not supported yet!\n");
1709 goto rrs_checked;
1710 } else {
1711 break;
1712 }
1713rrs_checked:
1714 atl1c_clean_rrd(rrd_ring, rrs, rfd_num);
1715 if (rrs->word3 & (RRS_RX_ERR_SUM | RRS_802_3_LEN_ERR)) {
1716 atl1c_clean_rfd(rfd_ring, rrs, rfd_num);
1717 if (netif_msg_rx_err(adapter))
1718 dev_warn(&pdev->dev,
1719 "wrong packet! rrs word3 is %x\n",
1720 rrs->word3);
1721 continue;
1722 }
1723
1724 length = le16_to_cpu((rrs->word3 >> RRS_PKT_SIZE_SHIFT) &
1725 RRS_PKT_SIZE_MASK);
1726 /* Good Receive */
1727 if (likely(rfd_num == 1)) {
1728 rfd_index = (rrs->word0 >> RRS_RX_RFD_INDEX_SHIFT) &
1729 RRS_RX_RFD_INDEX_MASK;
1730 buffer_info = &rfd_ring->buffer_info[rfd_index];
1731 pci_unmap_single(pdev, buffer_info->dma,
1732 buffer_info->length, PCI_DMA_FROMDEVICE);
1733 skb = buffer_info->skb;
1734 } else {
1735 /* TODO */
1736 if (netif_msg_rx_err(adapter))
1737 dev_warn(&pdev->dev,
1738					"Multiple RFDs are not supported yet!\n");
1739 break;
1740 }
1741 atl1c_clean_rfd(rfd_ring, rrs, rfd_num);
1742 skb_put(skb, length - ETH_FCS_LEN);
1743 skb->protocol = eth_type_trans(skb, netdev);
1744 skb->dev = netdev;
1745 atl1c_rx_checksum(adapter, skb, rrs);
1746 if (unlikely(adapter->vlgrp) && rrs->word3 & RRS_VLAN_INS) {
1747 u16 vlan;
1748
1749 AT_TAG_TO_VLAN(rrs->vlan_tag, vlan);
1750 vlan = le16_to_cpu(vlan);
1751 vlan_hwaccel_receive_skb(skb, adapter->vlgrp, vlan);
1752 } else
1753 netif_receive_skb(skb);
1754
1755 netdev->last_rx = jiffies;
1756 (*work_done)++;
1757 count++;
1758 }
1759 if (count)
1760 atl1c_alloc_rx_buffer(adapter, que);
1761}
1762
1763/*
1764 * atl1c_clean - NAPI Rx polling callback
1765 * @napi: napi polling context
1766 */
1767static int atl1c_clean(struct napi_struct *napi, int budget)
1768{
1769 struct atl1c_adapter *adapter =
1770 container_of(napi, struct atl1c_adapter, napi);
1771 int work_done = 0;
1772
1773 /* Keep link state information with original netdev */
1774 if (!netif_carrier_ok(adapter->netdev))
1775 goto quit_polling;
1776 /* just enable one RXQ */
1777 atl1c_clean_rx_irq(adapter, 0, &work_done, budget);
1778
1779 if (work_done < budget) {
1780quit_polling:
1781 napi_complete(napi);
1782 adapter->hw.intr_mask |= ISR_RX_PKT;
1783 AT_WRITE_REG(&adapter->hw, REG_IMR, adapter->hw.intr_mask);
1784 }
1785 return work_done;
1786}
1787
1788#ifdef CONFIG_NET_POLL_CONTROLLER
1789
1790/*
1791 * Polling 'interrupt' - used by things like netconsole to send skbs
1792 * without having to re-enable interrupts. It's not called while
1793 * the interrupt routine is executing.
1794 */
1795static void atl1c_netpoll(struct net_device *netdev)
1796{
1797 struct atl1c_adapter *adapter = netdev_priv(netdev);
1798
1799 disable_irq(adapter->pdev->irq);
1800 atl1c_intr(adapter->pdev->irq, netdev);
1801 enable_irq(adapter->pdev->irq);
1802}
1803#endif
1804
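/*
 * atl1c_tpd_avail - Number of free transmit descriptors in a ring
 * @adapter: board private structure
 * @type: transmit ring to check
 *
 * Computed from the software producer index and the consumer index last
 * reported by hardware; one slot is always kept unused to distinguish a
 * full ring from an empty one.
 */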
1805static inline u16 atl1c_tpd_avail(struct atl1c_adapter *adapter, enum atl1c_trans_queue type)
1806{
1807 struct atl1c_tpd_ring *tpd_ring = &adapter->tpd_ring[type];
1808 u16 next_to_use = 0;
1809 u16 next_to_clean = 0;
1810
1811 next_to_clean = atomic_read(&tpd_ring->next_to_clean);
1812 next_to_use = tpd_ring->next_to_use;
1813
1814	return (u16)((next_to_clean > next_to_use) ?
1815		(next_to_clean - next_to_use - 1) :
1816		(tpd_ring->count + next_to_clean - next_to_use - 1));
1817}
1818
1819/*
1820 * get the next usable tpd
1821 * Note: the caller should call atl1c_tpd_avail() first to make sure
1822 * enough tpds are available
1823 */
1824static struct atl1c_tpd_desc *atl1c_get_tpd(struct atl1c_adapter *adapter,
1825 enum atl1c_trans_queue type)
1826{
1827 struct atl1c_tpd_ring *tpd_ring = &adapter->tpd_ring[type];
1828 struct atl1c_tpd_desc *tpd_desc;
1829 u16 next_to_use = 0;
1830
1831 next_to_use = tpd_ring->next_to_use;
1832 if (++tpd_ring->next_to_use == tpd_ring->count)
1833 tpd_ring->next_to_use = 0;
1834 tpd_desc = ATL1C_TPD_DESC(tpd_ring, next_to_use);
1835 memset(tpd_desc, 0, sizeof(struct atl1c_tpd_desc));
1836 return tpd_desc;
1837}
1838
1839static struct atl1c_buffer *
1840atl1c_get_tx_buffer(struct atl1c_adapter *adapter, struct atl1c_tpd_desc *tpd)
1841{
1842 struct atl1c_tpd_ring *tpd_ring = adapter->tpd_ring;
1843
1844 return &tpd_ring->buffer_info[tpd -
1845 (struct atl1c_tpd_desc *)tpd_ring->desc];
1846}
1847
1848/* Calculate the number of transmit descriptors (TPDs) needed */
1849static u16 atl1c_cal_tpd_req(const struct sk_buff *skb)
1850{
1851 u16 tpd_req;
1852 u16 proto_hdr_len = 0;
1853
1854 tpd_req = skb_shinfo(skb)->nr_frags + 1;
1855
1856 if (skb_is_gso(skb)) {
1857 proto_hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
1858 if (proto_hdr_len < skb_headlen(skb))
1859 tpd_req++;
1860 if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
1861 tpd_req++;
1862 }
1863 return tpd_req;
1864}
1865
1866static int atl1c_tso_csum(struct atl1c_adapter *adapter,
1867 struct sk_buff *skb,
1868 struct atl1c_tpd_desc **tpd,
1869 enum atl1c_trans_queue type)
1870{
1871 struct pci_dev *pdev = adapter->pdev;
1872 u8 hdr_len;
1873 u32 real_len;
1874 unsigned short offload_type;
1875 int err;
1876
1877 if (skb_is_gso(skb)) {
1878 if (skb_header_cloned(skb)) {
1879 err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
1880 if (unlikely(err))
1881 return -1;
1882 }
1883 offload_type = skb_shinfo(skb)->gso_type;
1884
1885 if (offload_type & SKB_GSO_TCPV4) {
1886 real_len = (((unsigned char *)ip_hdr(skb) - skb->data)
1887 + ntohs(ip_hdr(skb)->tot_len));
1888
1889 if (real_len < skb->len)
1890 pskb_trim(skb, real_len);
1891
1892 hdr_len = (skb_transport_offset(skb) + tcp_hdrlen(skb));
1893 if (unlikely(skb->len == hdr_len)) {
1894 /* only xsum need */
1895 if (netif_msg_tx_queued(adapter))
1896 dev_warn(&pdev->dev,
1897 "IPV4 tso with zero data??\n");
1898 goto check_sum;
1899 } else {
1900 ip_hdr(skb)->check = 0;
1901 tcp_hdr(skb)->check = ~csum_tcpudp_magic(
1902 ip_hdr(skb)->saddr,
1903 ip_hdr(skb)->daddr,
1904 0, IPPROTO_TCP, 0);
1905 (*tpd)->word1 |= 1 << TPD_IPV4_PACKET_SHIFT;
1906 }
1907 }
1908
1909 if (offload_type & SKB_GSO_TCPV6) {
1910 struct atl1c_tpd_ext_desc *etpd =
1911 *(struct atl1c_tpd_ext_desc **)(tpd);
1912
1913 memset(etpd, 0, sizeof(struct atl1c_tpd_ext_desc));
1914 *tpd = atl1c_get_tpd(adapter, type);
1915 ipv6_hdr(skb)->payload_len = 0;
1916 /* check payload == 0 byte ? */
1917 hdr_len = (skb_transport_offset(skb) + tcp_hdrlen(skb));
1918 if (unlikely(skb->len == hdr_len)) {
1919 /* only xsum need */
1920 if (netif_msg_tx_queued(adapter))
1921 dev_warn(&pdev->dev,
1922 "IPV6 tso with zero data??\n");
1923 goto check_sum;
1924 } else
1925 tcp_hdr(skb)->check = ~csum_ipv6_magic(
1926 &ipv6_hdr(skb)->saddr,
1927 &ipv6_hdr(skb)->daddr,
1928 0, IPPROTO_TCP, 0);
1929 etpd->word1 |= 1 << TPD_LSO_EN_SHIFT;
1930 etpd->word1 |= 1 << TPD_LSO_VER_SHIFT;
1931 etpd->pkt_len = cpu_to_le32(skb->len);
1932 (*tpd)->word1 |= 1 << TPD_LSO_VER_SHIFT;
1933 }
1934
1935 (*tpd)->word1 |= 1 << TPD_LSO_EN_SHIFT;
1936 (*tpd)->word1 |= (skb_transport_offset(skb) & TPD_TCPHDR_OFFSET_MASK) <<
1937 TPD_TCPHDR_OFFSET_SHIFT;
1938 (*tpd)->word1 |= (skb_shinfo(skb)->gso_size & TPD_MSS_MASK) <<
1939 TPD_MSS_SHIFT;
1940 return 0;
1941 }
1942
1943check_sum:
1944 if (likely(skb->ip_summed == CHECKSUM_PARTIAL)) {
1945 u8 css, cso;
1946 cso = skb_transport_offset(skb);
1947
1948 if (unlikely(cso & 0x1)) {
1949 if (netif_msg_tx_err(adapter))
1950 dev_err(&adapter->pdev->dev,
1951					"payload offset should not be an odd number\n");
1952 return -1;
1953 } else {
1954 css = cso + skb->csum_offset;
1955
1956 (*tpd)->word1 |= ((cso >> 1) & TPD_PLOADOFFSET_MASK) <<
1957 TPD_PLOADOFFSET_SHIFT;
1958 (*tpd)->word1 |= ((css >> 1) & TPD_CCSUM_OFFSET_MASK) <<
1959 TPD_CCSUM_OFFSET_SHIFT;
1960 (*tpd)->word1 |= 1 << TPD_CCSUM_EN_SHIFT;
1961 }
1962 }
1963 return 0;
1964}
1965
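/*
 * atl1c_tx_map - DMA-map an skb onto transmit descriptors
 * @adapter: board private structure
 * @skb: packet to transmit
 * @tpd: first descriptor reserved for this packet
 * @type: transmit ring being used
 *
 * For TSO packets the protocol headers are mapped separately, then the
 * remaining linear data and every page fragment get their own descriptor.
 * The last descriptor is flagged EOP and remembers the skb so it can be
 * freed once the hardware has consumed it.
 */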
1966static void atl1c_tx_map(struct atl1c_adapter *adapter,
1967 struct sk_buff *skb, struct atl1c_tpd_desc *tpd,
1968 enum atl1c_trans_queue type)
1969{
1970 struct atl1c_tpd_desc *use_tpd = NULL;
1971 struct atl1c_buffer *buffer_info = NULL;
1972 u16 buf_len = skb_headlen(skb);
1973 u16 map_len = 0;
1974 u16 mapped_len = 0;
1975 u16 hdr_len = 0;
1976 u16 nr_frags;
1977 u16 f;
1978 int tso;
1979
1980 nr_frags = skb_shinfo(skb)->nr_frags;
1981 tso = (tpd->word1 >> TPD_LSO_EN_SHIFT) & TPD_LSO_EN_MASK;
1982 if (tso) {
1983 /* TSO */
1984 map_len = hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
1985 use_tpd = tpd;
1986
1987 buffer_info = atl1c_get_tx_buffer(adapter, use_tpd);
1988 buffer_info->length = map_len;
1989 buffer_info->dma = pci_map_single(adapter->pdev,
1990 skb->data, hdr_len, PCI_DMA_TODEVICE);
1991 buffer_info->state = ATL1_BUFFER_BUSY;
1992 mapped_len += map_len;
1993 use_tpd->buffer_addr = cpu_to_le64(buffer_info->dma);
1994 use_tpd->buffer_len = cpu_to_le16(buffer_info->length);
1995 }
1996
1997 if (mapped_len < buf_len) {
1998 /* mapped_len == 0, means we should use the first tpd,
1999 which is given by caller */
2000 if (mapped_len == 0)
2001 use_tpd = tpd;
2002 else {
2003 use_tpd = atl1c_get_tpd(adapter, type);
2004 memcpy(use_tpd, tpd, sizeof(struct atl1c_tpd_desc));
2007 }
2008 buffer_info = atl1c_get_tx_buffer(adapter, use_tpd);
2009 buffer_info->length = buf_len - mapped_len;
2010 buffer_info->dma =
2011 pci_map_single(adapter->pdev, skb->data + mapped_len,
2012 buffer_info->length, PCI_DMA_TODEVICE);
2013 buffer_info->state = ATL1_BUFFER_BUSY;
2014
2015 use_tpd->buffer_addr = cpu_to_le64(buffer_info->dma);
2016 use_tpd->buffer_len = cpu_to_le16(buffer_info->length);
2017 }
2018
2019 for (f = 0; f < nr_frags; f++) {
2020 struct skb_frag_struct *frag;
2021
2022 frag = &skb_shinfo(skb)->frags[f];
2023
2024 use_tpd = atl1c_get_tpd(adapter, type);
2025 memcpy(use_tpd, tpd, sizeof(struct atl1c_tpd_desc));
2026
2027 buffer_info = atl1c_get_tx_buffer(adapter, use_tpd);
2028 buffer_info->length = frag->size;
2029 buffer_info->dma =
2030 pci_map_page(adapter->pdev, frag->page,
2031 frag->page_offset,
2032 buffer_info->length,
2033 PCI_DMA_TODEVICE);
2034 buffer_info->state = ATL1_BUFFER_BUSY;
2035
2036 use_tpd->buffer_addr = cpu_to_le64(buffer_info->dma);
2037 use_tpd->buffer_len = cpu_to_le16(buffer_info->length);
2038 }
2039
2040 /* The last tpd */
2041 use_tpd->word1 |= 1 << TPD_EOP_SHIFT;
2042	/* The last buffer_info contains the skb address,
2043	   so the skb will be freed after unmapping */
2044 buffer_info->skb = skb;
2045}
2046
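/*
 * atl1c_tx_queue - Hand new transmit descriptors to the hardware
 * @adapter: board private structure
 * @skb: packet being transmitted
 * @tpd: first descriptor of the queued packet
 * @type: transmit ring being used
 *
 * Updates the half of REG_MB_PRIO_PROD_IDX that belongs to the selected
 * ring; the write barrier makes sure the descriptors are visible in memory
 * before the doorbell write.
 */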
2047static void atl1c_tx_queue(struct atl1c_adapter *adapter, struct sk_buff *skb,
2048 struct atl1c_tpd_desc *tpd, enum atl1c_trans_queue type)
2049{
2050 struct atl1c_tpd_ring *tpd_ring = &adapter->tpd_ring[type];
2051 u32 prod_data;
2052
2053 AT_READ_REG(&adapter->hw, REG_MB_PRIO_PROD_IDX, &prod_data);
2054 switch (type) {
2055 case atl1c_trans_high:
2056 prod_data &= 0xFFFF0000;
2057 prod_data |= tpd_ring->next_to_use & 0xFFFF;
2058 break;
2059 case atl1c_trans_normal:
2060 prod_data &= 0x0000FFFF;
2061 prod_data |= (tpd_ring->next_to_use & 0xFFFF) << 16;
2062 break;
2063 default:
2064 break;
2065 }
2066 wmb();
2067 AT_WRITE_REG(&adapter->hw, REG_MB_PRIO_PROD_IDX, prod_data);
2068}
2069
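/*
 * atl1c_xmit_frame - Main transmit entry point (ndo_start_xmit)
 * @skb: packet handed down by the stack
 * @netdev: network interface device structure
 *
 * Picks the high or normal priority ring based on skb->mark, makes sure
 * enough descriptors are free, performs TSO/checksum offload setup, adds a
 * VLAN tag when requested, and finally maps and queues the packet.
 */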
2070static int atl1c_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
2071{
2072 struct atl1c_adapter *adapter = netdev_priv(netdev);
2073 unsigned long flags;
2074 u16 tpd_req = 1;
2075 struct atl1c_tpd_desc *tpd;
2076 enum atl1c_trans_queue type = atl1c_trans_normal;
2077
2078 if (test_bit(__AT_DOWN, &adapter->flags)) {
2079 dev_kfree_skb_any(skb);
2080 return NETDEV_TX_OK;
2081 }
2082
2083 tpd_req = atl1c_cal_tpd_req(skb);
2084 if (!spin_trylock_irqsave(&adapter->tx_lock, flags)) {
2085 if (netif_msg_pktdata(adapter))
2086 dev_info(&adapter->pdev->dev, "tx locked\n");
2087 return NETDEV_TX_LOCKED;
2088 }
2089 if (skb->mark == 0x01)
2090 type = atl1c_trans_high;
2091 else
2092 type = atl1c_trans_normal;
2093
2094 if (atl1c_tpd_avail(adapter, type) < tpd_req) {
2095		/* not enough descriptors, just stop the queue */
2096 netif_stop_queue(netdev);
2097 spin_unlock_irqrestore(&adapter->tx_lock, flags);
2098 return NETDEV_TX_BUSY;
2099 }
2100
2101 tpd = atl1c_get_tpd(adapter, type);
2102
2103 /* do TSO and check sum */
2104 if (atl1c_tso_csum(adapter, skb, &tpd, type) != 0) {
2105 spin_unlock_irqrestore(&adapter->tx_lock, flags);
2106 dev_kfree_skb_any(skb);
2107 return NETDEV_TX_OK;
2108 }
2109
2110 if (unlikely(adapter->vlgrp && vlan_tx_tag_present(skb))) {
2111 u16 vlan = vlan_tx_tag_get(skb);
2112 __le16 tag;
2113
2114 vlan = cpu_to_le16(vlan);
2115 AT_VLAN_TO_TAG(vlan, tag);
2116 tpd->word1 |= 1 << TPD_INS_VTAG_SHIFT;
2117 tpd->vlan_tag = tag;
2118 }
2119
2120 if (skb_network_offset(skb) != ETH_HLEN)
2121 tpd->word1 |= 1 << TPD_ETH_TYPE_SHIFT; /* Ethernet frame */
2122
2123 atl1c_tx_map(adapter, skb, tpd, type);
2124 atl1c_tx_queue(adapter, skb, tpd, type);
2125
2126 netdev->trans_start = jiffies;
2127 spin_unlock_irqrestore(&adapter->tx_lock, flags);
2128 return NETDEV_TX_OK;
2129}
2130
2131static void atl1c_free_irq(struct atl1c_adapter *adapter)
2132{
2133 struct net_device *netdev = adapter->netdev;
2134
2135 free_irq(adapter->pdev->irq, netdev);
2136
2137 if (adapter->have_msi)
2138 pci_disable_msi(adapter->pdev);
2139}
2140
2141static int atl1c_request_irq(struct atl1c_adapter *adapter)
2142{
2143 struct pci_dev *pdev = adapter->pdev;
2144 struct net_device *netdev = adapter->netdev;
2145 int flags = 0;
2146 int err = 0;
2147
2148 adapter->have_msi = true;
2149 err = pci_enable_msi(adapter->pdev);
2150 if (err) {
2151 if (netif_msg_ifup(adapter))
2152 dev_err(&pdev->dev,
2153 "Unable to allocate MSI interrupt Error: %d\n",
2154 err);
2155 adapter->have_msi = false;
2156 } else
2157 netdev->irq = pdev->irq;
2158
2159 if (!adapter->have_msi)
2160 flags |= IRQF_SHARED;
2161 err = request_irq(adapter->pdev->irq, &atl1c_intr, flags,
2162 netdev->name, netdev);
2163 if (err) {
2164 if (netif_msg_ifup(adapter))
2165 dev_err(&pdev->dev,
2166 "Unable to allocate interrupt Error: %d\n",
2167 err);
2168 if (adapter->have_msi)
2169 pci_disable_msi(adapter->pdev);
2170 return err;
2171 }
2172 if (netif_msg_ifup(adapter))
2173 dev_dbg(&pdev->dev, "atl1c_request_irq OK\n");
2174 return err;
2175}
2176
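/*
 * atl1c_up - Bring the interface to a fully running state
 * @adapter: board private structure
 *
 * Re-initializes the ring pointers, restores multicast and VLAN settings,
 * pre-fills the receive rings, programs the hardware, requests the IRQ and
 * finally enables NAPI, interrupts and the transmit queue.
 */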
2177int atl1c_up(struct atl1c_adapter *adapter)
2178{
2179 struct net_device *netdev = adapter->netdev;
2180 int num;
2181 int err;
2182 int i;
2183
2184 netif_carrier_off(netdev);
2185 atl1c_init_ring_ptrs(adapter);
2186 atl1c_set_multi(netdev);
2187 atl1c_restore_vlan(adapter);
2188
2189 for (i = 0; i < adapter->num_rx_queues; i++) {
2190 num = atl1c_alloc_rx_buffer(adapter, i);
2191 if (unlikely(num == 0)) {
2192 err = -ENOMEM;
2193 goto err_alloc_rx;
2194 }
2195 }
2196
2197 if (atl1c_configure(adapter)) {
2198 err = -EIO;
2199 goto err_up;
2200 }
2201
2202 err = atl1c_request_irq(adapter);
2203 if (unlikely(err))
2204 goto err_up;
2205
2206 clear_bit(__AT_DOWN, &adapter->flags);
2207 napi_enable(&adapter->napi);
2208 atl1c_irq_enable(adapter);
2209 atl1c_check_link_status(adapter);
2210 netif_start_queue(netdev);
2211 return err;
2212
2213err_up:
2214err_alloc_rx:
2215 atl1c_clean_rx_ring(adapter);
2216 return err;
2217}
2218
2219void atl1c_down(struct atl1c_adapter *adapter)
2220{
2221 struct net_device *netdev = adapter->netdev;
2222
2223 atl1c_del_timer(adapter);
2224 atl1c_cancel_work(adapter);
2225
2226 /* signal that we're down so the interrupt handler does not
2227 * reschedule our watchdog timer */
2228 set_bit(__AT_DOWN, &adapter->flags);
2229 netif_carrier_off(netdev);
2230 napi_disable(&adapter->napi);
2231 atl1c_irq_disable(adapter);
2232 atl1c_free_irq(adapter);
2233 AT_WRITE_REG(&adapter->hw, REG_ISR, ISR_DIS_INT);
2234 /* reset MAC to disable all RX/TX */
2235 atl1c_reset_mac(&adapter->hw);
2236 msleep(1);
2237
2238 adapter->link_speed = SPEED_0;
2239 adapter->link_duplex = -1;
2240 atl1c_clean_tx_ring(adapter, atl1c_trans_normal);
2241 atl1c_clean_tx_ring(adapter, atl1c_trans_high);
2242 atl1c_clean_rx_ring(adapter);
2243}
2244
2245/*
2246 * atl1c_open - Called when a network interface is made active
2247 * @netdev: network interface device structure
2248 *
2249 * Returns 0 on success, negative value on failure
2250 *
2251 * The open entry point is called when a network interface is made
2252 * active by the system (IFF_UP). At this point all resources needed
2253 * for transmit and receive operations are allocated, the interrupt
2254 * handler is registered with the OS, the watchdog timer is started,
2255 * and the stack is notified that the interface is ready.
2256 */
2257static int atl1c_open(struct net_device *netdev)
2258{
2259 struct atl1c_adapter *adapter = netdev_priv(netdev);
2260 int err;
2261
2262 /* disallow open during test */
2263 if (test_bit(__AT_TESTING, &adapter->flags))
2264 return -EBUSY;
2265
2266 /* allocate rx/tx dma buffer & descriptors */
2267 err = atl1c_setup_ring_resources(adapter);
2268 if (unlikely(err))
2269 return err;
2270
2271 err = atl1c_up(adapter);
2272 if (unlikely(err))
2273 goto err_up;
2274
2275 if (adapter->hw.ctrl_flags & ATL1C_FPGA_VERSION) {
2276 u32 phy_data;
2277
2278 AT_READ_REG(&adapter->hw, REG_MDIO_CTRL, &phy_data);
2279 phy_data |= MDIO_AP_EN;
2280 AT_WRITE_REG(&adapter->hw, REG_MDIO_CTRL, phy_data);
2281 }
2282 return 0;
2283
2284err_up:
2285 atl1c_free_irq(adapter);
2286 atl1c_free_ring_resources(adapter);
2287 atl1c_reset_mac(&adapter->hw);
2288 return err;
2289}
2290
2291/*
2292 * atl1c_close - Disables a network interface
2293 * @netdev: network interface device structure
2294 *
2295 * Returns 0, this is not allowed to fail
2296 *
2297 * The close entry point is called when an interface is de-activated
2298 * by the OS. The hardware is still under the drivers control, but
2299 * needs to be disabled. A global MAC reset is issued to stop the
2300 * hardware, and all transmit and receive resources are freed.
2301 */
2302static int atl1c_close(struct net_device *netdev)
2303{
2304 struct atl1c_adapter *adapter = netdev_priv(netdev);
2305
2306 WARN_ON(test_bit(__AT_RESETTING, &adapter->flags));
2307 atl1c_down(adapter);
2308 atl1c_free_ring_resources(adapter);
2309 return 0;
2310}
2311
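/*
 * atl1c_suspend - Power management suspend hook
 * @pdev: PCI device
 * @state: target power state chosen by the PM core
 *
 * Stops the interface, and if Wake-on-LAN is configured re-negotiates a
 * low-speed link and arms the magic-packet / link-change wakeup logic
 * before putting the device into the requested low power state.
 */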
2312static int atl1c_suspend(struct pci_dev *pdev, pm_message_t state)
2313{
2314 struct net_device *netdev = pci_get_drvdata(pdev);
2315 struct atl1c_adapter *adapter = netdev_priv(netdev);
2316 struct atl1c_hw *hw = &adapter->hw;
2317 u32 ctrl;
2318 u32 mac_ctrl_data;
2319 u32 master_ctrl_data;
2320 u32 wol_ctrl_data;
2321 u16 mii_bmsr_data;
2322 u16 save_autoneg_advertised;
2323 u16 mii_intr_status_data;
2324 u32 wufc = adapter->wol;
2325 u32 i;
2326 int retval = 0;
2327
2328 if (netif_running(netdev)) {
2329 WARN_ON(test_bit(__AT_RESETTING, &adapter->flags));
2330 atl1c_down(adapter);
2331 }
2332 netif_device_detach(netdev);
2333 atl1c_disable_l0s_l1(hw);
2334 retval = pci_save_state(pdev);
2335 if (retval)
2336 return retval;
2337 if (wufc) {
2338 AT_READ_REG(hw, REG_MASTER_CTRL, &master_ctrl_data);
2339 master_ctrl_data &= ~MASTER_CTRL_CLK_SEL_DIS;
2340
2341 /* get link status */
2342 atl1c_read_phy_reg(hw, MII_BMSR, (u16 *)&mii_bmsr_data);
2343 atl1c_read_phy_reg(hw, MII_BMSR, (u16 *)&mii_bmsr_data);
2344 save_autoneg_advertised = hw->autoneg_advertised;
2345 hw->autoneg_advertised = ADVERTISED_10baseT_Half;
2346 if (atl1c_restart_autoneg(hw) != 0)
2347 if (netif_msg_link(adapter))
2348 dev_warn(&pdev->dev, "phy autoneg failed\n");
2349 hw->phy_configured = false; /* re-init PHY when resume */
2350 hw->autoneg_advertised = save_autoneg_advertised;
2351 /* turn on magic packet wol */
2352 if (wufc & AT_WUFC_MAG)
2353 wol_ctrl_data = WOL_MAGIC_EN | WOL_MAGIC_PME_EN;
2354
2355 if (wufc & AT_WUFC_LNKC) {
2356 for (i = 0; i < AT_SUSPEND_LINK_TIMEOUT; i++) {
2357 msleep(100);
2358 atl1c_read_phy_reg(hw, MII_BMSR,
2359 (u16 *)&mii_bmsr_data);
2360 if (mii_bmsr_data & BMSR_LSTATUS)
2361 break;
2362 }
2363 if ((mii_bmsr_data & BMSR_LSTATUS) == 0)
2364 if (netif_msg_link(adapter))
2365 dev_warn(&pdev->dev,
2366						"%s: Link may change "
2367						"during suspend\n",
2368 atl1c_driver_name);
2369 wol_ctrl_data |= WOL_LINK_CHG_EN | WOL_LINK_CHG_PME_EN;
2370 /* only link up can wake up */
2371 if (atl1c_write_phy_reg(hw, MII_IER, IER_LINK_UP) != 0) {
2372 if (netif_msg_link(adapter))
2373 dev_err(&pdev->dev,
2374						"%s: write to phy "
2375 "register failed.\n",
2376 atl1c_driver_name);
2377 goto wol_dis;
2378 }
2379 }
2380 /* clear phy interrupt */
2381 atl1c_read_phy_reg(hw, MII_ISR, &mii_intr_status_data);
2382 /* Config MAC Ctrl register */
2383 mac_ctrl_data = MAC_CTRL_RX_EN;
2384		/* set to 10/100M half duplex */
2385 mac_ctrl_data |= atl1c_mac_speed_10_100 << MAC_CTRL_SPEED_SHIFT;
2386 mac_ctrl_data |= (((u32)adapter->hw.preamble_len &
2387 MAC_CTRL_PRMLEN_MASK) <<
2388 MAC_CTRL_PRMLEN_SHIFT);
2389
2390 if (adapter->vlgrp)
2391 mac_ctrl_data |= MAC_CTRL_RMV_VLAN;
2392
2393		/* a magic packet may be a broadcast, multicast or unicast frame */
2394 if (wufc & AT_WUFC_MAG)
2395 mac_ctrl_data |= MAC_CTRL_BC_EN;
2396
2397 if (netif_msg_hw(adapter))
2398 dev_dbg(&pdev->dev,
2399 "%s: suspend MAC=0x%x\n",
2400 atl1c_driver_name, mac_ctrl_data);
2401 AT_WRITE_REG(hw, REG_MASTER_CTRL, master_ctrl_data);
2402 AT_WRITE_REG(hw, REG_WOL_CTRL, wol_ctrl_data);
2403 AT_WRITE_REG(hw, REG_MAC_CTRL, mac_ctrl_data);
2404
2405 /* pcie patch */
2406 AT_READ_REG(hw, REG_PCIE_PHYMISC, &ctrl);
2407 ctrl |= PCIE_PHYMISC_FORCE_RCV_DET;
2408 AT_WRITE_REG(hw, REG_PCIE_PHYMISC, ctrl);
2409
2410 pci_enable_wake(pdev, pci_choose_state(pdev, state), 1);
2411 goto suspend_exit;
2412 }
2413wol_dis:
2414
2415 /* WOL disabled */
2416 AT_WRITE_REG(hw, REG_WOL_CTRL, 0);
2417
2418 /* pcie patch */
2419 AT_READ_REG(hw, REG_PCIE_PHYMISC, &ctrl);
2420 ctrl |= PCIE_PHYMISC_FORCE_RCV_DET;
2421 AT_WRITE_REG(hw, REG_PCIE_PHYMISC, ctrl);
2422
2423 atl1c_phy_disable(hw);
2424 hw->phy_configured = false; /* re-init PHY when resume */
2425
2426 pci_enable_wake(pdev, pci_choose_state(pdev, state), 0);
2427suspend_exit:
2428
2429 pci_disable_device(pdev);
2430 pci_set_power_state(pdev, pci_choose_state(pdev, state));
2431
2432 return 0;
2433}
2434
2435static int atl1c_resume(struct pci_dev *pdev)
2436{
2437 struct net_device *netdev = pci_get_drvdata(pdev);
2438 struct atl1c_adapter *adapter = netdev_priv(netdev);
2439
2440 pci_set_power_state(pdev, PCI_D0);
2441 pci_restore_state(pdev);
2442 pci_enable_wake(pdev, PCI_D3hot, 0);
2443 pci_enable_wake(pdev, PCI_D3cold, 0);
2444
2445 AT_WRITE_REG(&adapter->hw, REG_WOL_CTRL, 0);
2446
2447 atl1c_phy_reset(&adapter->hw);
2448 atl1c_reset_mac(&adapter->hw);
2449 netif_device_attach(netdev);
2450 if (netif_running(netdev))
2451 atl1c_up(adapter);
2452
2453 return 0;
2454}
2455
2456static void atl1c_shutdown(struct pci_dev *pdev)
2457{
2458 atl1c_suspend(pdev, PMSG_SUSPEND);
2459}
2460
2461static const struct net_device_ops atl1c_netdev_ops = {
2462 .ndo_open = atl1c_open,
2463 .ndo_stop = atl1c_close,
2464 .ndo_validate_addr = eth_validate_addr,
2465 .ndo_start_xmit = atl1c_xmit_frame,
2466 .ndo_set_mac_address = atl1c_set_mac_addr,
2467 .ndo_set_multicast_list = atl1c_set_multi,
2468 .ndo_change_mtu = atl1c_change_mtu,
2469 .ndo_do_ioctl = atl1c_ioctl,
2470 .ndo_tx_timeout = atl1c_tx_timeout,
2471 .ndo_get_stats = atl1c_get_stats,
2472 .ndo_vlan_rx_register = atl1c_vlan_rx_register,
2473#ifdef CONFIG_NET_POLL_CONTROLLER
2474 .ndo_poll_controller = atl1c_netpoll,
2475#endif
2476};
2477
2478static int atl1c_init_netdev(struct net_device *netdev, struct pci_dev *pdev)
2479{
2480 SET_NETDEV_DEV(netdev, &pdev->dev);
2481 pci_set_drvdata(pdev, netdev);
2482
2483 netdev->irq = pdev->irq;
2484 netdev->netdev_ops = &atl1c_netdev_ops;
2485 netdev->watchdog_timeo = AT_TX_WATCHDOG;
2486 atl1c_set_ethtool_ops(netdev);
2487
2488 /* TODO: add when ready */
2489 netdev->features = NETIF_F_SG |
2490 NETIF_F_HW_CSUM |
2491 NETIF_F_HW_VLAN_TX |
2492 NETIF_F_HW_VLAN_RX |
2493 NETIF_F_TSO |
2494 NETIF_F_TSO6;
2495 return 0;
2496}
2497
2498/*
2499 * atl1c_probe - Device Initialization Routine
2500 * @pdev: PCI device information struct
2501 * @ent: entry in atl1c_pci_tbl
2502 *
2503 * Returns 0 on success, negative on failure
2504 *
2505 * atl1c_probe initializes an adapter identified by a pci_dev structure.
2506 * The OS initialization, configuring of the adapter private structure,
2507 * and a hardware reset occur.
2508 */
2509static int __devinit atl1c_probe(struct pci_dev *pdev,
2510 const struct pci_device_id *ent)
2511{
2512 struct net_device *netdev;
2513 struct atl1c_adapter *adapter;
2514 static int cards_found;
2515
2516 int err = 0;
2517
2518 /* enable device (incl. PCI PM wakeup and hotplug setup) */
2519 err = pci_enable_device_mem(pdev);
2520 if (err) {
2521 dev_err(&pdev->dev, "cannot enable PCI device\n");
2522 return err;
2523 }
2524
2525 /*
2526 * The atl1c chip can DMA to 64-bit addresses, but it uses a single
2527 * shared register for the high 32 bits, so only a single, aligned,
2528 * 4 GB physical address range can be used at a time.
2529 *
2530 * Supporting 64-bit DMA on this hardware is more trouble than it's
2531 * worth. It is far easier to limit to 32-bit DMA than update
2532 * various kernel subsystems to support the mechanics required by a
2533 * fixed-high-32-bit system.
2534 */
2535 if ((pci_set_dma_mask(pdev, DMA_32BIT_MASK) != 0) ||
2536 (pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK) != 0)) {
2537		dev_err(&pdev->dev, "No usable DMA configuration, aborting\n");
2538 goto err_dma;
2539 }
2540
2541 err = pci_request_regions(pdev, atl1c_driver_name);
2542 if (err) {
2543 dev_err(&pdev->dev, "cannot obtain PCI resources\n");
2544 goto err_pci_reg;
2545 }
2546
2547 pci_set_master(pdev);
2548
2549 netdev = alloc_etherdev(sizeof(struct atl1c_adapter));
2550 if (netdev == NULL) {
2551 err = -ENOMEM;
2552 dev_err(&pdev->dev, "etherdev alloc failed\n");
2553 goto err_alloc_etherdev;
2554 }
2555
2556 err = atl1c_init_netdev(netdev, pdev);
2557 if (err) {
2558 dev_err(&pdev->dev, "init netdevice failed\n");
2559 goto err_init_netdev;
2560 }
2561 adapter = netdev_priv(netdev);
2562 adapter->bd_number = cards_found;
2563 adapter->netdev = netdev;
2564 adapter->pdev = pdev;
2565 adapter->hw.adapter = adapter;
2566 adapter->msg_enable = netif_msg_init(-1, atl1c_default_msg);
2567 adapter->hw.hw_addr = ioremap(pci_resource_start(pdev, 0), pci_resource_len(pdev, 0));
2568 if (!adapter->hw.hw_addr) {
2569 err = -EIO;
2570 dev_err(&pdev->dev, "cannot map device registers\n");
2571 goto err_ioremap;
2572 }
2573 netdev->base_addr = (unsigned long)adapter->hw.hw_addr;
2574
2575 /* init mii data */
2576 adapter->mii.dev = netdev;
2577 adapter->mii.mdio_read = atl1c_mdio_read;
2578 adapter->mii.mdio_write = atl1c_mdio_write;
2579 adapter->mii.phy_id_mask = 0x1f;
2580 adapter->mii.reg_num_mask = MDIO_REG_ADDR_MASK;
2581 netif_napi_add(netdev, &adapter->napi, atl1c_clean, 64);
2582 setup_timer(&adapter->phy_config_timer, atl1c_phy_config,
2583 (unsigned long)adapter);
2584 /* setup the private structure */
2585 err = atl1c_sw_init(adapter);
2586 if (err) {
2587 dev_err(&pdev->dev, "net device private data init failed\n");
2588 goto err_sw_init;
2589 }
2590 atl1c_reset_pcie(&adapter->hw, ATL1C_PCIE_L0S_L1_DISABLE |
2591 ATL1C_PCIE_PHY_RESET);
2592
2593 /* Init GPHY as early as possible due to power saving issue */
2594 atl1c_phy_reset(&adapter->hw);
2595
2596 err = atl1c_reset_mac(&adapter->hw);
2597 if (err) {
2598 err = -EIO;
2599 goto err_reset;
2600 }
2601
2602 device_init_wakeup(&pdev->dev, 1);
2603 /* reset the controller to
2604 * put the device in a known good starting state */
2605 err = atl1c_phy_init(&adapter->hw);
2606 if (err) {
2607 err = -EIO;
2608 goto err_reset;
2609 }
2610 if (atl1c_read_mac_addr(&adapter->hw) != 0) {
2611 err = -EIO;
2612 dev_err(&pdev->dev, "get mac address failed\n");
2613 goto err_eeprom;
2614 }
2615 memcpy(netdev->dev_addr, adapter->hw.mac_addr, netdev->addr_len);
2616 memcpy(netdev->perm_addr, adapter->hw.mac_addr, netdev->addr_len);
2617 if (netif_msg_probe(adapter))
2618 dev_dbg(&pdev->dev,
2619 "mac address : %02x-%02x-%02x-%02x-%02x-%02x\n",
2620 adapter->hw.mac_addr[0], adapter->hw.mac_addr[1],
2621 adapter->hw.mac_addr[2], adapter->hw.mac_addr[3],
2622 adapter->hw.mac_addr[4], adapter->hw.mac_addr[5]);
2623
2624 atl1c_hw_set_mac_addr(&adapter->hw);
2625 INIT_WORK(&adapter->reset_task, atl1c_reset_task);
2626 INIT_WORK(&adapter->link_chg_task, atl1c_link_chg_task);
2627 err = register_netdev(netdev);
2628 if (err) {
2629 dev_err(&pdev->dev, "register netdevice failed\n");
2630 goto err_register;
2631 }
2632
2633 if (netif_msg_probe(adapter))
2634 dev_info(&pdev->dev, "version %s\n", ATL1C_DRV_VERSION);
2635 cards_found++;
2636 return 0;
2637
2638err_reset:
2639err_register:
2640err_sw_init:
2641err_eeprom:
2642 iounmap(adapter->hw.hw_addr);
2643err_init_netdev:
2644err_ioremap:
2645 free_netdev(netdev);
2646err_alloc_etherdev:
2647 pci_release_regions(pdev);
2648err_pci_reg:
2649err_dma:
2650 pci_disable_device(pdev);
2651 return err;
2652}
2653
2654/*
2655 * atl1c_remove - Device Removal Routine
2656 * @pdev: PCI device information struct
2657 *
2658 * atl1c_remove is called by the PCI subsystem to alert the driver
2659 * that it should release a PCI device. This could be caused by a
2660 * Hot-Plug event, or because the driver is going to be removed from
2661 * memory.
2662 */
2663static void __devexit atl1c_remove(struct pci_dev *pdev)
2664{
2665 struct net_device *netdev = pci_get_drvdata(pdev);
2666 struct atl1c_adapter *adapter = netdev_priv(netdev);
2667
2668 unregister_netdev(netdev);
2669 atl1c_phy_disable(&adapter->hw);
2670
2671 iounmap(adapter->hw.hw_addr);
2672
2673 pci_release_regions(pdev);
2674 pci_disable_device(pdev);
2675 free_netdev(netdev);
2676}
2677
2678/*
2679 * atl1c_io_error_detected - called when PCI error is detected
2680 * @pdev: Pointer to PCI device
2681 * @state: The current pci connection state
2682 *
2683 * This function is called after a PCI bus error affecting
2684 * this device has been detected.
2685 */
2686static pci_ers_result_t atl1c_io_error_detected(struct pci_dev *pdev,
2687 pci_channel_state_t state)
2688{
2689 struct net_device *netdev = pci_get_drvdata(pdev);
2690 struct atl1c_adapter *adapter = netdev_priv(netdev);
2691
2692 netif_device_detach(netdev);
2693
2694 if (netif_running(netdev))
2695 atl1c_down(adapter);
2696
2697 pci_disable_device(pdev);
2698
2699	/* Request a slot reset. */
2700 return PCI_ERS_RESULT_NEED_RESET;
2701}
2702
2703/*
2704 * atl1c_io_slot_reset - called after the pci bus has been reset.
2705 * @pdev: Pointer to PCI device
2706 *
2707 * Restart the card from scratch, as if from a cold-boot. Implementation
2708 * resembles the first half of the atl1c_resume routine.
2709 */
2710static pci_ers_result_t atl1c_io_slot_reset(struct pci_dev *pdev)
2711{
2712 struct net_device *netdev = pci_get_drvdata(pdev);
2713 struct atl1c_adapter *adapter = netdev_priv(netdev);
2714
2715 if (pci_enable_device(pdev)) {
2716 if (netif_msg_hw(adapter))
2717 dev_err(&pdev->dev,
2718 "Cannot re-enable PCI device after reset\n");
2719 return PCI_ERS_RESULT_DISCONNECT;
2720 }
2721 pci_set_master(pdev);
2722
2723 pci_enable_wake(pdev, PCI_D3hot, 0);
2724 pci_enable_wake(pdev, PCI_D3cold, 0);
2725
2726 atl1c_reset_mac(&adapter->hw);
2727
2728 return PCI_ERS_RESULT_RECOVERED;
2729}
2730
2731/*
2732 * atl1c_io_resume - called when traffic can start flowing again.
2733 * @pdev: Pointer to PCI device
2734 *
2735 * This callback is called when the error recovery driver tells us that
2736 * it's OK to resume normal operation. Implementation resembles the
2737 * second-half of the atl1c_resume routine.
2738 */
2739static void atl1c_io_resume(struct pci_dev *pdev)
2740{
2741 struct net_device *netdev = pci_get_drvdata(pdev);
2742 struct atl1c_adapter *adapter = netdev_priv(netdev);
2743
2744 if (netif_running(netdev)) {
2745 if (atl1c_up(adapter)) {
2746 if (netif_msg_hw(adapter))
2747 dev_err(&pdev->dev,
2748 "Cannot bring device back up after reset\n");
2749 return;
2750 }
2751 }
2752
2753 netif_device_attach(netdev);
2754}
2755
2756static struct pci_error_handlers atl1c_err_handler = {
2757 .error_detected = atl1c_io_error_detected,
2758 .slot_reset = atl1c_io_slot_reset,
2759 .resume = atl1c_io_resume,
2760};
2761
2762static struct pci_driver atl1c_driver = {
2763 .name = atl1c_driver_name,
2764 .id_table = atl1c_pci_tbl,
2765 .probe = atl1c_probe,
2766 .remove = __devexit_p(atl1c_remove),
2767	/* Power Management Hooks */
2768 .suspend = atl1c_suspend,
2769 .resume = atl1c_resume,
2770 .shutdown = atl1c_shutdown,
2771 .err_handler = &atl1c_err_handler
2772};
2773
2774/*
2775 * atl1c_init_module - Driver Registration Routine
2776 *
2777 * atl1c_init_module is the first routine called when the driver is
2778 * loaded. All it does is register with the PCI subsystem.
2779 */
2780static int __init atl1c_init_module(void)
2781{
2782 return pci_register_driver(&atl1c_driver);
2783}
2784
2785/*
2786 * atl1c_exit_module - Driver Exit Cleanup Routine
2787 *
2788 * atl1c_exit_module is called just before the driver is removed
2789 * from memory.
2790 */
2791static void __exit atl1c_exit_module(void)
2792{
2793 pci_unregister_driver(&atl1c_driver);
2794}
2795
2796module_init(atl1c_init_module);
2797module_exit(atl1c_exit_module);
diff --git a/drivers/net/cxgb3/cxgb3_main.c b/drivers/net/cxgb3/cxgb3_main.c
index 0089746b8d02..bab8a934c33d 100644
--- a/drivers/net/cxgb3/cxgb3_main.c
+++ b/drivers/net/cxgb3/cxgb3_main.c
@@ -90,6 +90,7 @@ static const struct pci_device_id cxgb3_pci_tbl[] = {
 	CH_DEVICE(0x30, 2),	/* T3B10 */
 	CH_DEVICE(0x31, 3),	/* T3B20 */
 	CH_DEVICE(0x32, 1),	/* T3B02 */
+	CH_DEVICE(0x35, 6),	/* T3C20-derived T3C10 */
 	{0,}
 };
 
diff --git a/drivers/net/cxgb3/t3_hw.c b/drivers/net/cxgb3/t3_hw.c
index 2d1433077a8e..ac2a974dfe37 100644
--- a/drivers/net/cxgb3/t3_hw.c
+++ b/drivers/net/cxgb3/t3_hw.c
@@ -512,6 +512,13 @@ static const struct adapter_info t3_adap_info[] = {
 	 F_GPIO5_OUT_VAL | F_GPIO6_OUT_VAL | F_GPIO10_OUT_VAL,
 	 { S_GPIO9, S_GPIO3 }, SUPPORTED_10000baseT_Full | SUPPORTED_AUI,
 	 &mi1_mdio_ext_ops, "Chelsio T320"},
+	{},
+	{},
+	{1, 0,
+	 F_GPIO1_OEN | F_GPIO2_OEN | F_GPIO4_OEN | F_GPIO6_OEN | F_GPIO7_OEN |
+	 F_GPIO10_OEN | F_GPIO1_OUT_VAL | F_GPIO6_OUT_VAL | F_GPIO10_OUT_VAL,
+	 { S_GPIO9 }, SUPPORTED_10000baseT_Full | SUPPORTED_AUI,
+	 &mi1_mdio_ext_ops, "Chelsio T310" },
 };
 
 /*
diff --git a/drivers/net/forcedeth.c b/drivers/net/forcedeth.c
index 5b910cf63740..b8251e827059 100644
--- a/drivers/net/forcedeth.c
+++ b/drivers/net/forcedeth.c
@@ -6011,9 +6011,20 @@ static void nv_shutdown(struct pci_dev *pdev)
 	if (netif_running(dev))
 		nv_close(dev);
 
-	nv_restore_mac_addr(pdev);
+	/*
+	 * Restore the MAC so a kernel started by kexec won't get confused.
+	 * If we really go for poweroff, we must not restore the MAC,
+	 * otherwise the MAC for WOL will be reversed at least on some boards.
+	 */
+	if (system_state != SYSTEM_POWER_OFF) {
+		nv_restore_mac_addr(pdev);
+	}
 
 	pci_disable_device(pdev);
+	/*
+	 * Apparently it is not possible to reinitialise from D3 hot,
+	 * only put the device into D3 if we really go for poweroff.
+	 */
 	if (system_state == SYSTEM_POWER_OFF) {
 		if (pci_enable_wake(pdev, PCI_D3cold, np->wolenabled))
 			pci_enable_wake(pdev, PCI_D3hot, np->wolenabled);
diff --git a/drivers/net/mv643xx_eth.c b/drivers/net/mv643xx_eth.c
index 5f31bbb614af..13f11f402a99 100644
--- a/drivers/net/mv643xx_eth.c
+++ b/drivers/net/mv643xx_eth.c
@@ -1175,7 +1175,7 @@ static void mib_counters_update(struct mv643xx_eth_private *mp)
 {
 	struct mib_counters *p = &mp->mib_counters;
 
-	spin_lock(&mp->mib_counters_lock);
+	spin_lock_bh(&mp->mib_counters_lock);
 	p->good_octets_received += mib_read(mp, 0x00);
 	p->good_octets_received += (u64)mib_read(mp, 0x04) << 32;
 	p->bad_octets_received += mib_read(mp, 0x08);
@@ -1208,7 +1208,7 @@ static void mib_counters_update(struct mv643xx_eth_private *mp)
 	p->bad_crc_event += mib_read(mp, 0x74);
 	p->collision += mib_read(mp, 0x78);
 	p->late_collision += mib_read(mp, 0x7c);
-	spin_unlock(&mp->mib_counters_lock);
+	spin_unlock_bh(&mp->mib_counters_lock);
 
 	mod_timer(&mp->mib_counters_timer, jiffies + 30 * HZ);
 }
@@ -1575,7 +1575,7 @@ oom:
 		return;
 	}
 
-	mc_spec = kmalloc(0x200, GFP_KERNEL);
+	mc_spec = kmalloc(0x200, GFP_ATOMIC);
 	if (mc_spec == NULL)
 		goto oom;
 	mc_other = mc_spec + (0x100 >> 2);
@@ -2216,8 +2216,6 @@ static int mv643xx_eth_stop(struct net_device *dev)
 	wrlp(mp, INT_MASK, 0x00000000);
 	rdlp(mp, INT_MASK);
 
-	del_timer_sync(&mp->mib_counters_timer);
-
 	napi_disable(&mp->napi);
 
 	del_timer_sync(&mp->rx_oom);
@@ -2229,6 +2227,7 @@ static int mv643xx_eth_stop(struct net_device *dev)
2229 port_reset(mp); 2227 port_reset(mp);
2230 mv643xx_eth_get_stats(dev); 2228 mv643xx_eth_get_stats(dev);
2231 mib_counters_update(mp); 2229 mib_counters_update(mp);
2230 del_timer_sync(&mp->mib_counters_timer);
2232 2231
2233 skb_queue_purge(&mp->rx_recycle); 2232 skb_queue_purge(&mp->rx_recycle);
2234 2233
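
Three related changes appear in the mv643xx_eth hunks above: the MIB lock is now taken with spin_lock_bh() because mib_counters_update() runs from a timer (softirq context) and, as the stop path shows, also directly from process context; the multicast filter buffer moves to GFP_ATOMIC, which suggests that allocation can be reached from a context that must not sleep; and del_timer_sync() moves after the final counter update, since that update re-arms the timer via mod_timer(). A minimal sketch of the locking half, assuming a counter block shared between a timer callback and process-context readers (all names are placeholders):

#include <linux/spinlock.h>
#include <linux/types.h>

static DEFINE_SPINLOCK(counters_lock);
static u64 good_octets;                       /* stand-in for the MIB block */

/* Timer callback: already runs in softirq context, so plain spin_lock()
 * is enough.  (Registration via a struct timer_list is omitted here.) */
static void counters_timer_fn(unsigned long data)
{
        spin_lock(&counters_lock);
        good_octets++;                        /* stand-in for mib_read() */
        spin_unlock(&counters_lock);
}

/* Process-context readers must disable bottom halves while holding the
 * lock; otherwise the timer can fire on the same CPU and spin on a lock
 * that CPU already holds. */
static u64 counters_read(void)
{
        u64 val;

        spin_lock_bh(&counters_lock);
        val = good_octets;
        spin_unlock_bh(&counters_lock);
        return val;
}
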
diff --git a/drivers/net/smsc911x.c b/drivers/net/smsc911x.c
index 783c1a7b869e..9a78daec2fe9 100644
--- a/drivers/net/smsc911x.c
+++ b/drivers/net/smsc911x.c
@@ -1624,7 +1624,7 @@ static int smsc911x_eeprom_send_cmd(struct smsc911x_data *pdata, u32 op)
1624 do { 1624 do {
1625 msleep(1); 1625 msleep(1);
1626 e2cmd = smsc911x_reg_read(pdata, E2P_CMD); 1626 e2cmd = smsc911x_reg_read(pdata, E2P_CMD);
1627 } while ((e2cmd & E2P_CMD_EPC_BUSY_) && (timeout--)); 1627 } while ((e2cmd & E2P_CMD_EPC_BUSY_) && (--timeout));
1628 1628
1629 if (!timeout) { 1629 if (!timeout) {
1630 SMSC_TRACE(DRV, "TIMED OUT"); 1630 SMSC_TRACE(DRV, "TIMED OUT");
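
The one-character smsc911x change above is a classic post- versus pre-decrement bug: with timeout--, the loop exits with timeout already at -1 when the poll budget is exhausted, so the following !timeout test never reports the timeout. A small standalone illustration:

#include <stdio.h>

static int still_busy(void) { return 1; }   /* device never becomes ready */

int main(void)
{
        int timeout = 3;

        do {
                /* the driver sleeps 1 ms here before re-reading E2P_CMD */
        } while (still_busy() && (timeout--));

        /* With post-decrement, timeout is now -1, not 0. */
        if (!timeout)
                printf("TIMED OUT\n");
        else
                printf("timeout = %d: the timeout went unreported\n", timeout);

        /* Using --timeout instead makes the loop exit with timeout == 0,
         * which is exactly what the !timeout check expects. */
        return 0;
}
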
diff --git a/drivers/net/smsc9420.c b/drivers/net/smsc9420.c
index a1e4b3895b33..4e15ae068b3f 100644
--- a/drivers/net/smsc9420.c
+++ b/drivers/net/smsc9420.c
@@ -341,7 +341,7 @@ static int smsc9420_eeprom_send_cmd(struct smsc9420_pdata *pd, u32 op)
341 do { 341 do {
342 msleep(1); 342 msleep(1);
343 e2cmd = smsc9420_reg_read(pd, E2P_CMD); 343 e2cmd = smsc9420_reg_read(pd, E2P_CMD);
344 } while ((e2cmd & E2P_CMD_EPC_BUSY_) && (timeout--)); 344 } while ((e2cmd & E2P_CMD_EPC_BUSY_) && (--timeout));
345 345
346 if (!timeout) { 346 if (!timeout) {
347 smsc_info(HW, "TIMED OUT"); 347 smsc_info(HW, "TIMED OUT");
@@ -413,6 +413,7 @@ static int smsc9420_ethtool_get_eeprom(struct net_device *dev,
413 } 413 }
414 414
415 memcpy(data, &eeprom_data[eeprom->offset], len); 415 memcpy(data, &eeprom_data[eeprom->offset], len);
416 eeprom->magic = SMSC9420_EEPROM_MAGIC;
416 eeprom->len = len; 417 eeprom->len = len;
417 return 0; 418 return 0;
418} 419}
@@ -423,6 +424,9 @@ static int smsc9420_ethtool_set_eeprom(struct net_device *dev,
423 struct smsc9420_pdata *pd = netdev_priv(dev); 424 struct smsc9420_pdata *pd = netdev_priv(dev);
424 int ret; 425 int ret;
425 426
427 if (eeprom->magic != SMSC9420_EEPROM_MAGIC)
428 return -EINVAL;
429
426 smsc9420_eeprom_enable_access(pd); 430 smsc9420_eeprom_enable_access(pd);
427 smsc9420_eeprom_send_cmd(pd, E2P_CMD_EPC_CMD_EWEN_); 431 smsc9420_eeprom_send_cmd(pd, E2P_CMD_EPC_CMD_EWEN_);
428 ret = smsc9420_eeprom_write_location(pd, eeprom->offset, *data); 432 ret = smsc9420_eeprom_write_location(pd, eeprom->offset, *data);
diff --git a/drivers/net/smsc9420.h b/drivers/net/smsc9420.h
index 69c351f93f86..e441402f77a2 100644
--- a/drivers/net/smsc9420.h
+++ b/drivers/net/smsc9420.h
@@ -44,6 +44,7 @@
44#define LAN_REGISTER_EXTENT (0x400) 44#define LAN_REGISTER_EXTENT (0x400)
45 45
46#define SMSC9420_EEPROM_SIZE ((u32)11) 46#define SMSC9420_EEPROM_SIZE ((u32)11)
47#define SMSC9420_EEPROM_MAGIC (0x9420)
47 48
48#define PKT_BUF_SZ (VLAN_ETH_FRAME_LEN + NET_IP_ALIGN + 4) 49#define PKT_BUF_SZ (VLAN_ETH_FRAME_LEN + NET_IP_ALIGN + 4)
49 50
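
The smsc9420 ethtool changes above pair a magic value with the EEPROM: get_eeprom reports it and set_eeprom refuses an image whose magic does not match, so a user-space tool cannot accidentally write another device's EEPROM contents. A minimal sketch of that validation; the my_* names and the write helper are placeholders, not driver symbols.

#include <linux/errno.h>
#include <linux/ethtool.h>
#include <linux/netdevice.h>

#define MY_EEPROM_MAGIC 0x9420            /* per-device constant, as in smsc9420.h */

/* Placeholder: the real driver issues EEPROM write commands here. */
static int my_eeprom_write(struct net_device *dev, u32 offset, u8 val)
{
        return 0;
}

static int my_ethtool_set_eeprom(struct net_device *dev,
                                 struct ethtool_eeprom *eeprom, u8 *data)
{
        /* Reject images that were not generated for this device. */
        if (eeprom->magic != MY_EEPROM_MAGIC)
                return -EINVAL;

        return my_eeprom_write(dev, eeprom->offset, *data);
}
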
diff --git a/drivers/net/sundance.c b/drivers/net/sundance.c
index feaf0e0577d7..43695b76606f 100644
--- a/drivers/net/sundance.c
+++ b/drivers/net/sundance.c
@@ -909,7 +909,7 @@ static void check_duplex(struct net_device *dev)
909 printk(KERN_INFO "%s: Setting %s-duplex based on MII #%d " 909 printk(KERN_INFO "%s: Setting %s-duplex based on MII #%d "
910 "negotiated capability %4.4x.\n", dev->name, 910 "negotiated capability %4.4x.\n", dev->name,
911 duplex ? "full" : "half", np->phys[0], negotiated); 911 duplex ? "full" : "half", np->phys[0], negotiated);
912 iowrite16(ioread16(ioaddr + MACCtrl0) | duplex ? 0x20 : 0, ioaddr + MACCtrl0); 912 iowrite16(ioread16(ioaddr + MACCtrl0) | (duplex ? 0x20 : 0), ioaddr + MACCtrl0);
913 } 913 }
914} 914}
915 915
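
The sundance fix above is pure operator precedence: the conditional operator binds more loosely than |, so without parentheses the expression ORs the register value into the condition and then discards it. A standalone demonstration:

#include <stdio.h>

int main(void)
{
        unsigned int reg = 0x0100;   /* pretend MACCtrl0 readback */
        int duplex = 0;              /* half duplex negotiated */

        /* The unparenthesised form parses as (reg | duplex) ? 0x20 : 0,
         * throwing the old register contents away. */
        unsigned int buggy = reg | duplex ? 0x20 : 0;
        unsigned int fixed = reg | (duplex ? 0x20 : 0);

        printf("buggy = 0x%x, fixed = 0x%x\n", buggy, fixed);  /* 0x20 vs 0x100 */
        return 0;
}
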
diff --git a/drivers/net/sungem.c b/drivers/net/sungem.c
index 491876341068..8d64b1da0465 100644
--- a/drivers/net/sungem.c
+++ b/drivers/net/sungem.c
@@ -1157,7 +1157,7 @@ static void gem_pcs_reset(struct gem *gp)
1157 if (limit-- <= 0) 1157 if (limit-- <= 0)
1158 break; 1158 break;
1159 } 1159 }
1160 if (limit <= 0) 1160 if (limit < 0)
1161 printk(KERN_WARNING "%s: PCS reset bit would not clear.\n", 1161 printk(KERN_WARNING "%s: PCS reset bit would not clear.\n",
1162 gp->dev->name); 1162 gp->dev->name);
1163} 1163}
diff --git a/drivers/net/sunlance.c b/drivers/net/sunlance.c
index 281373281756..16c528db7251 100644
--- a/drivers/net/sunlance.c
+++ b/drivers/net/sunlance.c
@@ -343,7 +343,7 @@ static void lance_init_ring_dvma(struct net_device *dev)
343 ib->phys_addr [5] = dev->dev_addr [4]; 343 ib->phys_addr [5] = dev->dev_addr [4];
344 344
345 /* Setup the Tx ring entries */ 345 /* Setup the Tx ring entries */
346 for (i = 0; i <= TX_RING_SIZE; i++) { 346 for (i = 0; i < TX_RING_SIZE; i++) {
347 leptr = LANCE_ADDR(aib + libbuff_offset(tx_buf, i)); 347 leptr = LANCE_ADDR(aib + libbuff_offset(tx_buf, i));
348 ib->btx_ring [i].tmd0 = leptr; 348 ib->btx_ring [i].tmd0 = leptr;
349 ib->btx_ring [i].tmd1_hadr = leptr >> 16; 349 ib->btx_ring [i].tmd1_hadr = leptr >> 16;
@@ -399,7 +399,7 @@ static void lance_init_ring_pio(struct net_device *dev)
399 sbus_writeb(dev->dev_addr[4], &ib->phys_addr[5]); 399 sbus_writeb(dev->dev_addr[4], &ib->phys_addr[5]);
400 400
401 /* Setup the Tx ring entries */ 401 /* Setup the Tx ring entries */
402 for (i = 0; i <= TX_RING_SIZE; i++) { 402 for (i = 0; i < TX_RING_SIZE; i++) {
403 leptr = libbuff_offset(tx_buf, i); 403 leptr = libbuff_offset(tx_buf, i);
404 sbus_writew(leptr, &ib->btx_ring [i].tmd0); 404 sbus_writew(leptr, &ib->btx_ring [i].tmd0);
405 sbus_writeb(leptr >> 16,&ib->btx_ring [i].tmd1_hadr); 405 sbus_writeb(leptr >> 16,&ib->btx_ring [i].tmd1_hadr);
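
Both sunlance ring-setup loops above used i <= TX_RING_SIZE, which initialises one descriptor past the end of the ring; the fix tightens the bound to i < TX_RING_SIZE. A standalone sketch of the corrected loop, with an assertion spelling out the intended number of iterations:

#include <assert.h>
#include <stdio.h>

#define TX_RING_SIZE 4

int main(void)
{
        int ring[TX_RING_SIZE];
        int i, writes = 0;

        /* "i <= TX_RING_SIZE" would perform TX_RING_SIZE + 1 stores and
         * the last one would land just past ring[]; the corrected bound
         * only touches valid descriptors. */
        for (i = 0; i < TX_RING_SIZE; i++) {
                ring[i] = i;
                writes++;
        }

        assert(writes == TX_RING_SIZE);
        printf("initialised %d of %d descriptors\n", writes, TX_RING_SIZE);
        return 0;
}
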
diff --git a/drivers/net/tg3.c b/drivers/net/tg3.c
index 4595962fb8e1..b080f9493d83 100644
--- a/drivers/net/tg3.c
+++ b/drivers/net/tg3.c
@@ -2237,8 +2237,8 @@ static int tg3_set_power_state(struct tg3 *tp, pci_power_t state)
2237 phyid = phydev->drv->phy_id & phydev->drv->phy_id_mask; 2237 phyid = phydev->drv->phy_id & phydev->drv->phy_id_mask;
2238 if (phyid != TG3_PHY_ID_BCMAC131) { 2238 if (phyid != TG3_PHY_ID_BCMAC131) {
2239 phyid &= TG3_PHY_OUI_MASK; 2239 phyid &= TG3_PHY_OUI_MASK;
2240 if (phyid == TG3_PHY_OUI_1 && 2240 if (phyid == TG3_PHY_OUI_1 ||
2241 phyid == TG3_PHY_OUI_2 && 2241 phyid == TG3_PHY_OUI_2 ||
2242 phyid == TG3_PHY_OUI_3) 2242 phyid == TG3_PHY_OUI_3)
2243 do_low_power = true; 2243 do_low_power = true;
2244 } 2244 }
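
The tg3 change above replaces && with ||: a single phyid can never equal three different OUI constants at once, so the original condition was always false and the low-power path never ran. A standalone illustration:

#include <stdio.h>

#define OUI_1 0x1
#define OUI_2 0x2
#define OUI_3 0x3

int main(void)
{
        unsigned int phyid = OUI_2;

        /* One value cannot equal three different constants, so the '&&'
         * form is identically false; '||' is what the check means. */
        int with_and = (phyid == OUI_1 && phyid == OUI_2 && phyid == OUI_3);
        int with_or  = (phyid == OUI_1 || phyid == OUI_2 || phyid == OUI_3);

        printf("&&: %d, ||: %d\n", with_and, with_or);   /* 0, 1 */
        return 0;
}
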
diff --git a/drivers/net/veth.c b/drivers/net/veth.c
index 852d0e7c4e62..108bbbeacfb6 100644
--- a/drivers/net/veth.c
+++ b/drivers/net/veth.c
@@ -263,10 +263,11 @@ static void veth_dev_free(struct net_device *dev)
263} 263}
264 264
265static const struct net_device_ops veth_netdev_ops = { 265static const struct net_device_ops veth_netdev_ops = {
266 .ndo_init = veth_dev_init, 266 .ndo_init = veth_dev_init,
267 .ndo_open = veth_open, 267 .ndo_open = veth_open,
268 .ndo_start_xmit = veth_xmit, 268 .ndo_start_xmit = veth_xmit,
269 .ndo_get_stats = veth_get_stats, 269 .ndo_get_stats = veth_get_stats,
270 .ndo_set_mac_address = eth_mac_addr,
270}; 271};
271 272
272static void veth_setup(struct net_device *dev) 273static void veth_setup(struct net_device *dev)
diff --git a/drivers/net/wimax/i2400m/i2400m.h b/drivers/net/wimax/i2400m/i2400m.h
index 067c871cc226..3b9d27ea2950 100644
--- a/drivers/net/wimax/i2400m/i2400m.h
+++ b/drivers/net/wimax/i2400m/i2400m.h
@@ -157,7 +157,7 @@ enum {
157 157
158 158
159/* Firmware version we request when pulling the fw image file */ 159/* Firmware version we request when pulling the fw image file */
160#define I2400M_FW_VERSION "1.3" 160#define I2400M_FW_VERSION "1.4"
161 161
162 162
163/** 163/**
diff --git a/fs/ext4/ialloc.c b/fs/ext4/ialloc.c
index 4fb86a0061d0..f18a919be70b 100644
--- a/fs/ext4/ialloc.c
+++ b/fs/ext4/ialloc.c
@@ -715,6 +715,13 @@ struct inode *ext4_new_inode(handle_t *handle, struct inode *dir, int mode)
715 715
716 if (sbi->s_log_groups_per_flex) { 716 if (sbi->s_log_groups_per_flex) {
717 ret2 = find_group_flex(sb, dir, &group); 717 ret2 = find_group_flex(sb, dir, &group);
718 if (ret2 == -1) {
719 ret2 = find_group_other(sb, dir, &group);
720 if (ret2 == 0 && printk_ratelimit())
721 printk(KERN_NOTICE "ext4: find_group_flex "
722 "failed, fallback succeeded dir %lu\n",
723 dir->i_ino);
724 }
718 goto got_group; 725 goto got_group;
719 } 726 }
720 727
diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
index cbd2ca99d113..51cdd13e1c31 100644
--- a/fs/ext4/inode.c
+++ b/fs/ext4/inode.c
@@ -1368,6 +1368,10 @@ retry:
1368 goto out; 1368 goto out;
1369 } 1369 }
1370 1370
1371 /* We cannot recurse into the filesystem as the transaction is already
1372 * started */
1373 flags |= AOP_FLAG_NOFS;
1374
1371 page = grab_cache_page_write_begin(mapping, index, flags); 1375 page = grab_cache_page_write_begin(mapping, index, flags);
1372 if (!page) { 1376 if (!page) {
1373 ext4_journal_stop(handle); 1377 ext4_journal_stop(handle);
@@ -1377,7 +1381,7 @@ retry:
1377 *pagep = page; 1381 *pagep = page;
1378 1382
1379 ret = block_write_begin(file, mapping, pos, len, flags, pagep, fsdata, 1383 ret = block_write_begin(file, mapping, pos, len, flags, pagep, fsdata,
1380 ext4_get_block); 1384 ext4_get_block);
1381 1385
1382 if (!ret && ext4_should_journal_data(inode)) { 1386 if (!ret && ext4_should_journal_data(inode)) {
1383 ret = walk_page_buffers(handle, page_buffers(page), 1387 ret = walk_page_buffers(handle, page_buffers(page),
@@ -2667,6 +2671,9 @@ retry:
2667 ret = PTR_ERR(handle); 2671 ret = PTR_ERR(handle);
2668 goto out; 2672 goto out;
2669 } 2673 }
2674 /* We cannot recurse into the filesystem as the transaction is already
2675 * started */
2676 flags |= AOP_FLAG_NOFS;
2670 2677
2671 page = grab_cache_page_write_begin(mapping, index, flags); 2678 page = grab_cache_page_write_begin(mapping, index, flags);
2672 if (!page) { 2679 if (!page) {
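
The two ext4 hunks above add the same comment and the same flag: a journal handle is already running, so the page allocated by grab_cache_page_write_begin() must not trigger reclaim that re-enters the filesystem, and AOP_FLAG_NOFS requests exactly that. Roughly, the flag ends up clearing __GFP_FS from the page-cache allocation mask; the helper below is an illustration of that idea, not the actual mm code.

#include <linux/fs.h>
#include <linux/pagemap.h>

/* Illustration only: how an AOP_FLAG_NOFS-style flag folds into the
 * allocation mask used for the write_begin page. */
static inline gfp_t write_begin_gfp(struct address_space *mapping,
                                    unsigned int flags)
{
        gfp_t gfp = mapping_gfp_mask(mapping);

        if (flags & AOP_FLAG_NOFS)
                gfp &= ~__GFP_FS;   /* reclaim must not call back into the fs */

        return gfp;
}
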
diff --git a/fs/proc/inode.c b/fs/proc/inode.c
index 3e76bb9b3ad6..d8bb5c671f42 100644
--- a/fs/proc/inode.c
+++ b/fs/proc/inode.c
@@ -485,8 +485,10 @@ struct inode *proc_get_inode(struct super_block *sb, unsigned int ino,
485 } 485 }
486 } 486 }
487 unlock_new_inode(inode); 487 unlock_new_inode(inode);
488 } else 488 } else {
489 module_put(de->owner); 489 module_put(de->owner);
490 de_put(de);
491 }
490 return inode; 492 return inode;
491 493
492out_ino: 494out_ino:
diff --git a/fs/proc/page.c b/fs/proc/page.c
index 767d95a6d1b1..2d1345112a42 100644
--- a/fs/proc/page.c
+++ b/fs/proc/page.c
@@ -107,7 +107,7 @@ static ssize_t kpageflags_read(struct file *file, char __user *buf,
107 else 107 else
108 kflags = ppage->flags; 108 kflags = ppage->flags;
109 109
110 uflags = kpf_copy_bit(KPF_LOCKED, PG_locked, kflags) | 110 uflags = kpf_copy_bit(kflags, KPF_LOCKED, PG_locked) |
111 kpf_copy_bit(kflags, KPF_ERROR, PG_error) | 111 kpf_copy_bit(kflags, KPF_ERROR, PG_error) |
112 kpf_copy_bit(kflags, KPF_REFERENCED, PG_referenced) | 112 kpf_copy_bit(kflags, KPF_REFERENCED, PG_referenced) |
113 kpf_copy_bit(kflags, KPF_UPTODATE, PG_uptodate) | 113 kpf_copy_bit(kflags, KPF_UPTODATE, PG_uptodate) |
diff --git a/include/linux/i2c-dev.h b/include/linux/i2c-dev.h
index 311315b56b61..fd53bfd26470 100644
--- a/include/linux/i2c-dev.h
+++ b/include/linux/i2c-dev.h
@@ -33,7 +33,7 @@
33 */ 33 */
34#define I2C_RETRIES 0x0701 /* number of times a device address should 34#define I2C_RETRIES 0x0701 /* number of times a device address should
35 be polled when not acknowledging */ 35 be polled when not acknowledging */
36#define I2C_TIMEOUT 0x0702 /* set timeout in jiffies - call with int */ 36#define I2C_TIMEOUT 0x0702 /* set timeout in units of 10 ms */
37 37
38/* NOTE: Slave address is 7 or 10 bits, but 10-bit addresses 38/* NOTE: Slave address is 7 or 10 bits, but 10-bit addresses
39 * are NOT supported! (due to code brokenness) 39 * are NOT supported! (due to code brokenness)
diff --git a/include/linux/i2c.h b/include/linux/i2c.h
index fcfbfea3af72..c86c3b07604c 100644
--- a/include/linux/i2c.h
+++ b/include/linux/i2c.h
@@ -361,7 +361,7 @@ struct i2c_adapter {
361 struct mutex bus_lock; 361 struct mutex bus_lock;
362 struct mutex clist_lock; 362 struct mutex clist_lock;
363 363
364 int timeout; 364 int timeout; /* in jiffies */
365 int retries; 365 int retries;
366 struct device dev; /* the adapter device */ 366 struct device dev; /* the adapter device */
367 367
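
The two i2c comment fixes above pin down the units: the I2C_TIMEOUT ioctl argument is expressed in units of 10 ms, while i2c_adapter.timeout is stored in jiffies, so the character-device layer has to convert between the two. A hedged sketch of that conversion; the real handler lives in drivers/i2c/i2c-dev.c and may differ in detail.

#include <linux/i2c.h>
#include <linux/jiffies.h>

/* Sketch: translate a 10 ms-unit ioctl argument into the jiffies value
 * kept in i2c_adapter.timeout. */
static void set_adapter_timeout(struct i2c_adapter *adap, unsigned long arg)
{
        adap->timeout = msecs_to_jiffies(arg * 10);
}
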
diff --git a/include/linux/if_vlan.h b/include/linux/if_vlan.h
index f8ff918c208f..e1ff5b14310e 100644
--- a/include/linux/if_vlan.h
+++ b/include/linux/if_vlan.h
@@ -210,6 +210,7 @@ static inline struct sk_buff *__vlan_put_tag(struct sk_buff *skb, u16 vlan_tci)
210 210
211 /* Move the mac addresses to the beginning of the new header. */ 211 /* Move the mac addresses to the beginning of the new header. */
212 memmove(skb->data, skb->data + VLAN_HLEN, 2 * VLAN_ETH_ALEN); 212 memmove(skb->data, skb->data + VLAN_HLEN, 2 * VLAN_ETH_ALEN);
213 skb->mac_header -= VLAN_HLEN;
213 214
214 /* first, the ethernet type */ 215 /* first, the ethernet type */
215 veth->h_vlan_proto = htons(ETH_P_8021Q); 216 veth->h_vlan_proto = htons(ETH_P_8021Q);
diff --git a/include/linux/io-mapping.h b/include/linux/io-mapping.h
index 82df31726a54..f1ed66c43787 100644
--- a/include/linux/io-mapping.h
+++ b/include/linux/io-mapping.h
@@ -30,11 +30,14 @@
30 * See Documentation/io_mapping.txt 30 * See Documentation/io_mapping.txt
31 */ 31 */
32 32
33/* this struct isn't actually defined anywhere */
34struct io_mapping;
35
36#ifdef CONFIG_HAVE_ATOMIC_IOMAP 33#ifdef CONFIG_HAVE_ATOMIC_IOMAP
37 34
35struct io_mapping {
36 resource_size_t base;
37 unsigned long size;
38 pgprot_t prot;
39};
40
38/* 41/*
39 * For small address space machines, mapping large objects 42 * For small address space machines, mapping large objects
40 * into the kernel virtual space isn't practical. Where 43 * into the kernel virtual space isn't practical. Where
@@ -43,23 +46,42 @@ struct io_mapping;
43 */ 46 */
44 47
45static inline struct io_mapping * 48static inline struct io_mapping *
46io_mapping_create_wc(unsigned long base, unsigned long size) 49io_mapping_create_wc(resource_size_t base, unsigned long size)
47{ 50{
48 return (struct io_mapping *) base; 51 struct io_mapping *iomap;
52 pgprot_t prot;
53
54 if (!reserve_io_memtype_wc(base, size, &prot))
55 return NULL;
56
57 iomap = kmalloc(sizeof(*iomap), GFP_KERNEL);
58 if (!iomap)
59 return NULL;
60
61 iomap->base = base;
62 iomap->size = size;
63 iomap->prot = prot;
64 return iomap;
49} 65}
50 66
51static inline void 67static inline void
52io_mapping_free(struct io_mapping *mapping) 68io_mapping_free(struct io_mapping *mapping)
53{ 69{
70 free_io_memtype(mapping->base, mapping->size);
71 kfree(mapping);
54} 72}
55 73
56/* Atomic map/unmap */ 74/* Atomic map/unmap */
57static inline void * 75static inline void *
58io_mapping_map_atomic_wc(struct io_mapping *mapping, unsigned long offset) 76io_mapping_map_atomic_wc(struct io_mapping *mapping, unsigned long offset)
59{ 77{
60 offset += (unsigned long) mapping; 78 resource_size_t phys_addr;
61 return iomap_atomic_prot_pfn(offset >> PAGE_SHIFT, KM_USER0, 79 unsigned long pfn;
62 __pgprot(__PAGE_KERNEL_WC)); 80
81 BUG_ON(offset >= mapping->size);
82 phys_addr = mapping->base + offset;
83 pfn = (unsigned long) (phys_addr >> PAGE_SHIFT);
84 return iomap_atomic_prot_pfn(pfn, KM_USER0, mapping->prot);
63} 85}
64 86
65static inline void 87static inline void
@@ -71,8 +93,9 @@ io_mapping_unmap_atomic(void *vaddr)
71static inline void * 93static inline void *
72io_mapping_map_wc(struct io_mapping *mapping, unsigned long offset) 94io_mapping_map_wc(struct io_mapping *mapping, unsigned long offset)
73{ 95{
74 offset += (unsigned long) mapping; 96 BUG_ON(offset >= mapping->size);
75 return ioremap_wc(offset, PAGE_SIZE); 97 resource_size_t phys_addr = mapping->base + offset;
98 return ioremap_wc(phys_addr, PAGE_SIZE);
76} 99}
77 100
78static inline void 101static inline void
@@ -83,9 +106,12 @@ io_mapping_unmap(void *vaddr)
83 106
84#else 107#else
85 108
109/* this struct isn't actually defined anywhere */
110struct io_mapping;
111
86/* Create the io_mapping object*/ 112/* Create the io_mapping object*/
87static inline struct io_mapping * 113static inline struct io_mapping *
88io_mapping_create_wc(unsigned long base, unsigned long size) 114io_mapping_create_wc(resource_size_t base, unsigned long size)
89{ 115{
90 return (struct io_mapping *) ioremap_wc(base, size); 116 return (struct io_mapping *) ioremap_wc(base, size);
91} 117}
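
With the rework above, an io_mapping records the resource base, size and the write-combining protection reserved for it, and individual pages are mapped on demand instead of the whole aperture being ioremapped at once. A hedged usage sketch built only from the functions shown in this header; poke_aperture() and its parameters are hypothetical.

#include <linux/errno.h>
#include <linux/io.h>
#include <linux/io-mapping.h>

static int poke_aperture(resource_size_t bar_base, unsigned long bar_size,
                         unsigned long offset, u32 value)
{
        struct io_mapping *map;
        void *vaddr;

        map = io_mapping_create_wc(bar_base, bar_size);
        if (!map)
                return -ENOMEM;

        /* Map just the page containing 'offset', write-combined. */
        vaddr = io_mapping_map_atomic_wc(map, offset & PAGE_MASK);
        iowrite32(value, (void __iomem *)(vaddr + (offset & ~PAGE_MASK)));
        io_mapping_unmap_atomic(vaddr);

        io_mapping_free(map);
        return 0;
}
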
diff --git a/include/linux/kprobes.h b/include/linux/kprobes.h
index 32851eef48f0..2ec6cc14a114 100644
--- a/include/linux/kprobes.h
+++ b/include/linux/kprobes.h
@@ -182,6 +182,14 @@ struct kprobe_blackpoint {
182DECLARE_PER_CPU(struct kprobe *, current_kprobe); 182DECLARE_PER_CPU(struct kprobe *, current_kprobe);
183DECLARE_PER_CPU(struct kprobe_ctlblk, kprobe_ctlblk); 183DECLARE_PER_CPU(struct kprobe_ctlblk, kprobe_ctlblk);
184 184
185/*
186 * For #ifdef avoidance:
187 */
188static inline int kprobes_built_in(void)
189{
190 return 1;
191}
192
185#ifdef CONFIG_KRETPROBES 193#ifdef CONFIG_KRETPROBES
186extern void arch_prepare_kretprobe(struct kretprobe_instance *ri, 194extern void arch_prepare_kretprobe(struct kretprobe_instance *ri,
187 struct pt_regs *regs); 195 struct pt_regs *regs);
@@ -271,8 +279,16 @@ void unregister_kretprobes(struct kretprobe **rps, int num);
271void kprobe_flush_task(struct task_struct *tk); 279void kprobe_flush_task(struct task_struct *tk);
272void recycle_rp_inst(struct kretprobe_instance *ri, struct hlist_head *head); 280void recycle_rp_inst(struct kretprobe_instance *ri, struct hlist_head *head);
273 281
274#else /* CONFIG_KPROBES */ 282#else /* !CONFIG_KPROBES: */
275 283
284static inline int kprobes_built_in(void)
285{
286 return 0;
287}
288static inline int kprobe_fault_handler(struct pt_regs *regs, int trapnr)
289{
290 return 0;
291}
276static inline struct kprobe *get_kprobe(void *addr) 292static inline struct kprobe *get_kprobe(void *addr)
277{ 293{
278 return NULL; 294 return NULL;
@@ -329,5 +345,5 @@ static inline void unregister_kretprobes(struct kretprobe **rps, int num)
329static inline void kprobe_flush_task(struct task_struct *tk) 345static inline void kprobe_flush_task(struct task_struct *tk)
330{ 346{
331} 347}
332#endif /* CONFIG_KPROBES */ 348#endif /* CONFIG_KPROBES */
333#endif /* _LINUX_KPROBES_H */ 349#endif /* _LINUX_KPROBES_H */
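
The point of kprobes_built_in() and the new !CONFIG_KPROBES stub for kprobe_fault_handler() is to let callers drop #ifdef blocks: when kprobes are configured out the test is a constant 0 and the compiler discards the whole branch, stub calls included. A sketch of such a caller; the function name, the trapnr parameter and the user_mode() test are illustrative, not part of this patch.

#include <linux/kprobes.h>
#include <linux/preempt.h>
#include <linux/ptrace.h>

static inline int notify_fault(struct pt_regs *regs, int trapnr)
{
        int ret = 0;

        /* Compiles away entirely when CONFIG_KPROBES is off. */
        if (kprobes_built_in() && !user_mode(regs)) {
                preempt_disable();
                ret = kprobe_fault_handler(regs, trapnr);
                preempt_enable();
        }

        return ret;
}
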
diff --git a/include/linux/mmiotrace.h b/include/linux/mmiotrace.h
index 139d7c88d9c9..3d1b7bde1283 100644
--- a/include/linux/mmiotrace.h
+++ b/include/linux/mmiotrace.h
@@ -1,5 +1,5 @@
1#ifndef MMIOTRACE_H 1#ifndef _LINUX_MMIOTRACE_H
2#define MMIOTRACE_H 2#define _LINUX_MMIOTRACE_H
3 3
4#include <linux/types.h> 4#include <linux/types.h>
5#include <linux/list.h> 5#include <linux/list.h>
@@ -13,28 +13,34 @@ typedef void (*kmmio_post_handler_t)(struct kmmio_probe *,
13 unsigned long condition, struct pt_regs *); 13 unsigned long condition, struct pt_regs *);
14 14
15struct kmmio_probe { 15struct kmmio_probe {
16 struct list_head list; /* kmmio internal list */ 16 /* kmmio internal list: */
17 unsigned long addr; /* start location of the probe point */ 17 struct list_head list;
18 unsigned long len; /* length of the probe region */ 18 /* start location of the probe point: */
19 kmmio_pre_handler_t pre_handler; /* Called before addr is executed. */ 19 unsigned long addr;
20 kmmio_post_handler_t post_handler; /* Called after addr is executed */ 20 /* length of the probe region: */
21 void *private; 21 unsigned long len;
22 /* Called before addr is executed: */
23 kmmio_pre_handler_t pre_handler;
24 /* Called after addr is executed: */
25 kmmio_post_handler_t post_handler;
26 void *private;
22}; 27};
23 28
29extern unsigned int kmmio_count;
30
31extern int register_kmmio_probe(struct kmmio_probe *p);
32extern void unregister_kmmio_probe(struct kmmio_probe *p);
33
34#ifdef CONFIG_MMIOTRACE
24/* kmmio is active by some kmmio_probes? */ 35/* kmmio is active by some kmmio_probes? */
25static inline int is_kmmio_active(void) 36static inline int is_kmmio_active(void)
26{ 37{
27 extern unsigned int kmmio_count;
28 return kmmio_count; 38 return kmmio_count;
29} 39}
30 40
31extern int register_kmmio_probe(struct kmmio_probe *p);
32extern void unregister_kmmio_probe(struct kmmio_probe *p);
33
34/* Called from page fault handler. */ 41/* Called from page fault handler. */
35extern int kmmio_handler(struct pt_regs *regs, unsigned long addr); 42extern int kmmio_handler(struct pt_regs *regs, unsigned long addr);
36 43
37#ifdef CONFIG_MMIOTRACE
38/* Called from ioremap.c */ 44/* Called from ioremap.c */
39extern void mmiotrace_ioremap(resource_size_t offset, unsigned long size, 45extern void mmiotrace_ioremap(resource_size_t offset, unsigned long size,
40 void __iomem *addr); 46 void __iomem *addr);
@@ -43,7 +49,17 @@ extern void mmiotrace_iounmap(volatile void __iomem *addr);
43/* For anyone to insert markers. Remember trailing newline. */ 49/* For anyone to insert markers. Remember trailing newline. */
44extern int mmiotrace_printk(const char *fmt, ...) 50extern int mmiotrace_printk(const char *fmt, ...)
45 __attribute__ ((format (printf, 1, 2))); 51 __attribute__ ((format (printf, 1, 2)));
46#else 52#else /* !CONFIG_MMIOTRACE: */
53static inline int is_kmmio_active(void)
54{
55 return 0;
56}
57
58static inline int kmmio_handler(struct pt_regs *regs, unsigned long addr)
59{
60 return 0;
61}
62
47static inline void mmiotrace_ioremap(resource_size_t offset, 63static inline void mmiotrace_ioremap(resource_size_t offset,
48 unsigned long size, void __iomem *addr) 64 unsigned long size, void __iomem *addr)
49{ 65{
@@ -63,28 +79,28 @@ static inline int mmiotrace_printk(const char *fmt, ...)
63#endif /* CONFIG_MMIOTRACE */ 79#endif /* CONFIG_MMIOTRACE */
64 80
65enum mm_io_opcode { 81enum mm_io_opcode {
66 MMIO_READ = 0x1, /* struct mmiotrace_rw */ 82 MMIO_READ = 0x1, /* struct mmiotrace_rw */
67 MMIO_WRITE = 0x2, /* struct mmiotrace_rw */ 83 MMIO_WRITE = 0x2, /* struct mmiotrace_rw */
68 MMIO_PROBE = 0x3, /* struct mmiotrace_map */ 84 MMIO_PROBE = 0x3, /* struct mmiotrace_map */
69 MMIO_UNPROBE = 0x4, /* struct mmiotrace_map */ 85 MMIO_UNPROBE = 0x4, /* struct mmiotrace_map */
70 MMIO_UNKNOWN_OP = 0x5, /* struct mmiotrace_rw */ 86 MMIO_UNKNOWN_OP = 0x5, /* struct mmiotrace_rw */
71}; 87};
72 88
73struct mmiotrace_rw { 89struct mmiotrace_rw {
74 resource_size_t phys; /* PCI address of register */ 90 resource_size_t phys; /* PCI address of register */
75 unsigned long value; 91 unsigned long value;
76 unsigned long pc; /* optional program counter */ 92 unsigned long pc; /* optional program counter */
77 int map_id; 93 int map_id;
78 unsigned char opcode; /* one of MMIO_{READ,WRITE,UNKNOWN_OP} */ 94 unsigned char opcode; /* one of MMIO_{READ,WRITE,UNKNOWN_OP} */
79 unsigned char width; /* size of register access in bytes */ 95 unsigned char width; /* size of register access in bytes */
80}; 96};
81 97
82struct mmiotrace_map { 98struct mmiotrace_map {
83 resource_size_t phys; /* base address in PCI space */ 99 resource_size_t phys; /* base address in PCI space */
84 unsigned long virt; /* base virtual address */ 100 unsigned long virt; /* base virtual address */
85 unsigned long len; /* mapping size */ 101 unsigned long len; /* mapping size */
86 int map_id; 102 int map_id;
87 unsigned char opcode; /* MMIO_PROBE or MMIO_UNPROBE */ 103 unsigned char opcode; /* MMIO_PROBE or MMIO_UNPROBE */
88}; 104};
89 105
90/* in kernel/trace/trace_mmiotrace.c */ 106/* in kernel/trace/trace_mmiotrace.c */
@@ -94,4 +110,4 @@ extern void mmio_trace_rw(struct mmiotrace_rw *rw);
94extern void mmio_trace_mapping(struct mmiotrace_map *map); 110extern void mmio_trace_mapping(struct mmiotrace_map *map);
95extern int mmio_trace_printk(const char *fmt, va_list args); 111extern int mmio_trace_printk(const char *fmt, va_list args);
96 112
97#endif /* MMIOTRACE_H */ 113#endif /* _LINUX_MMIOTRACE_H */
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
index cf2cb50f77d1..9dcf956ad18a 100644
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -416,15 +416,6 @@ extern void skb_over_panic(struct sk_buff *skb, int len,
416 void *here); 416 void *here);
417extern void skb_under_panic(struct sk_buff *skb, int len, 417extern void skb_under_panic(struct sk_buff *skb, int len,
418 void *here); 418 void *here);
419extern void skb_truesize_bug(struct sk_buff *skb);
420
421static inline void skb_truesize_check(struct sk_buff *skb)
422{
423 int len = sizeof(struct sk_buff) + skb->len;
424
425 if (unlikely((int)skb->truesize < len))
426 skb_truesize_bug(skb);
427}
428 419
429extern int skb_append_datato_frags(struct sock *sk, struct sk_buff *skb, 420extern int skb_append_datato_frags(struct sock *sk, struct sk_buff *skb,
430 int getfrag(void *from, char *to, int offset, 421 int getfrag(void *from, char *to, int offset,
diff --git a/include/linux/uaccess.h b/include/linux/uaccess.h
index 6b58367d145e..6f3c603b0d67 100644
--- a/include/linux/uaccess.h
+++ b/include/linux/uaccess.h
@@ -41,13 +41,13 @@ static inline void pagefault_enable(void)
41#ifndef ARCH_HAS_NOCACHE_UACCESS 41#ifndef ARCH_HAS_NOCACHE_UACCESS
42 42
43static inline unsigned long __copy_from_user_inatomic_nocache(void *to, 43static inline unsigned long __copy_from_user_inatomic_nocache(void *to,
44 const void __user *from, unsigned long n) 44 const void __user *from, unsigned long n, unsigned long total)
45{ 45{
46 return __copy_from_user_inatomic(to, from, n); 46 return __copy_from_user_inatomic(to, from, n);
47} 47}
48 48
49static inline unsigned long __copy_from_user_nocache(void *to, 49static inline unsigned long __copy_from_user_nocache(void *to,
50 const void __user *from, unsigned long n) 50 const void __user *from, unsigned long n, unsigned long total)
51{ 51{
52 return __copy_from_user(to, from, n); 52 return __copy_from_user(to, from, n);
53} 53}
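
The extra total argument distinguishes the size of the current chunk (n) from the size of the whole copy, which gives an architecture enough information to decide whether cache-bypassing (non-temporal) stores are worthwhile; the generic fallbacks above simply ignore it. A hedged sketch of how an arch override might use the hint; the threshold and the __arch_copy_user_nocache() helper are assumptions, not taken from this patch.

#include <linux/uaccess.h>

#define NOCACHE_THRESHOLD 4096   /* illustrative cut-off, not from this patch */

/* Placeholder for an arch-provided non-temporal copy routine. */
unsigned long __arch_copy_user_nocache(void *to, const void __user *from,
                                       unsigned long n);

static inline unsigned long
my_copy_from_user_nocache(void *to, const void __user *from,
                          unsigned long n, unsigned long total)
{
        /* Bypassing the cache only pays off for large overall copies. */
        if (total >= NOCACHE_THRESHOLD)
                return __arch_copy_user_nocache(to, from, n);

        return __copy_from_user(to, from, n);
}
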
diff --git a/include/net/sock.h b/include/net/sock.h
index ce3b5b622683..eefeeaf7fc46 100644
--- a/include/net/sock.h
+++ b/include/net/sock.h
@@ -860,7 +860,6 @@ static inline void sk_mem_uncharge(struct sock *sk, int size)
860 860
861static inline void sk_wmem_free_skb(struct sock *sk, struct sk_buff *skb) 861static inline void sk_wmem_free_skb(struct sock *sk, struct sk_buff *skb)
862{ 862{
863 skb_truesize_check(skb);
864 sock_set_flag(sk, SOCK_QUEUE_SHRUNK); 863 sock_set_flag(sk, SOCK_QUEUE_SHRUNK);
865 sk->sk_wmem_queued -= skb->truesize; 864 sk->sk_wmem_queued -= skb->truesize;
866 sk_mem_uncharge(sk, skb->truesize); 865 sk_mem_uncharge(sk, skb->truesize);
diff --git a/mm/filemap.c b/mm/filemap.c
index 23acefe51808..60fd56772cc6 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -1816,14 +1816,14 @@ EXPORT_SYMBOL(file_remove_suid);
1816static size_t __iovec_copy_from_user_inatomic(char *vaddr, 1816static size_t __iovec_copy_from_user_inatomic(char *vaddr,
1817 const struct iovec *iov, size_t base, size_t bytes) 1817 const struct iovec *iov, size_t base, size_t bytes)
1818{ 1818{
1819 size_t copied = 0, left = 0; 1819 size_t copied = 0, left = 0, total = bytes;
1820 1820
1821 while (bytes) { 1821 while (bytes) {
1822 char __user *buf = iov->iov_base + base; 1822 char __user *buf = iov->iov_base + base;
1823 int copy = min(bytes, iov->iov_len - base); 1823 int copy = min(bytes, iov->iov_len - base);
1824 1824
1825 base = 0; 1825 base = 0;
1826 left = __copy_from_user_inatomic_nocache(vaddr, buf, copy); 1826 left = __copy_from_user_inatomic_nocache(vaddr, buf, copy, total);
1827 copied += copy; 1827 copied += copy;
1828 bytes -= copy; 1828 bytes -= copy;
1829 vaddr += copy; 1829 vaddr += copy;
@@ -1851,8 +1851,9 @@ size_t iov_iter_copy_from_user_atomic(struct page *page,
1851 if (likely(i->nr_segs == 1)) { 1851 if (likely(i->nr_segs == 1)) {
1852 int left; 1852 int left;
1853 char __user *buf = i->iov->iov_base + i->iov_offset; 1853 char __user *buf = i->iov->iov_base + i->iov_offset;
1854
1854 left = __copy_from_user_inatomic_nocache(kaddr + offset, 1855 left = __copy_from_user_inatomic_nocache(kaddr + offset,
1855 buf, bytes); 1856 buf, bytes, bytes);
1856 copied = bytes - left; 1857 copied = bytes - left;
1857 } else { 1858 } else {
1858 copied = __iovec_copy_from_user_inatomic(kaddr + offset, 1859 copied = __iovec_copy_from_user_inatomic(kaddr + offset,
@@ -1880,7 +1881,8 @@ size_t iov_iter_copy_from_user(struct page *page,
1880 if (likely(i->nr_segs == 1)) { 1881 if (likely(i->nr_segs == 1)) {
1881 int left; 1882 int left;
1882 char __user *buf = i->iov->iov_base + i->iov_offset; 1883 char __user *buf = i->iov->iov_base + i->iov_offset;
1883 left = __copy_from_user_nocache(kaddr + offset, buf, bytes); 1884
1885 left = __copy_from_user_nocache(kaddr + offset, buf, bytes, bytes);
1884 copied = bytes - left; 1886 copied = bytes - left;
1885 } else { 1887 } else {
1886 copied = __iovec_copy_from_user_inatomic(kaddr + offset, 1888 copied = __iovec_copy_from_user_inatomic(kaddr + offset,
diff --git a/mm/filemap_xip.c b/mm/filemap_xip.c
index 0c04615651b7..bf54f8a2cf1d 100644
--- a/mm/filemap_xip.c
+++ b/mm/filemap_xip.c
@@ -354,7 +354,7 @@ __xip_file_write(struct file *filp, const char __user *buf,
354 break; 354 break;
355 355
356 copied = bytes - 356 copied = bytes -
357 __copy_from_user_nocache(xip_mem + offset, buf, bytes); 357 __copy_from_user_nocache(xip_mem + offset, buf, bytes, bytes);
358 358
359 if (likely(copied > 0)) { 359 if (likely(copied > 0)) {
360 status = copied; 360 status = copied;
diff --git a/mm/vmalloc.c b/mm/vmalloc.c
index 903cad46e796..7774c6328970 100644
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -1259,6 +1259,7 @@ EXPORT_SYMBOL(vfree);
1259void vunmap(const void *addr) 1259void vunmap(const void *addr)
1260{ 1260{
1261 BUG_ON(in_interrupt()); 1261 BUG_ON(in_interrupt());
1262 might_sleep();
1262 __vunmap(addr, 0); 1263 __vunmap(addr, 0);
1263} 1264}
1264EXPORT_SYMBOL(vunmap); 1265EXPORT_SYMBOL(vunmap);
@@ -1278,6 +1279,8 @@ void *vmap(struct page **pages, unsigned int count,
1278{ 1279{
1279 struct vm_struct *area; 1280 struct vm_struct *area;
1280 1281
1282 might_sleep();
1283
1281 if (count > num_physpages) 1284 if (count > num_physpages)
1282 return NULL; 1285 return NULL;
1283 1286
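
The two might_sleep() annotations above document that vmap() and vunmap() may block and, with sleep-in-atomic debugging enabled, warn immediately if they are ever called from atomic context, even on runs where they happen not to sleep. The same annotation helps in any helper that blocks only some of the time; a minimal sketch with placeholder names:

#include <linux/kernel.h>
#include <linux/mutex.h>

static DEFINE_MUTEX(cfg_lock);
static int shared_config;

static void update_config(int value)
{
        /* Warn at the call site even when cfg_lock happens to be free. */
        might_sleep();

        mutex_lock(&cfg_lock);
        shared_config = value;
        mutex_unlock(&cfg_lock);
}
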
diff --git a/net/core/net_namespace.c b/net/core/net_namespace.c
index 55151faaf90c..2adb1a7d361f 100644
--- a/net/core/net_namespace.c
+++ b/net/core/net_namespace.c
@@ -32,24 +32,14 @@ static __net_init int setup_net(struct net *net)
32{ 32{
33 /* Must be called with net_mutex held */ 33 /* Must be called with net_mutex held */
34 struct pernet_operations *ops; 34 struct pernet_operations *ops;
35 int error; 35 int error = 0;
36 struct net_generic *ng;
37 36
38 atomic_set(&net->count, 1); 37 atomic_set(&net->count, 1);
38
39#ifdef NETNS_REFCNT_DEBUG 39#ifdef NETNS_REFCNT_DEBUG
40 atomic_set(&net->use_count, 0); 40 atomic_set(&net->use_count, 0);
41#endif 41#endif
42 42
43 error = -ENOMEM;
44 ng = kzalloc(sizeof(struct net_generic) +
45 INITIAL_NET_GEN_PTRS * sizeof(void *), GFP_KERNEL);
46 if (ng == NULL)
47 goto out;
48
49 ng->len = INITIAL_NET_GEN_PTRS;
50 rcu_assign_pointer(net->gen, ng);
51
52 error = 0;
53 list_for_each_entry(ops, &pernet_list, list) { 43 list_for_each_entry(ops, &pernet_list, list) {
54 if (ops->init) { 44 if (ops->init) {
55 error = ops->init(net); 45 error = ops->init(net);
@@ -70,24 +60,50 @@ out_undo:
70 } 60 }
71 61
72 rcu_barrier(); 62 rcu_barrier();
73 kfree(ng);
74 goto out; 63 goto out;
75} 64}
76 65
66static struct net_generic *net_alloc_generic(void)
67{
68 struct net_generic *ng;
69 size_t generic_size = sizeof(struct net_generic) +
70 INITIAL_NET_GEN_PTRS * sizeof(void *);
71
72 ng = kzalloc(generic_size, GFP_KERNEL);
73 if (ng)
74 ng->len = INITIAL_NET_GEN_PTRS;
75
76 return ng;
77}
78
77#ifdef CONFIG_NET_NS 79#ifdef CONFIG_NET_NS
78static struct kmem_cache *net_cachep; 80static struct kmem_cache *net_cachep;
79static struct workqueue_struct *netns_wq; 81static struct workqueue_struct *netns_wq;
80 82
81static struct net *net_alloc(void) 83static struct net *net_alloc(void)
82{ 84{
83 return kmem_cache_zalloc(net_cachep, GFP_KERNEL); 85 struct net *net = NULL;
86 struct net_generic *ng;
87
88 ng = net_alloc_generic();
89 if (!ng)
90 goto out;
91
92 net = kmem_cache_zalloc(net_cachep, GFP_KERNEL);
93 if (!net)
94 goto out_free;
95
96 rcu_assign_pointer(net->gen, ng);
97out:
98 return net;
99
100out_free:
101 kfree(ng);
102 goto out;
84} 103}
85 104
86static void net_free(struct net *net) 105static void net_free(struct net *net)
87{ 106{
88 if (!net)
89 return;
90
91#ifdef NETNS_REFCNT_DEBUG 107#ifdef NETNS_REFCNT_DEBUG
92 if (unlikely(atomic_read(&net->use_count) != 0)) { 108 if (unlikely(atomic_read(&net->use_count) != 0)) {
93 printk(KERN_EMERG "network namespace not free! Usage: %d\n", 109 printk(KERN_EMERG "network namespace not free! Usage: %d\n",
@@ -112,27 +128,28 @@ struct net *copy_net_ns(unsigned long flags, struct net *old_net)
112 err = -ENOMEM; 128 err = -ENOMEM;
113 new_net = net_alloc(); 129 new_net = net_alloc();
114 if (!new_net) 130 if (!new_net)
115 goto out; 131 goto out_err;
116 132
117 mutex_lock(&net_mutex); 133 mutex_lock(&net_mutex);
118 err = setup_net(new_net); 134 err = setup_net(new_net);
119 if (err) 135 if (!err) {
120 goto out_unlock; 136 rtnl_lock();
121 137 list_add_tail(&new_net->list, &net_namespace_list);
122 rtnl_lock(); 138 rtnl_unlock();
123 list_add_tail(&new_net->list, &net_namespace_list); 139 }
124 rtnl_unlock();
125
126
127out_unlock:
128 mutex_unlock(&net_mutex); 140 mutex_unlock(&net_mutex);
141
142 if (err)
143 goto out_free;
129out: 144out:
130 put_net(old_net); 145 put_net(old_net);
131 if (err) {
132 net_free(new_net);
133 new_net = ERR_PTR(err);
134 }
135 return new_net; 146 return new_net;
147
148out_free:
149 net_free(new_net);
150out_err:
151 new_net = ERR_PTR(err);
152 goto out;
136} 153}
137 154
138static void cleanup_net(struct work_struct *work) 155static void cleanup_net(struct work_struct *work)
@@ -188,6 +205,7 @@ struct net *copy_net_ns(unsigned long flags, struct net *old_net)
188 205
189static int __init net_ns_init(void) 206static int __init net_ns_init(void)
190{ 207{
208 struct net_generic *ng;
191 int err; 209 int err;
192 210
193 printk(KERN_INFO "net_namespace: %zd bytes\n", sizeof(struct net)); 211 printk(KERN_INFO "net_namespace: %zd bytes\n", sizeof(struct net));
@@ -202,6 +220,12 @@ static int __init net_ns_init(void)
202 panic("Could not create netns workq"); 220 panic("Could not create netns workq");
203#endif 221#endif
204 222
223 ng = net_alloc_generic();
224 if (!ng)
225 panic("Could not allocate generic netns");
226
227 rcu_assign_pointer(init_net.gen, ng);
228
205 mutex_lock(&net_mutex); 229 mutex_lock(&net_mutex);
206 err = setup_net(&init_net); 230 err = setup_net(&init_net);
207 231
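
The net namespace rework above moves the net_generic allocation into net_alloc() and restructures both net_alloc() and copy_net_ns() around a single goto-based unwind path, so every failure frees exactly what was allocated before it. A standalone sketch of that allocate-then-unwind shape; the structures here are stand-ins, not the kernel's.

#include <stdlib.h>

struct generic { size_t len; void *slots[]; };
struct ns { struct generic *gen; };

/* Same shape as the reworked net_alloc(): allocate the secondary object
 * first, then the main one, and unwind in reverse order through one
 * exit path on failure. */
static struct ns *ns_alloc(size_t nslots)
{
        struct ns *ns = NULL;
        struct generic *gen;

        gen = calloc(1, sizeof(*gen) + nslots * sizeof(void *));
        if (!gen)
                goto out;
        gen->len = nslots;

        ns = calloc(1, sizeof(*ns));
        if (!ns)
                goto out_free;

        ns->gen = gen;
out:
        return ns;

out_free:
        free(gen);
        goto out;
}
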
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index da74b844f4ea..c6a6b166f8d6 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -143,14 +143,6 @@ void skb_under_panic(struct sk_buff *skb, int sz, void *here)
143 BUG(); 143 BUG();
144} 144}
145 145
146void skb_truesize_bug(struct sk_buff *skb)
147{
148 WARN(net_ratelimit(), KERN_ERR "SKB BUG: Invalid truesize (%u) "
149 "len=%u, sizeof(sk_buff)=%Zd\n",
150 skb->truesize, skb->len, sizeof(struct sk_buff));
151}
152EXPORT_SYMBOL(skb_truesize_bug);
153
154/* Allocate a new skbuff. We do this ourselves so we can fill in a few 146/* Allocate a new skbuff. We do this ourselves so we can fill in a few
155 * 'private' fields and also do memory statistics to find all the 147 * 'private' fields and also do memory statistics to find all the
156 * [BEEP] leaks. 148 * [BEEP] leaks.
diff --git a/net/core/sock.c b/net/core/sock.c
index 6f2e1337975d..5f97caa158e8 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -696,7 +696,7 @@ int sock_getsockopt(struct socket *sock, int level, int optname,
696 if (len < 0) 696 if (len < 0)
697 return -EINVAL; 697 return -EINVAL;
698 698
699 v.val = 0; 699 memset(&v, 0, sizeof(v));
700 700
701 switch(optname) { 701 switch(optname) {
702 case SO_DEBUG: 702 case SO_DEBUG:
@@ -1137,7 +1137,6 @@ void sock_rfree(struct sk_buff *skb)
1137{ 1137{
1138 struct sock *sk = skb->sk; 1138 struct sock *sk = skb->sk;
1139 1139
1140 skb_truesize_check(skb);
1141 atomic_sub(skb->truesize, &sk->sk_rmem_alloc); 1140 atomic_sub(skb->truesize, &sk->sk_rmem_alloc);
1142 sk_mem_uncharge(skb->sk, skb->truesize); 1141 sk_mem_uncharge(skb->sk, skb->truesize);
1143} 1142}
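
The memset() in sock_getsockopt() above replaces the initialisation of a single union member: only the member that was written is guaranteed to hold defined values, so copying a larger member back to user space could return stale kernel-stack bytes. A standalone illustration of the union pitfall (the union here is a stand-in, not the socket code's):

#include <stdio.h>
#include <string.h>

union reply {
        int val;
        struct { int l_onoff, l_linger; } ling;
        char buf[32];
};

int main(void)
{
        union reply v;

        /* Setting one member leaves the rest of the union's storage
         * uninitialised; copying sizeof(v.ling) or more to user space
         * would leak whatever was on the stack. */
        v.val = 0;
        printf("without memset, bytes beyond v.val are indeterminate\n");

        /* Zeroing the whole object, as the fix does, makes every member
         * well defined no matter which one is copied out. */
        memset(&v, 0, sizeof(v));
        printf("after memset, v.ling = {%d, %d}\n", v.ling.l_onoff, v.ling.l_linger);
        return 0;
}
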
diff --git a/net/ipv4/cipso_ipv4.c b/net/ipv4/cipso_ipv4.c
index 6bb2635b5ded..7bc992976d29 100644
--- a/net/ipv4/cipso_ipv4.c
+++ b/net/ipv4/cipso_ipv4.c
@@ -3,11 +3,16 @@
3 * 3 *
4 * This is an implementation of the CIPSO 2.2 protocol as specified in 4 * This is an implementation of the CIPSO 2.2 protocol as specified in
5 * draft-ietf-cipso-ipsecurity-01.txt with additional tag types as found in 5 * draft-ietf-cipso-ipsecurity-01.txt with additional tag types as found in
6 * FIPS-188, copies of both documents can be found in the Documentation 6 * FIPS-188. While CIPSO never became a full IETF RFC standard many vendors
7 * directory. While CIPSO never became a full IETF RFC standard many vendors
8 * have chosen to adopt the protocol and over the years it has become a 7 * have chosen to adopt the protocol and over the years it has become a
9 * de-facto standard for labeled networking. 8 * de-facto standard for labeled networking.
10 * 9 *
10 * The CIPSO draft specification can be found in the kernel's Documentation
11 * directory as well as the following URL:
12 * http://netlabel.sourceforge.net/files/draft-ietf-cipso-ipsecurity-01.txt
13 * The FIPS-188 specification can be found at the following URL:
14 * http://www.itl.nist.gov/fipspubs/fip188.htm
15 *
11 * Author: Paul Moore <paul.moore@hp.com> 16 * Author: Paul Moore <paul.moore@hp.com>
12 * 17 *
13 */ 18 */
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index dda42f0bd7a3..da2c3b8794f2 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -2023,7 +2023,6 @@ void tcp_xmit_retransmit_queue(struct sock *sk)
2023 last_lost = tp->snd_una; 2023 last_lost = tp->snd_una;
2024 } 2024 }
2025 2025
2026 /* First pass: retransmit lost packets. */
2027 tcp_for_write_queue_from(skb, sk) { 2026 tcp_for_write_queue_from(skb, sk) {
2028 __u8 sacked = TCP_SKB_CB(skb)->sacked; 2027 __u8 sacked = TCP_SKB_CB(skb)->sacked;
2029 2028
diff --git a/security/selinux/netlabel.c b/security/selinux/netlabel.c
index f58701a7b728..3f4b26647386 100644
--- a/security/selinux/netlabel.c
+++ b/security/selinux/netlabel.c
@@ -490,8 +490,10 @@ int selinux_netlbl_socket_setsockopt(struct socket *sock,
490 lock_sock(sk); 490 lock_sock(sk);
491 rc = netlbl_sock_getattr(sk, &secattr); 491 rc = netlbl_sock_getattr(sk, &secattr);
492 release_sock(sk); 492 release_sock(sk);
493 if (rc == 0 && secattr.flags != NETLBL_SECATTR_NONE) 493 if (rc == 0)
494 rc = -EACCES; 494 rc = -EACCES;
495 else if (rc == -ENOMSG)
496 rc = 0;
495 netlbl_secattr_destroy(&secattr); 497 netlbl_secattr_destroy(&secattr);
496 } 498 }
497 499