Diffstat (limited to 'drivers')
52 files changed, 903 insertions, 1112 deletions
diff --git a/drivers/acorn/char/Makefile b/drivers/acorn/char/Makefile
index 2fa9a8bf48a0..d006c9f168d2 100644
--- a/drivers/acorn/char/Makefile
+++ b/drivers/acorn/char/Makefile
@@ -2,5 +2,4 @@ | |||
2 | # Makefile for the acorn character device drivers. | 2 | # Makefile for the acorn character device drivers. |
3 | # | 3 | # |
4 | 4 | ||
5 | obj-$(CONFIG_ARCH_ACORN) += i2c.o pcf8583.o | ||
6 | obj-$(CONFIG_L7200_KEYB) += defkeymap-l7200.o keyb_l7200.o | 5 | obj-$(CONFIG_L7200_KEYB) += defkeymap-l7200.o keyb_l7200.o |
diff --git a/drivers/acorn/char/i2c.c b/drivers/acorn/char/i2c.c
deleted file mode 100644
index d276fd14d63a..000000000000
--- a/drivers/acorn/char/i2c.c
+++ /dev/null
@@ -1,368 +0,0 @@ | |||
1 | /* | ||
2 | * linux/drivers/acorn/char/i2c.c | ||
3 | * | ||
4 | * Copyright (C) 2000 Russell King | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or modify | ||
7 | * it under the terms of the GNU General Public License version 2 as | ||
8 | * published by the Free Software Foundation. | ||
9 | * | ||
10 | * ARM IOC/IOMD i2c driver. | ||
11 | * | ||
12 | * On Acorn machines, the following i2c devices are on the bus: | ||
13 | * - PCF8583 real time clock & static RAM | ||
14 | */ | ||
15 | #include <linux/capability.h> | ||
16 | #include <linux/init.h> | ||
17 | #include <linux/time.h> | ||
18 | #include <linux/miscdevice.h> | ||
19 | #include <linux/rtc.h> | ||
20 | #include <linux/i2c.h> | ||
21 | #include <linux/i2c-algo-bit.h> | ||
22 | #include <linux/fs.h> | ||
23 | |||
24 | #include <asm/hardware.h> | ||
25 | #include <asm/io.h> | ||
26 | #include <asm/hardware/ioc.h> | ||
27 | #include <asm/system.h> | ||
28 | #include <asm/uaccess.h> | ||
29 | |||
30 | #include "pcf8583.h" | ||
31 | |||
32 | extern int (*set_rtc)(void); | ||
33 | |||
34 | static struct i2c_client *rtc_client; | ||
35 | static const unsigned char days_in_mon[] = | ||
36 | { 0, 31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31 }; | ||
37 | |||
38 | #define CMOS_CHECKSUM (63) | ||
39 | |||
40 | /* | ||
41 | * Acorn machines store the year in the static RAM at | ||
42 | * location 128. | ||
43 | */ | ||
44 | #define CMOS_YEAR (64 + 128) | ||
45 | |||
46 | static inline int rtc_command(int cmd, void *data) | ||
47 | { | ||
48 | int ret = -EIO; | ||
49 | |||
50 | if (rtc_client) | ||
51 | ret = rtc_client->driver->command(rtc_client, cmd, data); | ||
52 | |||
53 | return ret; | ||
54 | } | ||
55 | |||
56 | /* | ||
57 | * Update the century + year bytes in the CMOS RAM, ensuring | ||
58 | * that the check byte is correctly adjusted for the change. | ||
59 | */ | ||
60 | static int rtc_update_year(unsigned int new_year) | ||
61 | { | ||
62 | unsigned char yr[2], chk; | ||
63 | struct mem cmos_year = { CMOS_YEAR, sizeof(yr), yr }; | ||
64 | struct mem cmos_check = { CMOS_CHECKSUM, 1, &chk }; | ||
65 | int ret; | ||
66 | |||
67 | ret = rtc_command(MEM_READ, &cmos_check); | ||
68 | if (ret) | ||
69 | goto out; | ||
70 | ret = rtc_command(MEM_READ, &cmos_year); | ||
71 | if (ret) | ||
72 | goto out; | ||
73 | |||
74 | chk -= yr[1] + yr[0]; | ||
75 | |||
76 | yr[1] = new_year / 100; | ||
77 | yr[0] = new_year % 100; | ||
78 | |||
79 | chk += yr[1] + yr[0]; | ||
80 | |||
81 | ret = rtc_command(MEM_WRITE, &cmos_year); | ||
82 | if (ret == 0) | ||
83 | ret = rtc_command(MEM_WRITE, &cmos_check); | ||
84 | out: | ||
85 | return ret; | ||
86 | } | ||
87 | |||
88 | /* | ||
89 | * Read the current RTC time and date, and update xtime. | ||
90 | */ | ||
91 | static void get_rtc_time(struct rtc_tm *rtctm, unsigned int *year) | ||
92 | { | ||
93 | unsigned char ctrl, yr[2]; | ||
94 | struct mem rtcmem = { CMOS_YEAR, sizeof(yr), yr }; | ||
95 | int real_year, year_offset; | ||
96 | |||
97 | /* | ||
98 | * Ensure that the RTC is running. | ||
99 | */ | ||
100 | rtc_command(RTC_GETCTRL, &ctrl); | ||
101 | if (ctrl & 0xc0) { | ||
102 | unsigned char new_ctrl = ctrl & ~0xc0; | ||
103 | |||
104 | printk(KERN_WARNING "RTC: resetting control %02x -> %02x\n", | ||
105 | ctrl, new_ctrl); | ||
106 | |||
107 | rtc_command(RTC_SETCTRL, &new_ctrl); | ||
108 | } | ||
109 | |||
110 | if (rtc_command(RTC_GETDATETIME, rtctm) || | ||
111 | rtc_command(MEM_READ, &rtcmem)) | ||
112 | return; | ||
113 | |||
114 | real_year = yr[0]; | ||
115 | |||
116 | /* | ||
117 | * The RTC year holds the LSB two bits of the current | ||
118 | * year, which should reflect the LSB two bits of the | ||
119 | * CMOS copy of the year. Any difference indicates | ||
120 | * that we have to correct the CMOS version. | ||
121 | */ | ||
122 | year_offset = rtctm->year_off - (real_year & 3); | ||
123 | if (year_offset < 0) | ||
124 | /* | ||
125 | * RTC year wrapped. Adjust it appropriately. | ||
126 | */ | ||
127 | year_offset += 4; | ||
128 | |||
129 | *year = real_year + year_offset + yr[1] * 100; | ||
130 | } | ||
131 | |||
132 | static int set_rtc_time(struct rtc_tm *rtctm, unsigned int year) | ||
133 | { | ||
134 | unsigned char leap; | ||
135 | int ret; | ||
136 | |||
137 | leap = (!(year % 4) && (year % 100)) || !(year % 400); | ||
138 | |||
139 | if (rtctm->mon > 12 || rtctm->mon == 0 || rtctm->mday == 0) | ||
140 | return -EINVAL; | ||
141 | |||
142 | if (rtctm->mday > (days_in_mon[rtctm->mon] + (rtctm->mon == 2 && leap))) | ||
143 | return -EINVAL; | ||
144 | |||
145 | if (rtctm->hours >= 24 || rtctm->mins >= 60 || rtctm->secs >= 60) | ||
146 | return -EINVAL; | ||
147 | |||
148 | /* | ||
149 | * The RTC's own 2-bit year must reflect the least | ||
150 | * significant two bits of the CMOS year. | ||
151 | */ | ||
152 | rtctm->year_off = (year % 100) & 3; | ||
153 | |||
154 | ret = rtc_command(RTC_SETDATETIME, rtctm); | ||
155 | if (ret == 0) | ||
156 | ret = rtc_update_year(year); | ||
157 | |||
158 | return ret; | ||
159 | } | ||
160 | |||
161 | /* | ||
162 | * Set the RTC time only. Note that | ||
163 | * we do not touch the date. | ||
164 | */ | ||
165 | static int k_set_rtc_time(void) | ||
166 | { | ||
167 | struct rtc_tm new_rtctm, old_rtctm; | ||
168 | unsigned long nowtime = xtime.tv_sec; | ||
169 | |||
170 | if (rtc_command(RTC_GETDATETIME, &old_rtctm)) | ||
171 | return 0; | ||
172 | |||
173 | new_rtctm.cs = xtime.tv_nsec / 10000000; | ||
174 | new_rtctm.secs = nowtime % 60; nowtime /= 60; | ||
175 | new_rtctm.mins = nowtime % 60; nowtime /= 60; | ||
176 | new_rtctm.hours = nowtime % 24; | ||
177 | |||
178 | /* | ||
179 | * avoid writing when we're going to change the day | ||
180 | * of the month. We will retry in the next minute. | ||
181 | * This basically means that if the RTC must not drift | ||
182 | * by more than 1 minute in 11 minutes. | ||
183 | * | ||
184 | * [ rtc: 1/1/2000 23:58:00, real 2/1/2000 00:01:00, | ||
185 | * rtc gets set to 1/1/2000 00:01:00 ] | ||
186 | */ | ||
187 | if ((old_rtctm.hours == 23 && old_rtctm.mins == 59) || | ||
188 | (new_rtctm.hours == 23 && new_rtctm.mins == 59)) | ||
189 | return 1; | ||
190 | |||
191 | return rtc_command(RTC_SETTIME, &new_rtctm); | ||
192 | } | ||
193 | |||
194 | static int rtc_ioctl(struct inode *inode, struct file *file, | ||
195 | unsigned int cmd, unsigned long arg) | ||
196 | { | ||
197 | unsigned int year; | ||
198 | struct rtc_time rtctm; | ||
199 | struct rtc_tm rtc_raw; | ||
200 | |||
201 | switch (cmd) { | ||
202 | case RTC_ALM_READ: | ||
203 | case RTC_ALM_SET: | ||
204 | break; | ||
205 | |||
206 | case RTC_RD_TIME: | ||
207 | memset(&rtctm, 0, sizeof(struct rtc_time)); | ||
208 | get_rtc_time(&rtc_raw, &year); | ||
209 | rtctm.tm_sec = rtc_raw.secs; | ||
210 | rtctm.tm_min = rtc_raw.mins; | ||
211 | rtctm.tm_hour = rtc_raw.hours; | ||
212 | rtctm.tm_mday = rtc_raw.mday; | ||
213 | rtctm.tm_mon = rtc_raw.mon - 1; /* month starts at 0 */ | ||
214 | rtctm.tm_year = year - 1900; /* starts at 1900 */ | ||
215 | return copy_to_user((void *)arg, &rtctm, sizeof(rtctm)) | ||
216 | ? -EFAULT : 0; | ||
217 | |||
218 | case RTC_SET_TIME: | ||
219 | if (!capable(CAP_SYS_TIME)) | ||
220 | return -EACCES; | ||
221 | |||
222 | if (copy_from_user(&rtctm, (void *)arg, sizeof(rtctm))) | ||
223 | return -EFAULT; | ||
224 | rtc_raw.secs = rtctm.tm_sec; | ||
225 | rtc_raw.mins = rtctm.tm_min; | ||
226 | rtc_raw.hours = rtctm.tm_hour; | ||
227 | rtc_raw.mday = rtctm.tm_mday; | ||
228 | rtc_raw.mon = rtctm.tm_mon + 1; | ||
229 | year = rtctm.tm_year + 1900; | ||
230 | return set_rtc_time(&rtc_raw, year); | ||
231 | break; | ||
232 | |||
233 | case RTC_EPOCH_READ: | ||
234 | return put_user(1900, (unsigned long *)arg); | ||
235 | |||
236 | } | ||
237 | return -EINVAL; | ||
238 | } | ||
239 | |||
240 | static const struct file_operations rtc_fops = { | ||
241 | .ioctl = rtc_ioctl, | ||
242 | }; | ||
243 | |||
244 | static struct miscdevice rtc_dev = { | ||
245 | .minor = RTC_MINOR, | ||
246 | .name = "rtc", | ||
247 | .fops = &rtc_fops, | ||
248 | }; | ||
249 | |||
250 | /* IOC / IOMD i2c driver */ | ||
251 | |||
252 | #define FORCE_ONES 0xdc | ||
253 | #define SCL 0x02 | ||
254 | #define SDA 0x01 | ||
255 | |||
256 | /* | ||
257 | * We must preserve all non-i2c output bits in IOC_CONTROL. | ||
258 | * Note also that we need to preserve the value of SCL and | ||
259 | * SDA outputs as well (which may be different from the | ||
260 | * values read back from IOC_CONTROL). | ||
261 | */ | ||
262 | static u_int force_ones; | ||
263 | |||
264 | static void ioc_setscl(void *data, int state) | ||
265 | { | ||
266 | u_int ioc_control = ioc_readb(IOC_CONTROL) & ~(SCL | SDA); | ||
267 | u_int ones = force_ones; | ||
268 | |||
269 | if (state) | ||
270 | ones |= SCL; | ||
271 | else | ||
272 | ones &= ~SCL; | ||
273 | |||
274 | force_ones = ones; | ||
275 | |||
276 | ioc_writeb(ioc_control | ones, IOC_CONTROL); | ||
277 | } | ||
278 | |||
279 | static void ioc_setsda(void *data, int state) | ||
280 | { | ||
281 | u_int ioc_control = ioc_readb(IOC_CONTROL) & ~(SCL | SDA); | ||
282 | u_int ones = force_ones; | ||
283 | |||
284 | if (state) | ||
285 | ones |= SDA; | ||
286 | else | ||
287 | ones &= ~SDA; | ||
288 | |||
289 | force_ones = ones; | ||
290 | |||
291 | ioc_writeb(ioc_control | ones, IOC_CONTROL); | ||
292 | } | ||
293 | |||
294 | static int ioc_getscl(void *data) | ||
295 | { | ||
296 | return (ioc_readb(IOC_CONTROL) & SCL) != 0; | ||
297 | } | ||
298 | |||
299 | static int ioc_getsda(void *data) | ||
300 | { | ||
301 | return (ioc_readb(IOC_CONTROL) & SDA) != 0; | ||
302 | } | ||
303 | |||
304 | static struct i2c_algo_bit_data ioc_data = { | ||
305 | .setsda = ioc_setsda, | ||
306 | .setscl = ioc_setscl, | ||
307 | .getsda = ioc_getsda, | ||
308 | .getscl = ioc_getscl, | ||
309 | .udelay = 80, | ||
310 | .timeout = 100 | ||
311 | }; | ||
312 | |||
313 | static int ioc_client_reg(struct i2c_client *client) | ||
314 | { | ||
315 | if (client->driver->id == I2C_DRIVERID_PCF8583 && | ||
316 | client->addr == 0x50) { | ||
317 | struct rtc_tm rtctm; | ||
318 | unsigned int year; | ||
319 | struct timespec tv; | ||
320 | |||
321 | rtc_client = client; | ||
322 | get_rtc_time(&rtctm, &year); | ||
323 | |||
324 | tv.tv_nsec = rtctm.cs * 10000000; | ||
325 | tv.tv_sec = mktime(year, rtctm.mon, rtctm.mday, | ||
326 | rtctm.hours, rtctm.mins, rtctm.secs); | ||
327 | do_settimeofday(&tv); | ||
328 | set_rtc = k_set_rtc_time; | ||
329 | } | ||
330 | |||
331 | return 0; | ||
332 | } | ||
333 | |||
334 | static int ioc_client_unreg(struct i2c_client *client) | ||
335 | { | ||
336 | if (client == rtc_client) { | ||
337 | set_rtc = NULL; | ||
338 | rtc_client = NULL; | ||
339 | } | ||
340 | |||
341 | return 0; | ||
342 | } | ||
343 | |||
344 | static struct i2c_adapter ioc_ops = { | ||
345 | .id = I2C_HW_B_IOC, | ||
346 | .algo_data = &ioc_data, | ||
347 | .client_register = ioc_client_reg, | ||
348 | .client_unregister = ioc_client_unreg, | ||
349 | }; | ||
350 | |||
351 | static int __init i2c_ioc_init(void) | ||
352 | { | ||
353 | int ret; | ||
354 | |||
355 | force_ones = FORCE_ONES | SCL | SDA; | ||
356 | |||
357 | ret = i2c_bit_add_bus(&ioc_ops); | ||
358 | |||
359 | if (ret >= 0){ | ||
360 | ret = misc_register(&rtc_dev); | ||
361 | if(ret < 0) | ||
362 | i2c_del_adapter(&ioc_ops); | ||
363 | } | ||
364 | |||
365 | return ret; | ||
366 | } | ||
367 | |||
368 | __initcall(i2c_ioc_init); | ||
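The subtle part of the removed rtc_update_year() above is the check byte: rather than recomputing a checksum over the whole CMOS area, it delta-adjusts the byte at CMOS_CHECKSUM by subtracting the old century/year bytes and adding the new ones. A standalone sketch of that arithmetic, with a hypothetical helper and made-up values (not part of the driver):

#include <stdio.h>

/* Delta-update an additive CMOS check byte when the century/year bytes
 * change, mirroring the removed rtc_update_year(). */
static unsigned char update_checksum(unsigned char chk,
                                     const unsigned char old_yr[2],
                                     unsigned int new_year,
                                     unsigned char new_yr[2])
{
        chk -= old_yr[1] + old_yr[0];   /* drop old century + year */

        new_yr[1] = new_year / 100;     /* century */
        new_yr[0] = new_year % 100;     /* year within the century */

        chk += new_yr[1] + new_yr[0];   /* fold the new bytes back in */
        return chk;
}

int main(void)
{
        unsigned char old_yr[2] = { 6, 20 };    /* stored as 2006 */
        unsigned char new_yr[2], chk = 0x42;

        chk = update_checksum(chk, old_yr, 2007, new_yr);
        printf("year bytes %u/%u, check byte %#x\n",
               new_yr[1], new_yr[0], chk);
        return 0;
}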
diff --git a/drivers/acorn/char/pcf8583.c b/drivers/acorn/char/pcf8583.c
deleted file mode 100644
index 9b49f316ae92..000000000000
--- a/drivers/acorn/char/pcf8583.c
+++ /dev/null
@@ -1,284 +0,0 @@ | |||
1 | /* | ||
2 | * linux/drivers/acorn/char/pcf8583.c | ||
3 | * | ||
4 | * Copyright (C) 2000 Russell King | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or modify | ||
7 | * it under the terms of the GNU General Public License version 2 as | ||
8 | * published by the Free Software Foundation. | ||
9 | * | ||
10 | * Driver for PCF8583 RTC & RAM chip | ||
11 | */ | ||
12 | #include <linux/module.h> | ||
13 | #include <linux/i2c.h> | ||
14 | #include <linux/slab.h> | ||
15 | #include <linux/string.h> | ||
16 | #include <linux/mc146818rtc.h> | ||
17 | #include <linux/init.h> | ||
18 | #include <linux/errno.h> | ||
19 | #include <linux/bcd.h> | ||
20 | |||
21 | #include "pcf8583.h" | ||
22 | |||
23 | static struct i2c_driver pcf8583_driver; | ||
24 | |||
25 | static unsigned short ignore[] = { I2C_CLIENT_END }; | ||
26 | static unsigned short normal_addr[] = { 0x50, I2C_CLIENT_END }; | ||
27 | static unsigned short *forces[] = { NULL }; | ||
28 | |||
29 | static struct i2c_client_address_data addr_data = { | ||
30 | .normal_i2c = normal_addr, | ||
31 | .probe = ignore, | ||
32 | .ignore = ignore, | ||
33 | .forces = forces, | ||
34 | }; | ||
35 | |||
36 | #define set_ctrl(x, v) i2c_set_clientdata(x, (void *)(unsigned int)(v)) | ||
37 | #define get_ctrl(x) ((unsigned int)i2c_get_clientdata(x)) | ||
38 | |||
39 | static int | ||
40 | pcf8583_attach(struct i2c_adapter *adap, int addr, int kind) | ||
41 | { | ||
42 | struct i2c_client *c; | ||
43 | unsigned char buf[1], ad[1] = { 0 }; | ||
44 | struct i2c_msg msgs[2] = { | ||
45 | { | ||
46 | .addr = addr, | ||
47 | .flags = 0, | ||
48 | .len = 1, | ||
49 | .buf = ad, | ||
50 | }, { | ||
51 | .addr = addr, | ||
52 | .flags = I2C_M_RD, | ||
53 | .len = 1, | ||
54 | .buf = buf, | ||
55 | } | ||
56 | }; | ||
57 | |||
58 | c = kmalloc(sizeof(*c), GFP_KERNEL); | ||
59 | if (!c) | ||
60 | return -ENOMEM; | ||
61 | |||
62 | memset(c, 0, sizeof(*c)); | ||
63 | c->addr = addr; | ||
64 | c->adapter = adap; | ||
65 | c->driver = &pcf8583_driver; | ||
66 | |||
67 | if (i2c_transfer(c->adapter, msgs, 2) == 2) | ||
68 | set_ctrl(c, buf[0]); | ||
69 | |||
70 | return i2c_attach_client(c); | ||
71 | } | ||
72 | |||
73 | static int | ||
74 | pcf8583_probe(struct i2c_adapter *adap) | ||
75 | { | ||
76 | return i2c_probe(adap, &addr_data, pcf8583_attach); | ||
77 | } | ||
78 | |||
79 | static int | ||
80 | pcf8583_detach(struct i2c_client *client) | ||
81 | { | ||
82 | i2c_detach_client(client); | ||
83 | kfree(client); | ||
84 | return 0; | ||
85 | } | ||
86 | |||
87 | static int | ||
88 | pcf8583_get_datetime(struct i2c_client *client, struct rtc_tm *dt) | ||
89 | { | ||
90 | unsigned char buf[8], addr[1] = { 1 }; | ||
91 | struct i2c_msg msgs[2] = { | ||
92 | { | ||
93 | .addr = client->addr, | ||
94 | .flags = 0, | ||
95 | .len = 1, | ||
96 | .buf = addr, | ||
97 | }, { | ||
98 | .addr = client->addr, | ||
99 | .flags = I2C_M_RD, | ||
100 | .len = 6, | ||
101 | .buf = buf, | ||
102 | } | ||
103 | }; | ||
104 | int ret = -EIO; | ||
105 | |||
106 | memset(buf, 0, sizeof(buf)); | ||
107 | |||
108 | ret = i2c_transfer(client->adapter, msgs, 2); | ||
109 | if (ret == 2) { | ||
110 | dt->year_off = buf[4] >> 6; | ||
111 | dt->wday = buf[5] >> 5; | ||
112 | |||
113 | buf[4] &= 0x3f; | ||
114 | buf[5] &= 0x1f; | ||
115 | |||
116 | dt->cs = BCD_TO_BIN(buf[0]); | ||
117 | dt->secs = BCD_TO_BIN(buf[1]); | ||
118 | dt->mins = BCD_TO_BIN(buf[2]); | ||
119 | dt->hours = BCD_TO_BIN(buf[3]); | ||
120 | dt->mday = BCD_TO_BIN(buf[4]); | ||
121 | dt->mon = BCD_TO_BIN(buf[5]); | ||
122 | |||
123 | ret = 0; | ||
124 | } | ||
125 | |||
126 | return ret; | ||
127 | } | ||
128 | |||
129 | static int | ||
130 | pcf8583_set_datetime(struct i2c_client *client, struct rtc_tm *dt, int datetoo) | ||
131 | { | ||
132 | unsigned char buf[8]; | ||
133 | int ret, len = 6; | ||
134 | |||
135 | buf[0] = 0; | ||
136 | buf[1] = get_ctrl(client) | 0x80; | ||
137 | buf[2] = BIN_TO_BCD(dt->cs); | ||
138 | buf[3] = BIN_TO_BCD(dt->secs); | ||
139 | buf[4] = BIN_TO_BCD(dt->mins); | ||
140 | buf[5] = BIN_TO_BCD(dt->hours); | ||
141 | |||
142 | if (datetoo) { | ||
143 | len = 8; | ||
144 | buf[6] = BIN_TO_BCD(dt->mday) | (dt->year_off << 6); | ||
145 | buf[7] = BIN_TO_BCD(dt->mon) | (dt->wday << 5); | ||
146 | } | ||
147 | |||
148 | ret = i2c_master_send(client, (char *)buf, len); | ||
149 | if (ret == len) | ||
150 | ret = 0; | ||
151 | |||
152 | buf[1] = get_ctrl(client); | ||
153 | i2c_master_send(client, (char *)buf, 2); | ||
154 | |||
155 | return ret; | ||
156 | } | ||
157 | |||
158 | static int | ||
159 | pcf8583_get_ctrl(struct i2c_client *client, unsigned char *ctrl) | ||
160 | { | ||
161 | *ctrl = get_ctrl(client); | ||
162 | return 0; | ||
163 | } | ||
164 | |||
165 | static int | ||
166 | pcf8583_set_ctrl(struct i2c_client *client, unsigned char *ctrl) | ||
167 | { | ||
168 | unsigned char buf[2]; | ||
169 | |||
170 | buf[0] = 0; | ||
171 | buf[1] = *ctrl; | ||
172 | set_ctrl(client, *ctrl); | ||
173 | |||
174 | return i2c_master_send(client, (char *)buf, 2); | ||
175 | } | ||
176 | |||
177 | static int | ||
178 | pcf8583_read_mem(struct i2c_client *client, struct mem *mem) | ||
179 | { | ||
180 | unsigned char addr[1]; | ||
181 | struct i2c_msg msgs[2] = { | ||
182 | { | ||
183 | .addr = client->addr, | ||
184 | .flags = 0, | ||
185 | .len = 1, | ||
186 | .buf = addr, | ||
187 | }, { | ||
188 | .addr = client->addr, | ||
189 | .flags = I2C_M_RD, | ||
190 | .len = mem->nr, | ||
191 | .buf = mem->data, | ||
192 | } | ||
193 | }; | ||
194 | |||
195 | if (mem->loc < 8) | ||
196 | return -EINVAL; | ||
197 | |||
198 | addr[0] = mem->loc; | ||
199 | |||
200 | return i2c_transfer(client->adapter, msgs, 2) == 2 ? 0 : -EIO; | ||
201 | } | ||
202 | |||
203 | static int | ||
204 | pcf8583_write_mem(struct i2c_client *client, struct mem *mem) | ||
205 | { | ||
206 | unsigned char addr[1]; | ||
207 | struct i2c_msg msgs[2] = { | ||
208 | { | ||
209 | .addr = client->addr, | ||
210 | .flags = 0, | ||
211 | .len = 1, | ||
212 | .buf = addr, | ||
213 | }, { | ||
214 | .addr = client->addr, | ||
215 | .flags = I2C_M_NOSTART, | ||
216 | .len = mem->nr, | ||
217 | .buf = mem->data, | ||
218 | } | ||
219 | }; | ||
220 | |||
221 | if (mem->loc < 8) | ||
222 | return -EINVAL; | ||
223 | |||
224 | addr[0] = mem->loc; | ||
225 | |||
226 | return i2c_transfer(client->adapter, msgs, 2) == 2 ? 0 : -EIO; | ||
227 | } | ||
228 | |||
229 | static int | ||
230 | pcf8583_command(struct i2c_client *client, unsigned int cmd, void *arg) | ||
231 | { | ||
232 | switch (cmd) { | ||
233 | case RTC_GETDATETIME: | ||
234 | return pcf8583_get_datetime(client, arg); | ||
235 | |||
236 | case RTC_SETTIME: | ||
237 | return pcf8583_set_datetime(client, arg, 0); | ||
238 | |||
239 | case RTC_SETDATETIME: | ||
240 | return pcf8583_set_datetime(client, arg, 1); | ||
241 | |||
242 | case RTC_GETCTRL: | ||
243 | return pcf8583_get_ctrl(client, arg); | ||
244 | |||
245 | case RTC_SETCTRL: | ||
246 | return pcf8583_set_ctrl(client, arg); | ||
247 | |||
248 | case MEM_READ: | ||
249 | return pcf8583_read_mem(client, arg); | ||
250 | |||
251 | case MEM_WRITE: | ||
252 | return pcf8583_write_mem(client, arg); | ||
253 | |||
254 | default: | ||
255 | return -EINVAL; | ||
256 | } | ||
257 | } | ||
258 | |||
259 | static struct i2c_driver pcf8583_driver = { | ||
260 | .driver = { | ||
261 | .name = "PCF8583", | ||
262 | }, | ||
263 | .id = I2C_DRIVERID_PCF8583, | ||
264 | .attach_adapter = pcf8583_probe, | ||
265 | .detach_client = pcf8583_detach, | ||
266 | .command = pcf8583_command | ||
267 | }; | ||
268 | |||
269 | static __init int pcf8583_init(void) | ||
270 | { | ||
271 | return i2c_add_driver(&pcf8583_driver); | ||
272 | } | ||
273 | |||
274 | static __exit void pcf8583_exit(void) | ||
275 | { | ||
276 | i2c_del_driver(&pcf8583_driver); | ||
277 | } | ||
278 | |||
279 | module_init(pcf8583_init); | ||
280 | module_exit(pcf8583_exit); | ||
281 | |||
282 | MODULE_AUTHOR("Russell King"); | ||
283 | MODULE_DESCRIPTION("PCF8583 I2C RTC driver"); | ||
284 | MODULE_LICENSE("GPL"); | ||
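The PCF8583 holds its time and date registers in packed BCD, which is why pcf8583_get_datetime()/pcf8583_set_datetime() above pass every field through BCD_TO_BIN/BIN_TO_BCD from <linux/bcd.h>. A rough standalone sketch of what those conversions do (illustrative only, not the kernel's macros):

#include <stdio.h>

/* Packed BCD: each nibble is one decimal digit, so 0x59 encodes 59. */
static unsigned char bcd_to_bin(unsigned char bcd)
{
        return (bcd >> 4) * 10 + (bcd & 0x0f);
}

static unsigned char bin_to_bcd(unsigned char bin)
{
        return ((bin / 10) << 4) | (bin % 10);
}

int main(void)
{
        unsigned char secs = 0x59;      /* as read back from the chip */

        printf("raw 0x%02x -> %u seconds\n", secs, bcd_to_bin(secs));
        printf("37 minutes -> raw 0x%02x\n", bin_to_bcd(37));
        return 0;
}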
diff --git a/drivers/acorn/char/pcf8583.h b/drivers/acorn/char/pcf8583.h
deleted file mode 100644
index 847f7fdb8763..000000000000
--- a/drivers/acorn/char/pcf8583.h
+++ /dev/null
@@ -1,41 +0,0 @@ | |||
1 | /* | ||
2 | * linux/drivers/acorn/char/pcf8583.h | ||
3 | * | ||
4 | * Copyright (C) 2000 Russell King | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or modify | ||
7 | * it under the terms of the GNU General Public License version 2 as | ||
8 | * published by the Free Software Foundation. | ||
9 | */ | ||
10 | struct rtc_tm { | ||
11 | unsigned char cs; | ||
12 | unsigned char secs; | ||
13 | unsigned char mins; | ||
14 | unsigned char hours; | ||
15 | unsigned char mday; | ||
16 | unsigned char mon; | ||
17 | unsigned char year_off; | ||
18 | unsigned char wday; | ||
19 | }; | ||
20 | |||
21 | struct mem { | ||
22 | unsigned int loc; | ||
23 | unsigned int nr; | ||
24 | unsigned char *data; | ||
25 | }; | ||
26 | |||
27 | #define RTC_GETDATETIME 0 | ||
28 | #define RTC_SETTIME 1 | ||
29 | #define RTC_SETDATETIME 2 | ||
30 | #define RTC_GETCTRL 3 | ||
31 | #define RTC_SETCTRL 4 | ||
32 | #define MEM_READ 5 | ||
33 | #define MEM_WRITE 6 | ||
34 | |||
35 | #define CTRL_STOP 0x80 | ||
36 | #define CTRL_HOLD 0x40 | ||
37 | #define CTRL_32KHZ 0x00 | ||
38 | #define CTRL_MASK 0x08 | ||
39 | #define CTRL_ALARMEN 0x04 | ||
40 | #define CTRL_ALARM 0x02 | ||
41 | #define CTRL_TIMER 0x01 | ||
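struct mem above describes a window into the PCF8583's battery-backed RAM: loc is the register address (pcf8583_read_mem()/pcf8583_write_mem() in the removed pcf8583.c reject anything below 8, i.e. the clock registers), nr is the byte count, and data points at the caller's buffer. A sketch of how the removed i2c.c filled one in to fetch the two CMOS year bytes (struct and constants copied from the files above; the command callback is a stand-in for rtc_command()):

/* Sketch only: read the century/year bytes kept at offset CMOS_YEAR. */
struct mem {
        unsigned int loc;
        unsigned int nr;
        unsigned char *data;
};

#define CMOS_YEAR (64 + 128)
#define MEM_READ  5

static int read_year_bytes(int (*command)(int cmd, void *data),
                           unsigned char yr[2])
{
        struct mem cmos_year = { CMOS_YEAR, 2, yr };

        return command(MEM_READ, &cmos_year);
}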
diff --git a/drivers/ata/pata_jmicron.c b/drivers/ata/pata_jmicron.c
index 033319e8b6cf..43763c99ea02 100644
--- a/drivers/ata/pata_jmicron.c
+++ b/drivers/ata/pata_jmicron.c
@@ -244,7 +244,6 @@ static void __exit jmicron_exit(void) | |||
244 | { | 244 | { |
245 | pci_unregister_driver(&jmicron_pci_driver); | 245 | pci_unregister_driver(&jmicron_pci_driver); |
246 | } | 246 | } |
247 | #endif | ||
248 | 247 | ||
249 | module_init(jmicron_init); | 248 | module_init(jmicron_init); |
250 | module_exit(jmicron_exit); | 249 | module_exit(jmicron_exit); |
diff --git a/drivers/i2c/busses/Kconfig b/drivers/i2c/busses/Kconfig
index 4d44a2db29dd..fb19dbb31e42 100644
--- a/drivers/i2c/busses/Kconfig
+++ b/drivers/i2c/busses/Kconfig
@@ -495,6 +495,16 @@ config I2C_VERSATILE | |||
495 | This driver can also be built as a module. If so, the module | 495 | This driver can also be built as a module. If so, the module |
496 | will be called i2c-versatile. | 496 | will be called i2c-versatile. |
497 | 497 | ||
498 | config I2C_ACORN | ||
499 | bool "Acorn IOC/IOMD I2C bus support" | ||
500 | depends on I2C && ARCH_ACORN | ||
501 | default y | ||
502 | select I2C_ALGOBIT | ||
503 | help | ||
504 | Say yes if you want to support the I2C bus on Acorn platforms. | ||
505 | |||
506 | If you don't know, say Y. | ||
507 | |||
498 | config I2C_VIA | 508 | config I2C_VIA |
499 | tristate "VIA 82C586B" | 509 | tristate "VIA 82C586B" |
500 | depends on I2C && PCI && EXPERIMENTAL | 510 | depends on I2C && PCI && EXPERIMENTAL |
diff --git a/drivers/i2c/busses/Makefile b/drivers/i2c/busses/Makefile
index 03505aa44bbf..290b54018354 100644
--- a/drivers/i2c/busses/Makefile
+++ b/drivers/i2c/busses/Makefile
@@ -42,6 +42,7 @@ obj-$(CONFIG_I2C_SIS630) += i2c-sis630.o | |||
42 | obj-$(CONFIG_I2C_SIS96X) += i2c-sis96x.o | 42 | obj-$(CONFIG_I2C_SIS96X) += i2c-sis96x.o |
43 | obj-$(CONFIG_I2C_STUB) += i2c-stub.o | 43 | obj-$(CONFIG_I2C_STUB) += i2c-stub.o |
44 | obj-$(CONFIG_I2C_VERSATILE) += i2c-versatile.o | 44 | obj-$(CONFIG_I2C_VERSATILE) += i2c-versatile.o |
45 | obj-$(CONFIG_I2C_ACORN) += i2c-acorn.o | ||
45 | obj-$(CONFIG_I2C_VIA) += i2c-via.o | 46 | obj-$(CONFIG_I2C_VIA) += i2c-via.o |
46 | obj-$(CONFIG_I2C_VIAPRO) += i2c-viapro.o | 47 | obj-$(CONFIG_I2C_VIAPRO) += i2c-viapro.o |
47 | obj-$(CONFIG_I2C_VOODOO3) += i2c-voodoo3.o | 48 | obj-$(CONFIG_I2C_VOODOO3) += i2c-voodoo3.o |
diff --git a/drivers/i2c/busses/i2c-acorn.c b/drivers/i2c/busses/i2c-acorn.c
new file mode 100644
index 000000000000..09bd7f40b90c
--- /dev/null
+++ b/drivers/i2c/busses/i2c-acorn.c
@@ -0,0 +1,97 @@ | |||
1 | /* | ||
2 | * linux/drivers/acorn/char/i2c.c | ||
3 | * | ||
4 | * Copyright (C) 2000 Russell King | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or modify | ||
7 | * it under the terms of the GNU General Public License version 2 as | ||
8 | * published by the Free Software Foundation. | ||
9 | * | ||
10 | * ARM IOC/IOMD i2c driver. | ||
11 | * | ||
12 | * On Acorn machines, the following i2c devices are on the bus: | ||
13 | * - PCF8583 real time clock & static RAM | ||
14 | */ | ||
15 | #include <linux/init.h> | ||
16 | #include <linux/i2c.h> | ||
17 | #include <linux/i2c-algo-bit.h> | ||
18 | |||
19 | #include <asm/hardware.h> | ||
20 | #include <asm/io.h> | ||
21 | #include <asm/hardware/ioc.h> | ||
22 | #include <asm/system.h> | ||
23 | |||
24 | #define FORCE_ONES 0xdc | ||
25 | #define SCL 0x02 | ||
26 | #define SDA 0x01 | ||
27 | |||
28 | /* | ||
29 | * We must preserve all non-i2c output bits in IOC_CONTROL. | ||
30 | * Note also that we need to preserve the value of SCL and | ||
31 | * SDA outputs as well (which may be different from the | ||
32 | * values read back from IOC_CONTROL). | ||
33 | */ | ||
34 | static u_int force_ones; | ||
35 | |||
36 | static void ioc_setscl(void *data, int state) | ||
37 | { | ||
38 | u_int ioc_control = ioc_readb(IOC_CONTROL) & ~(SCL | SDA); | ||
39 | u_int ones = force_ones; | ||
40 | |||
41 | if (state) | ||
42 | ones |= SCL; | ||
43 | else | ||
44 | ones &= ~SCL; | ||
45 | |||
46 | force_ones = ones; | ||
47 | |||
48 | ioc_writeb(ioc_control | ones, IOC_CONTROL); | ||
49 | } | ||
50 | |||
51 | static void ioc_setsda(void *data, int state) | ||
52 | { | ||
53 | u_int ioc_control = ioc_readb(IOC_CONTROL) & ~(SCL | SDA); | ||
54 | u_int ones = force_ones; | ||
55 | |||
56 | if (state) | ||
57 | ones |= SDA; | ||
58 | else | ||
59 | ones &= ~SDA; | ||
60 | |||
61 | force_ones = ones; | ||
62 | |||
63 | ioc_writeb(ioc_control | ones, IOC_CONTROL); | ||
64 | } | ||
65 | |||
66 | static int ioc_getscl(void *data) | ||
67 | { | ||
68 | return (ioc_readb(IOC_CONTROL) & SCL) != 0; | ||
69 | } | ||
70 | |||
71 | static int ioc_getsda(void *data) | ||
72 | { | ||
73 | return (ioc_readb(IOC_CONTROL) & SDA) != 0; | ||
74 | } | ||
75 | |||
76 | static struct i2c_algo_bit_data ioc_data = { | ||
77 | .setsda = ioc_setsda, | ||
78 | .setscl = ioc_setscl, | ||
79 | .getsda = ioc_getsda, | ||
80 | .getscl = ioc_getscl, | ||
81 | .udelay = 80, | ||
82 | .timeout = 100 | ||
83 | }; | ||
84 | |||
85 | static struct i2c_adapter ioc_ops = { | ||
86 | .id = I2C_HW_B_IOC, | ||
87 | .algo_data = &ioc_data, | ||
88 | }; | ||
89 | |||
90 | static int __init i2c_ioc_init(void) | ||
91 | { | ||
92 | force_ones = FORCE_ONES | SCL | SDA; | ||
93 | |||
94 | return i2c_bit_add_bus(&ioc_ops); | ||
95 | } | ||
96 | |||
97 | __initcall(i2c_ioc_init); | ||
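The one non-obvious thing in the new i2c-acorn.c is force_ones: IOC_CONTROL is read-modify-written, but as the comment above notes, the SCL/SDA values read back may differ from what was last driven (as expected for open-drain lines), so the driver caches the last driven state and ORs it back in on every write. A standalone sketch of that masking, with the register replaced by a plain variable purely for illustration:

#include <stdio.h>

#define FORCE_ONES 0xdc
#define SCL        0x02
#define SDA        0x01

static unsigned int ioc_control = 0xff; /* stand-in for the IOC_CONTROL register */
static unsigned int force_ones = FORCE_ONES | SCL | SDA;

/* Drive one line while preserving every other output bit and the cached
 * state of the other line, as ioc_setscl()/ioc_setsda() above do. */
static void set_line(unsigned int line, int state)
{
        unsigned int ctrl = ioc_control & ~(SCL | SDA);

        if (state)
                force_ones |= line;
        else
                force_ones &= ~line;

        ioc_control = ctrl | force_ones;  /* the real driver writes IOC_CONTROL here */
}

int main(void)
{
        set_line(SCL, 0);
        printf("after SCL low:  %#04x\n", ioc_control);
        set_line(SCL, 1);
        printf("after SCL high: %#04x\n", ioc_control);
        return 0;
}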
diff --git a/drivers/kvm/kvm.h b/drivers/kvm/kvm.h
index 04574a9d4430..0d122bf889db 100644
--- a/drivers/kvm/kvm.h
+++ b/drivers/kvm/kvm.h
@@ -14,6 +14,7 @@ | |||
14 | 14 | ||
15 | #include "vmx.h" | 15 | #include "vmx.h" |
16 | #include <linux/kvm.h> | 16 | #include <linux/kvm.h> |
17 | #include <linux/kvm_para.h> | ||
17 | 18 | ||
18 | #define CR0_PE_MASK (1ULL << 0) | 19 | #define CR0_PE_MASK (1ULL << 0) |
19 | #define CR0_TS_MASK (1ULL << 3) | 20 | #define CR0_TS_MASK (1ULL << 3) |
@@ -237,6 +238,9 @@ struct kvm_vcpu { | |||
237 | unsigned long cr0; | 238 | unsigned long cr0; |
238 | unsigned long cr2; | 239 | unsigned long cr2; |
239 | unsigned long cr3; | 240 | unsigned long cr3; |
241 | gpa_t para_state_gpa; | ||
242 | struct page *para_state_page; | ||
243 | gpa_t hypercall_gpa; | ||
240 | unsigned long cr4; | 244 | unsigned long cr4; |
241 | unsigned long cr8; | 245 | unsigned long cr8; |
242 | u64 pdptrs[4]; /* pae */ | 246 | u64 pdptrs[4]; /* pae */ |
@@ -305,6 +309,7 @@ struct kvm { | |||
305 | int busy; | 309 | int busy; |
306 | unsigned long rmap_overflow; | 310 | unsigned long rmap_overflow; |
307 | struct list_head vm_list; | 311 | struct list_head vm_list; |
312 | struct file *filp; | ||
308 | }; | 313 | }; |
309 | 314 | ||
310 | struct kvm_stat { | 315 | struct kvm_stat { |
@@ -339,7 +344,7 @@ struct kvm_arch_ops { | |||
339 | int (*vcpu_create)(struct kvm_vcpu *vcpu); | 344 | int (*vcpu_create)(struct kvm_vcpu *vcpu); |
340 | void (*vcpu_free)(struct kvm_vcpu *vcpu); | 345 | void (*vcpu_free)(struct kvm_vcpu *vcpu); |
341 | 346 | ||
342 | struct kvm_vcpu *(*vcpu_load)(struct kvm_vcpu *vcpu); | 347 | void (*vcpu_load)(struct kvm_vcpu *vcpu); |
343 | void (*vcpu_put)(struct kvm_vcpu *vcpu); | 348 | void (*vcpu_put)(struct kvm_vcpu *vcpu); |
344 | void (*vcpu_decache)(struct kvm_vcpu *vcpu); | 349 | void (*vcpu_decache)(struct kvm_vcpu *vcpu); |
345 | 350 | ||
@@ -382,6 +387,8 @@ struct kvm_arch_ops { | |||
382 | int (*run)(struct kvm_vcpu *vcpu, struct kvm_run *run); | 387 | int (*run)(struct kvm_vcpu *vcpu, struct kvm_run *run); |
383 | int (*vcpu_setup)(struct kvm_vcpu *vcpu); | 388 | int (*vcpu_setup)(struct kvm_vcpu *vcpu); |
384 | void (*skip_emulated_instruction)(struct kvm_vcpu *vcpu); | 389 | void (*skip_emulated_instruction)(struct kvm_vcpu *vcpu); |
390 | void (*patch_hypercall)(struct kvm_vcpu *vcpu, | ||
391 | unsigned char *hypercall_addr); | ||
385 | }; | 392 | }; |
386 | 393 | ||
387 | extern struct kvm_stat kvm_stat; | 394 | extern struct kvm_stat kvm_stat; |
@@ -476,6 +483,8 @@ void kvm_mmu_post_write(struct kvm_vcpu *vcpu, gpa_t gpa, int bytes); | |||
476 | int kvm_mmu_unprotect_page_virt(struct kvm_vcpu *vcpu, gva_t gva); | 483 | int kvm_mmu_unprotect_page_virt(struct kvm_vcpu *vcpu, gva_t gva); |
477 | void kvm_mmu_free_some_pages(struct kvm_vcpu *vcpu); | 484 | void kvm_mmu_free_some_pages(struct kvm_vcpu *vcpu); |
478 | 485 | ||
486 | int kvm_hypercall(struct kvm_vcpu *vcpu, struct kvm_run *run); | ||
487 | |||
479 | static inline int kvm_mmu_page_fault(struct kvm_vcpu *vcpu, gva_t gva, | 488 | static inline int kvm_mmu_page_fault(struct kvm_vcpu *vcpu, gva_t gva, |
480 | u32 error_code) | 489 | u32 error_code) |
481 | { | 490 | { |
@@ -523,7 +532,7 @@ static inline struct kvm_mmu_page *page_header(hpa_t shadow_page) | |||
523 | { | 532 | { |
524 | struct page *page = pfn_to_page(shadow_page >> PAGE_SHIFT); | 533 | struct page *page = pfn_to_page(shadow_page >> PAGE_SHIFT); |
525 | 534 | ||
526 | return (struct kvm_mmu_page *)page->private; | 535 | return (struct kvm_mmu_page *)page_private(page); |
527 | } | 536 | } |
528 | 537 | ||
529 | static inline u16 read_fs(void) | 538 | static inline u16 read_fs(void) |
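In the kvm.h hunk above, the arch vcpu_load hook now returns void; kvm_main.c below wraps it in vcpu_load()/vcpu_put() helpers that also take and release vcpu->mutex, and every per-vcpu ioctl switches from the old slot-based lookup to that bracket. Roughly, the pattern each of those handlers follows (sketch only; the body is a hypothetical placeholder):

static int kvm_vcpu_ioctl_example(struct kvm_vcpu *vcpu)
{
        int r;

        vcpu_load(vcpu);        /* lock vcpu->mutex, load arch state */
        r = do_vcpu_work(vcpu); /* hypothetical per-ioctl body */
        vcpu_put(vcpu);         /* put arch state back, unlock */

        return r;
}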
diff --git a/drivers/kvm/kvm_main.c b/drivers/kvm/kvm_main.c
index af866147ff25..a163bca38973 100644
--- a/drivers/kvm/kvm_main.c
+++ b/drivers/kvm/kvm_main.c
@@ -20,6 +20,7 @@ | |||
20 | #include <linux/kvm.h> | 20 | #include <linux/kvm.h> |
21 | #include <linux/module.h> | 21 | #include <linux/module.h> |
22 | #include <linux/errno.h> | 22 | #include <linux/errno.h> |
23 | #include <linux/magic.h> | ||
23 | #include <asm/processor.h> | 24 | #include <asm/processor.h> |
24 | #include <linux/percpu.h> | 25 | #include <linux/percpu.h> |
25 | #include <linux/gfp.h> | 26 | #include <linux/gfp.h> |
@@ -36,6 +37,9 @@ | |||
36 | #include <asm/desc.h> | 37 | #include <asm/desc.h> |
37 | #include <linux/sysdev.h> | 38 | #include <linux/sysdev.h> |
38 | #include <linux/cpu.h> | 39 | #include <linux/cpu.h> |
40 | #include <linux/file.h> | ||
41 | #include <linux/fs.h> | ||
42 | #include <linux/mount.h> | ||
39 | 43 | ||
40 | #include "x86_emulate.h" | 44 | #include "x86_emulate.h" |
41 | #include "segment_descriptor.h" | 45 | #include "segment_descriptor.h" |
@@ -72,6 +76,8 @@ static struct kvm_stats_debugfs_item { | |||
72 | 76 | ||
73 | static struct dentry *debugfs_dir; | 77 | static struct dentry *debugfs_dir; |
74 | 78 | ||
79 | struct vfsmount *kvmfs_mnt; | ||
80 | |||
75 | #define MAX_IO_MSRS 256 | 81 | #define MAX_IO_MSRS 256 |
76 | 82 | ||
77 | #define CR0_RESEVED_BITS 0xffffffff1ffaffc0ULL | 83 | #define CR0_RESEVED_BITS 0xffffffff1ffaffc0ULL |
@@ -90,6 +96,58 @@ struct segment_descriptor_64 { | |||
90 | 96 | ||
91 | #endif | 97 | #endif |
92 | 98 | ||
99 | static long kvm_vcpu_ioctl(struct file *file, unsigned int ioctl, | ||
100 | unsigned long arg); | ||
101 | |||
102 | static struct inode *kvmfs_inode(struct file_operations *fops) | ||
103 | { | ||
104 | int error = -ENOMEM; | ||
105 | struct inode *inode = new_inode(kvmfs_mnt->mnt_sb); | ||
106 | |||
107 | if (!inode) | ||
108 | goto eexit_1; | ||
109 | |||
110 | inode->i_fop = fops; | ||
111 | |||
112 | /* | ||
113 | * Mark the inode dirty from the very beginning, | ||
114 | * that way it will never be moved to the dirty | ||
115 | * list because mark_inode_dirty() will think | ||
116 | * that it already _is_ on the dirty list. | ||
117 | */ | ||
118 | inode->i_state = I_DIRTY; | ||
119 | inode->i_mode = S_IRUSR | S_IWUSR; | ||
120 | inode->i_uid = current->fsuid; | ||
121 | inode->i_gid = current->fsgid; | ||
122 | inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME; | ||
123 | return inode; | ||
124 | |||
125 | eexit_1: | ||
126 | return ERR_PTR(error); | ||
127 | } | ||
128 | |||
129 | static struct file *kvmfs_file(struct inode *inode, void *private_data) | ||
130 | { | ||
131 | struct file *file = get_empty_filp(); | ||
132 | |||
133 | if (!file) | ||
134 | return ERR_PTR(-ENFILE); | ||
135 | |||
136 | file->f_path.mnt = mntget(kvmfs_mnt); | ||
137 | file->f_path.dentry = d_alloc_anon(inode); | ||
138 | if (!file->f_path.dentry) | ||
139 | return ERR_PTR(-ENOMEM); | ||
140 | file->f_mapping = inode->i_mapping; | ||
141 | |||
142 | file->f_pos = 0; | ||
143 | file->f_flags = O_RDWR; | ||
144 | file->f_op = inode->i_fop; | ||
145 | file->f_mode = FMODE_READ | FMODE_WRITE; | ||
146 | file->f_version = 0; | ||
147 | file->private_data = private_data; | ||
148 | return file; | ||
149 | } | ||
150 | |||
93 | unsigned long segment_base(u16 selector) | 151 | unsigned long segment_base(u16 selector) |
94 | { | 152 | { |
95 | struct descriptor_table gdt; | 153 | struct descriptor_table gdt; |
@@ -126,10 +184,8 @@ static inline int valid_vcpu(int n) | |||
126 | return likely(n >= 0 && n < KVM_MAX_VCPUS); | 184 | return likely(n >= 0 && n < KVM_MAX_VCPUS); |
127 | } | 185 | } |
128 | 186 | ||
129 | int kvm_read_guest(struct kvm_vcpu *vcpu, | 187 | int kvm_read_guest(struct kvm_vcpu *vcpu, gva_t addr, unsigned long size, |
130 | gva_t addr, | 188 | void *dest) |
131 | unsigned long size, | ||
132 | void *dest) | ||
133 | { | 189 | { |
134 | unsigned char *host_buf = dest; | 190 | unsigned char *host_buf = dest; |
135 | unsigned long req_size = size; | 191 | unsigned long req_size = size; |
@@ -161,10 +217,8 @@ int kvm_read_guest(struct kvm_vcpu *vcpu, | |||
161 | } | 217 | } |
162 | EXPORT_SYMBOL_GPL(kvm_read_guest); | 218 | EXPORT_SYMBOL_GPL(kvm_read_guest); |
163 | 219 | ||
164 | int kvm_write_guest(struct kvm_vcpu *vcpu, | 220 | int kvm_write_guest(struct kvm_vcpu *vcpu, gva_t addr, unsigned long size, |
165 | gva_t addr, | 221 | void *data) |
166 | unsigned long size, | ||
167 | void *data) | ||
168 | { | 222 | { |
169 | unsigned char *host_buf = data; | 223 | unsigned char *host_buf = data; |
170 | unsigned long req_size = size; | 224 | unsigned long req_size = size; |
@@ -174,12 +228,15 @@ int kvm_write_guest(struct kvm_vcpu *vcpu, | |||
174 | unsigned now; | 228 | unsigned now; |
175 | unsigned offset; | 229 | unsigned offset; |
176 | hva_t guest_buf; | 230 | hva_t guest_buf; |
231 | gfn_t gfn; | ||
177 | 232 | ||
178 | paddr = gva_to_hpa(vcpu, addr); | 233 | paddr = gva_to_hpa(vcpu, addr); |
179 | 234 | ||
180 | if (is_error_hpa(paddr)) | 235 | if (is_error_hpa(paddr)) |
181 | break; | 236 | break; |
182 | 237 | ||
238 | gfn = vcpu->mmu.gva_to_gpa(vcpu, addr) >> PAGE_SHIFT; | ||
239 | mark_page_dirty(vcpu->kvm, gfn); | ||
183 | guest_buf = (hva_t)kmap_atomic( | 240 | guest_buf = (hva_t)kmap_atomic( |
184 | pfn_to_page(paddr >> PAGE_SHIFT), KM_USER0); | 241 | pfn_to_page(paddr >> PAGE_SHIFT), KM_USER0); |
185 | offset = addr & ~PAGE_MASK; | 242 | offset = addr & ~PAGE_MASK; |
@@ -195,24 +252,30 @@ int kvm_write_guest(struct kvm_vcpu *vcpu, | |||
195 | } | 252 | } |
196 | EXPORT_SYMBOL_GPL(kvm_write_guest); | 253 | EXPORT_SYMBOL_GPL(kvm_write_guest); |
197 | 254 | ||
198 | static int vcpu_slot(struct kvm_vcpu *vcpu) | 255 | /* |
256 | * Switches to specified vcpu, until a matching vcpu_put() | ||
257 | */ | ||
258 | static void vcpu_load(struct kvm_vcpu *vcpu) | ||
199 | { | 259 | { |
200 | return vcpu - vcpu->kvm->vcpus; | 260 | mutex_lock(&vcpu->mutex); |
261 | kvm_arch_ops->vcpu_load(vcpu); | ||
201 | } | 262 | } |
202 | 263 | ||
203 | /* | 264 | /* |
204 | * Switches to specified vcpu, until a matching vcpu_put() | 265 | * Switches to specified vcpu, until a matching vcpu_put(). Will return NULL |
266 | * if the slot is not populated. | ||
205 | */ | 267 | */ |
206 | static struct kvm_vcpu *vcpu_load(struct kvm *kvm, int vcpu_slot) | 268 | static struct kvm_vcpu *vcpu_load_slot(struct kvm *kvm, int slot) |
207 | { | 269 | { |
208 | struct kvm_vcpu *vcpu = &kvm->vcpus[vcpu_slot]; | 270 | struct kvm_vcpu *vcpu = &kvm->vcpus[slot]; |
209 | 271 | ||
210 | mutex_lock(&vcpu->mutex); | 272 | mutex_lock(&vcpu->mutex); |
211 | if (unlikely(!vcpu->vmcs)) { | 273 | if (!vcpu->vmcs) { |
212 | mutex_unlock(&vcpu->mutex); | 274 | mutex_unlock(&vcpu->mutex); |
213 | return NULL; | 275 | return NULL; |
214 | } | 276 | } |
215 | return kvm_arch_ops->vcpu_load(vcpu); | 277 | kvm_arch_ops->vcpu_load(vcpu); |
278 | return vcpu; | ||
216 | } | 279 | } |
217 | 280 | ||
218 | static void vcpu_put(struct kvm_vcpu *vcpu) | 281 | static void vcpu_put(struct kvm_vcpu *vcpu) |
@@ -221,13 +284,13 @@ static void vcpu_put(struct kvm_vcpu *vcpu) | |||
221 | mutex_unlock(&vcpu->mutex); | 284 | mutex_unlock(&vcpu->mutex); |
222 | } | 285 | } |
223 | 286 | ||
224 | static int kvm_dev_open(struct inode *inode, struct file *filp) | 287 | static struct kvm *kvm_create_vm(void) |
225 | { | 288 | { |
226 | struct kvm *kvm = kzalloc(sizeof(struct kvm), GFP_KERNEL); | 289 | struct kvm *kvm = kzalloc(sizeof(struct kvm), GFP_KERNEL); |
227 | int i; | 290 | int i; |
228 | 291 | ||
229 | if (!kvm) | 292 | if (!kvm) |
230 | return -ENOMEM; | 293 | return ERR_PTR(-ENOMEM); |
231 | 294 | ||
232 | spin_lock_init(&kvm->lock); | 295 | spin_lock_init(&kvm->lock); |
233 | INIT_LIST_HEAD(&kvm->active_mmu_pages); | 296 | INIT_LIST_HEAD(&kvm->active_mmu_pages); |
@@ -243,7 +306,11 @@ static int kvm_dev_open(struct inode *inode, struct file *filp) | |||
243 | list_add(&kvm->vm_list, &vm_list); | 306 | list_add(&kvm->vm_list, &vm_list); |
244 | spin_unlock(&kvm_lock); | 307 | spin_unlock(&kvm_lock); |
245 | } | 308 | } |
246 | filp->private_data = kvm; | 309 | return kvm; |
310 | } | ||
311 | |||
312 | static int kvm_dev_open(struct inode *inode, struct file *filp) | ||
313 | { | ||
247 | return 0; | 314 | return 0; |
248 | } | 315 | } |
249 | 316 | ||
@@ -281,9 +348,10 @@ static void kvm_free_physmem(struct kvm *kvm) | |||
281 | 348 | ||
282 | static void kvm_free_vcpu(struct kvm_vcpu *vcpu) | 349 | static void kvm_free_vcpu(struct kvm_vcpu *vcpu) |
283 | { | 350 | { |
284 | if (!vcpu_load(vcpu->kvm, vcpu_slot(vcpu))) | 351 | if (!vcpu->vmcs) |
285 | return; | 352 | return; |
286 | 353 | ||
354 | vcpu_load(vcpu); | ||
287 | kvm_mmu_destroy(vcpu); | 355 | kvm_mmu_destroy(vcpu); |
288 | vcpu_put(vcpu); | 356 | vcpu_put(vcpu); |
289 | kvm_arch_ops->vcpu_free(vcpu); | 357 | kvm_arch_ops->vcpu_free(vcpu); |
@@ -299,14 +367,24 @@ static void kvm_free_vcpus(struct kvm *kvm) | |||
299 | 367 | ||
300 | static int kvm_dev_release(struct inode *inode, struct file *filp) | 368 | static int kvm_dev_release(struct inode *inode, struct file *filp) |
301 | { | 369 | { |
302 | struct kvm *kvm = filp->private_data; | 370 | return 0; |
371 | } | ||
303 | 372 | ||
373 | static void kvm_destroy_vm(struct kvm *kvm) | ||
374 | { | ||
304 | spin_lock(&kvm_lock); | 375 | spin_lock(&kvm_lock); |
305 | list_del(&kvm->vm_list); | 376 | list_del(&kvm->vm_list); |
306 | spin_unlock(&kvm_lock); | 377 | spin_unlock(&kvm_lock); |
307 | kvm_free_vcpus(kvm); | 378 | kvm_free_vcpus(kvm); |
308 | kvm_free_physmem(kvm); | 379 | kvm_free_physmem(kvm); |
309 | kfree(kvm); | 380 | kfree(kvm); |
381 | } | ||
382 | |||
383 | static int kvm_vm_release(struct inode *inode, struct file *filp) | ||
384 | { | ||
385 | struct kvm *kvm = filp->private_data; | ||
386 | |||
387 | kvm_destroy_vm(kvm); | ||
310 | return 0; | 388 | return 0; |
311 | } | 389 | } |
312 | 390 | ||
@@ -457,7 +535,7 @@ EXPORT_SYMBOL_GPL(set_cr4); | |||
457 | void set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3) | 535 | void set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3) |
458 | { | 536 | { |
459 | if (is_long_mode(vcpu)) { | 537 | if (is_long_mode(vcpu)) { |
460 | if ( cr3 & CR3_L_MODE_RESEVED_BITS) { | 538 | if (cr3 & CR3_L_MODE_RESEVED_BITS) { |
461 | printk(KERN_DEBUG "set_cr3: #GP, reserved bits\n"); | 539 | printk(KERN_DEBUG "set_cr3: #GP, reserved bits\n"); |
462 | inject_gp(vcpu); | 540 | inject_gp(vcpu); |
463 | return; | 541 | return; |
@@ -533,55 +611,11 @@ void fx_init(struct kvm_vcpu *vcpu) | |||
533 | } | 611 | } |
534 | EXPORT_SYMBOL_GPL(fx_init); | 612 | EXPORT_SYMBOL_GPL(fx_init); |
535 | 613 | ||
536 | /* | 614 | static void do_remove_write_access(struct kvm_vcpu *vcpu, int slot) |
537 | * Creates some virtual cpus. Good luck creating more than one. | ||
538 | */ | ||
539 | static int kvm_dev_ioctl_create_vcpu(struct kvm *kvm, int n) | ||
540 | { | 615 | { |
541 | int r; | 616 | spin_lock(&vcpu->kvm->lock); |
542 | struct kvm_vcpu *vcpu; | 617 | kvm_mmu_slot_remove_write_access(vcpu, slot); |
543 | 618 | spin_unlock(&vcpu->kvm->lock); | |
544 | r = -EINVAL; | ||
545 | if (!valid_vcpu(n)) | ||
546 | goto out; | ||
547 | |||
548 | vcpu = &kvm->vcpus[n]; | ||
549 | |||
550 | mutex_lock(&vcpu->mutex); | ||
551 | |||
552 | if (vcpu->vmcs) { | ||
553 | mutex_unlock(&vcpu->mutex); | ||
554 | return -EEXIST; | ||
555 | } | ||
556 | |||
557 | vcpu->host_fx_image = (char*)ALIGN((hva_t)vcpu->fx_buf, | ||
558 | FX_IMAGE_ALIGN); | ||
559 | vcpu->guest_fx_image = vcpu->host_fx_image + FX_IMAGE_SIZE; | ||
560 | |||
561 | r = kvm_arch_ops->vcpu_create(vcpu); | ||
562 | if (r < 0) | ||
563 | goto out_free_vcpus; | ||
564 | |||
565 | r = kvm_mmu_create(vcpu); | ||
566 | if (r < 0) | ||
567 | goto out_free_vcpus; | ||
568 | |||
569 | kvm_arch_ops->vcpu_load(vcpu); | ||
570 | r = kvm_mmu_setup(vcpu); | ||
571 | if (r >= 0) | ||
572 | r = kvm_arch_ops->vcpu_setup(vcpu); | ||
573 | vcpu_put(vcpu); | ||
574 | |||
575 | if (r < 0) | ||
576 | goto out_free_vcpus; | ||
577 | |||
578 | return 0; | ||
579 | |||
580 | out_free_vcpus: | ||
581 | kvm_free_vcpu(vcpu); | ||
582 | mutex_unlock(&vcpu->mutex); | ||
583 | out: | ||
584 | return r; | ||
585 | } | 619 | } |
586 | 620 | ||
587 | /* | 621 | /* |
@@ -590,8 +624,8 @@ out: | |||
590 | * | 624 | * |
591 | * Discontiguous memory is allowed, mostly for framebuffers. | 625 | * Discontiguous memory is allowed, mostly for framebuffers. |
592 | */ | 626 | */ |
593 | static int kvm_dev_ioctl_set_memory_region(struct kvm *kvm, | 627 | static int kvm_vm_ioctl_set_memory_region(struct kvm *kvm, |
594 | struct kvm_memory_region *mem) | 628 | struct kvm_memory_region *mem) |
595 | { | 629 | { |
596 | int r; | 630 | int r; |
597 | gfn_t base_gfn; | 631 | gfn_t base_gfn; |
@@ -674,7 +708,7 @@ raced: | |||
674 | | __GFP_ZERO); | 708 | | __GFP_ZERO); |
675 | if (!new.phys_mem[i]) | 709 | if (!new.phys_mem[i]) |
676 | goto out_free; | 710 | goto out_free; |
677 | new.phys_mem[i]->private = 0; | 711 | set_page_private(new.phys_mem[i],0); |
678 | } | 712 | } |
679 | } | 713 | } |
680 | 714 | ||
@@ -711,9 +745,11 @@ raced: | |||
711 | for (i = 0; i < KVM_MAX_VCPUS; ++i) { | 745 | for (i = 0; i < KVM_MAX_VCPUS; ++i) { |
712 | struct kvm_vcpu *vcpu; | 746 | struct kvm_vcpu *vcpu; |
713 | 747 | ||
714 | vcpu = vcpu_load(kvm, i); | 748 | vcpu = vcpu_load_slot(kvm, i); |
715 | if (!vcpu) | 749 | if (!vcpu) |
716 | continue; | 750 | continue; |
751 | if (new.flags & KVM_MEM_LOG_DIRTY_PAGES) | ||
752 | do_remove_write_access(vcpu, mem->slot); | ||
717 | kvm_mmu_reset_context(vcpu); | 753 | kvm_mmu_reset_context(vcpu); |
718 | vcpu_put(vcpu); | 754 | vcpu_put(vcpu); |
719 | } | 755 | } |
@@ -729,18 +765,11 @@ out: | |||
729 | return r; | 765 | return r; |
730 | } | 766 | } |
731 | 767 | ||
732 | static void do_remove_write_access(struct kvm_vcpu *vcpu, int slot) | ||
733 | { | ||
734 | spin_lock(&vcpu->kvm->lock); | ||
735 | kvm_mmu_slot_remove_write_access(vcpu, slot); | ||
736 | spin_unlock(&vcpu->kvm->lock); | ||
737 | } | ||
738 | |||
739 | /* | 768 | /* |
740 | * Get (and clear) the dirty memory log for a memory slot. | 769 | * Get (and clear) the dirty memory log for a memory slot. |
741 | */ | 770 | */ |
742 | static int kvm_dev_ioctl_get_dirty_log(struct kvm *kvm, | 771 | static int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm, |
743 | struct kvm_dirty_log *log) | 772 | struct kvm_dirty_log *log) |
744 | { | 773 | { |
745 | struct kvm_memory_slot *memslot; | 774 | struct kvm_memory_slot *memslot; |
746 | int r, i; | 775 | int r, i; |
@@ -765,21 +794,21 @@ static int kvm_dev_ioctl_get_dirty_log(struct kvm *kvm, | |||
765 | if (!memslot->dirty_bitmap) | 794 | if (!memslot->dirty_bitmap) |
766 | goto out; | 795 | goto out; |
767 | 796 | ||
768 | n = ALIGN(memslot->npages, 8) / 8; | 797 | n = ALIGN(memslot->npages, BITS_PER_LONG) / 8; |
769 | 798 | ||
770 | for (i = 0; !any && i < n; ++i) | 799 | for (i = 0; !any && i < n/sizeof(long); ++i) |
771 | any = memslot->dirty_bitmap[i]; | 800 | any = memslot->dirty_bitmap[i]; |
772 | 801 | ||
773 | r = -EFAULT; | 802 | r = -EFAULT; |
774 | if (copy_to_user(log->dirty_bitmap, memslot->dirty_bitmap, n)) | 803 | if (copy_to_user(log->dirty_bitmap, memslot->dirty_bitmap, n)) |
775 | goto out; | 804 | goto out; |
776 | 805 | ||
777 | |||
778 | if (any) { | 806 | if (any) { |
779 | cleared = 0; | 807 | cleared = 0; |
780 | for (i = 0; i < KVM_MAX_VCPUS; ++i) { | 808 | for (i = 0; i < KVM_MAX_VCPUS; ++i) { |
781 | struct kvm_vcpu *vcpu = vcpu_load(kvm, i); | 809 | struct kvm_vcpu *vcpu; |
782 | 810 | ||
811 | vcpu = vcpu_load_slot(kvm, i); | ||
783 | if (!vcpu) | 812 | if (!vcpu) |
784 | continue; | 813 | continue; |
785 | if (!cleared) { | 814 | if (!cleared) { |
@@ -903,8 +932,9 @@ static int emulator_read_emulated(unsigned long addr, | |||
903 | return X86EMUL_CONTINUE; | 932 | return X86EMUL_CONTINUE; |
904 | else { | 933 | else { |
905 | gpa_t gpa = vcpu->mmu.gva_to_gpa(vcpu, addr); | 934 | gpa_t gpa = vcpu->mmu.gva_to_gpa(vcpu, addr); |
935 | |||
906 | if (gpa == UNMAPPED_GVA) | 936 | if (gpa == UNMAPPED_GVA) |
907 | return vcpu_printf(vcpu, "not present\n"), X86EMUL_PROPAGATE_FAULT; | 937 | return X86EMUL_PROPAGATE_FAULT; |
908 | vcpu->mmio_needed = 1; | 938 | vcpu->mmio_needed = 1; |
909 | vcpu->mmio_phys_addr = gpa; | 939 | vcpu->mmio_phys_addr = gpa; |
910 | vcpu->mmio_size = bytes; | 940 | vcpu->mmio_size = bytes; |
@@ -928,6 +958,7 @@ static int emulator_write_phys(struct kvm_vcpu *vcpu, gpa_t gpa, | |||
928 | return 0; | 958 | return 0; |
929 | page = gfn_to_page(m, gpa >> PAGE_SHIFT); | 959 | page = gfn_to_page(m, gpa >> PAGE_SHIFT); |
930 | kvm_mmu_pre_write(vcpu, gpa, bytes); | 960 | kvm_mmu_pre_write(vcpu, gpa, bytes); |
961 | mark_page_dirty(vcpu->kvm, gpa >> PAGE_SHIFT); | ||
931 | virt = kmap_atomic(page, KM_USER0); | 962 | virt = kmap_atomic(page, KM_USER0); |
932 | memcpy(virt + offset_in_page(gpa), &val, bytes); | 963 | memcpy(virt + offset_in_page(gpa), &val, bytes); |
933 | kunmap_atomic(virt, KM_USER0); | 964 | kunmap_atomic(virt, KM_USER0); |
@@ -1142,6 +1173,42 @@ int emulate_instruction(struct kvm_vcpu *vcpu, | |||
1142 | } | 1173 | } |
1143 | EXPORT_SYMBOL_GPL(emulate_instruction); | 1174 | EXPORT_SYMBOL_GPL(emulate_instruction); |
1144 | 1175 | ||
1176 | int kvm_hypercall(struct kvm_vcpu *vcpu, struct kvm_run *run) | ||
1177 | { | ||
1178 | unsigned long nr, a0, a1, a2, a3, a4, a5, ret; | ||
1179 | |||
1180 | kvm_arch_ops->decache_regs(vcpu); | ||
1181 | ret = -KVM_EINVAL; | ||
1182 | #ifdef CONFIG_X86_64 | ||
1183 | if (is_long_mode(vcpu)) { | ||
1184 | nr = vcpu->regs[VCPU_REGS_RAX]; | ||
1185 | a0 = vcpu->regs[VCPU_REGS_RDI]; | ||
1186 | a1 = vcpu->regs[VCPU_REGS_RSI]; | ||
1187 | a2 = vcpu->regs[VCPU_REGS_RDX]; | ||
1188 | a3 = vcpu->regs[VCPU_REGS_RCX]; | ||
1189 | a4 = vcpu->regs[VCPU_REGS_R8]; | ||
1190 | a5 = vcpu->regs[VCPU_REGS_R9]; | ||
1191 | } else | ||
1192 | #endif | ||
1193 | { | ||
1194 | nr = vcpu->regs[VCPU_REGS_RBX] & -1u; | ||
1195 | a0 = vcpu->regs[VCPU_REGS_RAX] & -1u; | ||
1196 | a1 = vcpu->regs[VCPU_REGS_RCX] & -1u; | ||
1197 | a2 = vcpu->regs[VCPU_REGS_RDX] & -1u; | ||
1198 | a3 = vcpu->regs[VCPU_REGS_RSI] & -1u; | ||
1199 | a4 = vcpu->regs[VCPU_REGS_RDI] & -1u; | ||
1200 | a5 = vcpu->regs[VCPU_REGS_RBP] & -1u; | ||
1201 | } | ||
1202 | switch (nr) { | ||
1203 | default: | ||
1204 | ; | ||
1205 | } | ||
1206 | vcpu->regs[VCPU_REGS_RAX] = ret; | ||
1207 | kvm_arch_ops->cache_regs(vcpu); | ||
1208 | return 1; | ||
1209 | } | ||
1210 | EXPORT_SYMBOL_GPL(kvm_hypercall); | ||
1211 | |||
1145 | static u64 mk_cr_64(u64 curr_cr, u32 new_val) | 1212 | static u64 mk_cr_64(u64 curr_cr, u32 new_val) |
1146 | { | 1213 | { |
1147 | return (curr_cr & ~((1ULL << 32) - 1)) | new_val; | 1214 | return (curr_cr & ~((1ULL << 32) - 1)) | new_val; |
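kvm_hypercall() above fixes the register convention for the new paravirtual hypercall: on a 64-bit guest the number is taken from RAX and the arguments from RDI, RSI, RDX, RCX, R8 and R9, with the result returned in RAX (32-bit guests use RBX/RAX/RCX/RDX/RSI/RDI/RBP instead). The guest side is assumed here to call through the page it registered as hypercall_gpa, which the new ->patch_hypercall() hook populates; a hedged sketch of a one-argument wrapper under that assumption:

/* Sketch only: 64-bit guest-side wrapper matching the register layout
 * decoded by kvm_hypercall() above.  hypercall_page is assumed to be the
 * guest mapping of the page passed in as hypercall_gpa. */
extern char hypercall_page[];

static inline long kvm_hypercall1(unsigned long nr, unsigned long a0)
{
        long ret;

        asm volatile("call *%[page]"
                     : "=a" (ret)
                     : "a" (nr), "D" (a0), [page] "r" (hypercall_page)
                     : "memory");
        return ret;
}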
@@ -1208,6 +1275,75 @@ void realmode_set_cr(struct kvm_vcpu *vcpu, int cr, unsigned long val, | |||
1208 | } | 1275 | } |
1209 | } | 1276 | } |
1210 | 1277 | ||
1278 | /* | ||
1279 | * Register the para guest with the host: | ||
1280 | */ | ||
1281 | static int vcpu_register_para(struct kvm_vcpu *vcpu, gpa_t para_state_gpa) | ||
1282 | { | ||
1283 | struct kvm_vcpu_para_state *para_state; | ||
1284 | hpa_t para_state_hpa, hypercall_hpa; | ||
1285 | struct page *para_state_page; | ||
1286 | unsigned char *hypercall; | ||
1287 | gpa_t hypercall_gpa; | ||
1288 | |||
1289 | printk(KERN_DEBUG "kvm: guest trying to enter paravirtual mode\n"); | ||
1290 | printk(KERN_DEBUG ".... para_state_gpa: %08Lx\n", para_state_gpa); | ||
1291 | |||
1292 | /* | ||
1293 | * Needs to be page aligned: | ||
1294 | */ | ||
1295 | if (para_state_gpa != PAGE_ALIGN(para_state_gpa)) | ||
1296 | goto err_gp; | ||
1297 | |||
1298 | para_state_hpa = gpa_to_hpa(vcpu, para_state_gpa); | ||
1299 | printk(KERN_DEBUG ".... para_state_hpa: %08Lx\n", para_state_hpa); | ||
1300 | if (is_error_hpa(para_state_hpa)) | ||
1301 | goto err_gp; | ||
1302 | |||
1303 | mark_page_dirty(vcpu->kvm, para_state_gpa >> PAGE_SHIFT); | ||
1304 | para_state_page = pfn_to_page(para_state_hpa >> PAGE_SHIFT); | ||
1305 | para_state = kmap_atomic(para_state_page, KM_USER0); | ||
1306 | |||
1307 | printk(KERN_DEBUG ".... guest version: %d\n", para_state->guest_version); | ||
1308 | printk(KERN_DEBUG ".... size: %d\n", para_state->size); | ||
1309 | |||
1310 | para_state->host_version = KVM_PARA_API_VERSION; | ||
1311 | /* | ||
1312 | * We cannot support guests that try to register themselves | ||
1313 | * with a newer API version than the host supports: | ||
1314 | */ | ||
1315 | if (para_state->guest_version > KVM_PARA_API_VERSION) { | ||
1316 | para_state->ret = -KVM_EINVAL; | ||
1317 | goto err_kunmap_skip; | ||
1318 | } | ||
1319 | |||
1320 | hypercall_gpa = para_state->hypercall_gpa; | ||
1321 | hypercall_hpa = gpa_to_hpa(vcpu, hypercall_gpa); | ||
1322 | printk(KERN_DEBUG ".... hypercall_hpa: %08Lx\n", hypercall_hpa); | ||
1323 | if (is_error_hpa(hypercall_hpa)) { | ||
1324 | para_state->ret = -KVM_EINVAL; | ||
1325 | goto err_kunmap_skip; | ||
1326 | } | ||
1327 | |||
1328 | printk(KERN_DEBUG "kvm: para guest successfully registered.\n"); | ||
1329 | vcpu->para_state_page = para_state_page; | ||
1330 | vcpu->para_state_gpa = para_state_gpa; | ||
1331 | vcpu->hypercall_gpa = hypercall_gpa; | ||
1332 | |||
1333 | mark_page_dirty(vcpu->kvm, hypercall_gpa >> PAGE_SHIFT); | ||
1334 | hypercall = kmap_atomic(pfn_to_page(hypercall_hpa >> PAGE_SHIFT), | ||
1335 | KM_USER1) + (hypercall_hpa & ~PAGE_MASK); | ||
1336 | kvm_arch_ops->patch_hypercall(vcpu, hypercall); | ||
1337 | kunmap_atomic(hypercall, KM_USER1); | ||
1338 | |||
1339 | para_state->ret = 0; | ||
1340 | err_kunmap_skip: | ||
1341 | kunmap_atomic(para_state, KM_USER0); | ||
1342 | return 0; | ||
1343 | err_gp: | ||
1344 | return 1; | ||
1345 | } | ||
1346 | |||
1211 | int kvm_get_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata) | 1347 | int kvm_get_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata) |
1212 | { | 1348 | { |
1213 | u64 data; | 1349 | u64 data; |
@@ -1316,6 +1452,12 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 data) | |||
1316 | case MSR_IA32_MISC_ENABLE: | 1452 | case MSR_IA32_MISC_ENABLE: |
1317 | vcpu->ia32_misc_enable_msr = data; | 1453 | vcpu->ia32_misc_enable_msr = data; |
1318 | break; | 1454 | break; |
1455 | /* | ||
1456 | * This is the 'probe whether the host is KVM' logic: | ||
1457 | */ | ||
1458 | case MSR_KVM_API_MAGIC: | ||
1459 | return vcpu_register_para(vcpu, data); | ||
1460 | |||
1319 | default: | 1461 | default: |
1320 | printk(KERN_ERR "kvm: unhandled wrmsr: 0x%x\n", msr); | 1462 | printk(KERN_ERR "kvm: unhandled wrmsr: 0x%x\n", msr); |
1321 | return 1; | 1463 | return 1; |
@@ -1338,8 +1480,7 @@ void kvm_resched(struct kvm_vcpu *vcpu) | |||
1338 | { | 1480 | { |
1339 | vcpu_put(vcpu); | 1481 | vcpu_put(vcpu); |
1340 | cond_resched(); | 1482 | cond_resched(); |
1341 | /* Cannot fail - no vcpu unplug yet. */ | 1483 | vcpu_load(vcpu); |
1342 | vcpu_load(vcpu->kvm, vcpu_slot(vcpu)); | ||
1343 | } | 1484 | } |
1344 | EXPORT_SYMBOL_GPL(kvm_resched); | 1485 | EXPORT_SYMBOL_GPL(kvm_resched); |
1345 | 1486 | ||
@@ -1361,17 +1502,11 @@ void save_msrs(struct vmx_msr_entry *e, int n) | |||
1361 | } | 1502 | } |
1362 | EXPORT_SYMBOL_GPL(save_msrs); | 1503 | EXPORT_SYMBOL_GPL(save_msrs); |
1363 | 1504 | ||
1364 | static int kvm_dev_ioctl_run(struct kvm *kvm, struct kvm_run *kvm_run) | 1505 | static int kvm_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) |
1365 | { | 1506 | { |
1366 | struct kvm_vcpu *vcpu; | ||
1367 | int r; | 1507 | int r; |
1368 | 1508 | ||
1369 | if (!valid_vcpu(kvm_run->vcpu)) | 1509 | vcpu_load(vcpu); |
1370 | return -EINVAL; | ||
1371 | |||
1372 | vcpu = vcpu_load(kvm, kvm_run->vcpu); | ||
1373 | if (!vcpu) | ||
1374 | return -ENOENT; | ||
1375 | 1510 | ||
1376 | /* re-sync apic's tpr */ | 1511 | /* re-sync apic's tpr */ |
1377 | vcpu->cr8 = kvm_run->cr8; | 1512 | vcpu->cr8 = kvm_run->cr8; |
@@ -1394,16 +1529,10 @@ static int kvm_dev_ioctl_run(struct kvm *kvm, struct kvm_run *kvm_run) | |||
1394 | return r; | 1529 | return r; |
1395 | } | 1530 | } |
1396 | 1531 | ||
1397 | static int kvm_dev_ioctl_get_regs(struct kvm *kvm, struct kvm_regs *regs) | 1532 | static int kvm_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, |
1533 | struct kvm_regs *regs) | ||
1398 | { | 1534 | { |
1399 | struct kvm_vcpu *vcpu; | 1535 | vcpu_load(vcpu); |
1400 | |||
1401 | if (!valid_vcpu(regs->vcpu)) | ||
1402 | return -EINVAL; | ||
1403 | |||
1404 | vcpu = vcpu_load(kvm, regs->vcpu); | ||
1405 | if (!vcpu) | ||
1406 | return -ENOENT; | ||
1407 | 1536 | ||
1408 | kvm_arch_ops->cache_regs(vcpu); | 1537 | kvm_arch_ops->cache_regs(vcpu); |
1409 | 1538 | ||
@@ -1440,16 +1569,10 @@ static int kvm_dev_ioctl_get_regs(struct kvm *kvm, struct kvm_regs *regs) | |||
1440 | return 0; | 1569 | return 0; |
1441 | } | 1570 | } |
1442 | 1571 | ||
1443 | static int kvm_dev_ioctl_set_regs(struct kvm *kvm, struct kvm_regs *regs) | 1572 | static int kvm_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, |
1573 | struct kvm_regs *regs) | ||
1444 | { | 1574 | { |
1445 | struct kvm_vcpu *vcpu; | 1575 | vcpu_load(vcpu); |
1446 | |||
1447 | if (!valid_vcpu(regs->vcpu)) | ||
1448 | return -EINVAL; | ||
1449 | |||
1450 | vcpu = vcpu_load(kvm, regs->vcpu); | ||
1451 | if (!vcpu) | ||
1452 | return -ENOENT; | ||
1453 | 1576 | ||
1454 | vcpu->regs[VCPU_REGS_RAX] = regs->rax; | 1577 | vcpu->regs[VCPU_REGS_RAX] = regs->rax; |
1455 | vcpu->regs[VCPU_REGS_RBX] = regs->rbx; | 1578 | vcpu->regs[VCPU_REGS_RBX] = regs->rbx; |
@@ -1486,16 +1609,12 @@ static void get_segment(struct kvm_vcpu *vcpu, | |||
1486 | return kvm_arch_ops->get_segment(vcpu, var, seg); | 1609 | return kvm_arch_ops->get_segment(vcpu, var, seg); |
1487 | } | 1610 | } |
1488 | 1611 | ||
1489 | static int kvm_dev_ioctl_get_sregs(struct kvm *kvm, struct kvm_sregs *sregs) | 1612 | static int kvm_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu, |
1613 | struct kvm_sregs *sregs) | ||
1490 | { | 1614 | { |
1491 | struct kvm_vcpu *vcpu; | ||
1492 | struct descriptor_table dt; | 1615 | struct descriptor_table dt; |
1493 | 1616 | ||
1494 | if (!valid_vcpu(sregs->vcpu)) | 1617 | vcpu_load(vcpu); |
1495 | return -EINVAL; | ||
1496 | vcpu = vcpu_load(kvm, sregs->vcpu); | ||
1497 | if (!vcpu) | ||
1498 | return -ENOENT; | ||
1499 | 1618 | ||
1500 | get_segment(vcpu, &sregs->cs, VCPU_SREG_CS); | 1619 | get_segment(vcpu, &sregs->cs, VCPU_SREG_CS); |
1501 | get_segment(vcpu, &sregs->ds, VCPU_SREG_DS); | 1620 | get_segment(vcpu, &sregs->ds, VCPU_SREG_DS); |
@@ -1537,18 +1656,14 @@ static void set_segment(struct kvm_vcpu *vcpu, | |||
1537 | return kvm_arch_ops->set_segment(vcpu, var, seg); | 1656 | return kvm_arch_ops->set_segment(vcpu, var, seg); |
1538 | } | 1657 | } |
1539 | 1658 | ||
1540 | static int kvm_dev_ioctl_set_sregs(struct kvm *kvm, struct kvm_sregs *sregs) | 1659 | static int kvm_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu, |
1660 | struct kvm_sregs *sregs) | ||
1541 | { | 1661 | { |
1542 | struct kvm_vcpu *vcpu; | ||
1543 | int mmu_reset_needed = 0; | 1662 | int mmu_reset_needed = 0; |
1544 | int i; | 1663 | int i; |
1545 | struct descriptor_table dt; | 1664 | struct descriptor_table dt; |
1546 | 1665 | ||
1547 | if (!valid_vcpu(sregs->vcpu)) | 1666 | vcpu_load(vcpu); |
1548 | return -EINVAL; | ||
1549 | vcpu = vcpu_load(kvm, sregs->vcpu); | ||
1550 | if (!vcpu) | ||
1551 | return -ENOENT; | ||
1552 | 1667 | ||
1553 | set_segment(vcpu, &sregs->cs, VCPU_SREG_CS); | 1668 | set_segment(vcpu, &sregs->cs, VCPU_SREG_CS); |
1554 | set_segment(vcpu, &sregs->ds, VCPU_SREG_DS); | 1669 | set_segment(vcpu, &sregs->ds, VCPU_SREG_DS); |
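All of the register accessors are now per-vcpu ioctls on a vcpu file descriptor instead of /dev/kvm ioctls carrying a vcpu slot, which is also why the copy_from_user() before a GET becomes a memset() further down. A minimal userspace sketch of the new calling convention (vcpu_fd comes from KVM_CREATE_VCPU, introduced later in this patch):

    #include <sys/ioctl.h>
    #include <linux/kvm.h>

    /* Read, tweak and write back the general-purpose registers of one vcpu. */
    static int bump_rip(int vcpu_fd)
    {
            struct kvm_regs regs;

            if (ioctl(vcpu_fd, KVM_GET_REGS, &regs) < 0)
                    return -1;
            regs.rip += 1;                    /* purely illustrative change */
            return ioctl(vcpu_fd, KVM_SET_REGS, &regs);
    }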
@@ -1654,20 +1769,14 @@ static int do_set_msr(struct kvm_vcpu *vcpu, unsigned index, u64 *data) | |||
1654 | * | 1769 | * |
1655 | * @return number of msrs set successfully. | 1770 | * @return number of msrs set successfully. |
1656 | */ | 1771 | */ |
1657 | static int __msr_io(struct kvm *kvm, struct kvm_msrs *msrs, | 1772 | static int __msr_io(struct kvm_vcpu *vcpu, struct kvm_msrs *msrs, |
1658 | struct kvm_msr_entry *entries, | 1773 | struct kvm_msr_entry *entries, |
1659 | int (*do_msr)(struct kvm_vcpu *vcpu, | 1774 | int (*do_msr)(struct kvm_vcpu *vcpu, |
1660 | unsigned index, u64 *data)) | 1775 | unsigned index, u64 *data)) |
1661 | { | 1776 | { |
1662 | struct kvm_vcpu *vcpu; | ||
1663 | int i; | 1777 | int i; |
1664 | 1778 | ||
1665 | if (!valid_vcpu(msrs->vcpu)) | 1779 | vcpu_load(vcpu); |
1666 | return -EINVAL; | ||
1667 | |||
1668 | vcpu = vcpu_load(kvm, msrs->vcpu); | ||
1669 | if (!vcpu) | ||
1670 | return -ENOENT; | ||
1671 | 1780 | ||
1672 | for (i = 0; i < msrs->nmsrs; ++i) | 1781 | for (i = 0; i < msrs->nmsrs; ++i) |
1673 | if (do_msr(vcpu, entries[i].index, &entries[i].data)) | 1782 | if (do_msr(vcpu, entries[i].index, &entries[i].data)) |
@@ -1683,7 +1792,7 @@ static int __msr_io(struct kvm *kvm, struct kvm_msrs *msrs, | |||
1683 | * | 1792 | * |
1684 | * @return number of msrs set successfully. | 1793 | * @return number of msrs set successfully. |
1685 | */ | 1794 | */ |
1686 | static int msr_io(struct kvm *kvm, struct kvm_msrs __user *user_msrs, | 1795 | static int msr_io(struct kvm_vcpu *vcpu, struct kvm_msrs __user *user_msrs, |
1687 | int (*do_msr)(struct kvm_vcpu *vcpu, | 1796 | int (*do_msr)(struct kvm_vcpu *vcpu, |
1688 | unsigned index, u64 *data), | 1797 | unsigned index, u64 *data), |
1689 | int writeback) | 1798 | int writeback) |
@@ -1711,7 +1820,7 @@ static int msr_io(struct kvm *kvm, struct kvm_msrs __user *user_msrs, | |||
1711 | if (copy_from_user(entries, user_msrs->entries, size)) | 1820 | if (copy_from_user(entries, user_msrs->entries, size)) |
1712 | goto out_free; | 1821 | goto out_free; |
1713 | 1822 | ||
1714 | r = n = __msr_io(kvm, &msrs, entries, do_msr); | 1823 | r = n = __msr_io(vcpu, &msrs, entries, do_msr); |
1715 | if (r < 0) | 1824 | if (r < 0) |
1716 | goto out_free; | 1825 | goto out_free; |
1717 | 1826 | ||
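msr_io() still copies a struct kvm_msrs header followed by nmsrs kvm_msr_entry records from userspace; it just resolves the vcpu from the fd instead of from a vcpu field in the structure. A userspace sketch of setting one MSR through the per-vcpu fd; the choice of index is arbitrary here:

    #include <string.h>
    #include <sys/ioctl.h>
    #include <linux/kvm.h>

    static int set_one_msr(int vcpu_fd, __u32 index, __u64 value)
    {
            struct {
                    struct kvm_msrs header;
                    struct kvm_msr_entry entry;  /* must follow the header directly */
            } msrs;

            memset(&msrs, 0, sizeof(msrs));
            msrs.header.nmsrs = 1;
            msrs.entry.index = index;
            msrs.entry.data = value;

            /* The ioctl returns the number of MSRs actually processed. */
            return ioctl(vcpu_fd, KVM_SET_MSRS, &msrs) == 1 ? 0 : -1;
    }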
@@ -1730,38 +1839,31 @@ out: | |||
1730 | /* | 1839 | /* |
1731 | * Translate a guest virtual address to a guest physical address. | 1840 | * Translate a guest virtual address to a guest physical address. |
1732 | */ | 1841 | */ |
1733 | static int kvm_dev_ioctl_translate(struct kvm *kvm, struct kvm_translation *tr) | 1842 | static int kvm_vcpu_ioctl_translate(struct kvm_vcpu *vcpu, |
1843 | struct kvm_translation *tr) | ||
1734 | { | 1844 | { |
1735 | unsigned long vaddr = tr->linear_address; | 1845 | unsigned long vaddr = tr->linear_address; |
1736 | struct kvm_vcpu *vcpu; | ||
1737 | gpa_t gpa; | 1846 | gpa_t gpa; |
1738 | 1847 | ||
1739 | vcpu = vcpu_load(kvm, tr->vcpu); | 1848 | vcpu_load(vcpu); |
1740 | if (!vcpu) | 1849 | spin_lock(&vcpu->kvm->lock); |
1741 | return -ENOENT; | ||
1742 | spin_lock(&kvm->lock); | ||
1743 | gpa = vcpu->mmu.gva_to_gpa(vcpu, vaddr); | 1850 | gpa = vcpu->mmu.gva_to_gpa(vcpu, vaddr); |
1744 | tr->physical_address = gpa; | 1851 | tr->physical_address = gpa; |
1745 | tr->valid = gpa != UNMAPPED_GVA; | 1852 | tr->valid = gpa != UNMAPPED_GVA; |
1746 | tr->writeable = 1; | 1853 | tr->writeable = 1; |
1747 | tr->usermode = 0; | 1854 | tr->usermode = 0; |
1748 | spin_unlock(&kvm->lock); | 1855 | spin_unlock(&vcpu->kvm->lock); |
1749 | vcpu_put(vcpu); | 1856 | vcpu_put(vcpu); |
1750 | 1857 | ||
1751 | return 0; | 1858 | return 0; |
1752 | } | 1859 | } |
1753 | 1860 | ||
1754 | static int kvm_dev_ioctl_interrupt(struct kvm *kvm, struct kvm_interrupt *irq) | 1861 | static int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu, |
1862 | struct kvm_interrupt *irq) | ||
1755 | { | 1863 | { |
1756 | struct kvm_vcpu *vcpu; | ||
1757 | |||
1758 | if (!valid_vcpu(irq->vcpu)) | ||
1759 | return -EINVAL; | ||
1760 | if (irq->irq < 0 || irq->irq >= 256) | 1864 | if (irq->irq < 0 || irq->irq >= 256) |
1761 | return -EINVAL; | 1865 | return -EINVAL; |
1762 | vcpu = vcpu_load(kvm, irq->vcpu); | 1866 | vcpu_load(vcpu); |
1763 | if (!vcpu) | ||
1764 | return -ENOENT; | ||
1765 | 1867 | ||
1766 | set_bit(irq->irq, vcpu->irq_pending); | 1868 | set_bit(irq->irq, vcpu->irq_pending); |
1767 | set_bit(irq->irq / BITS_PER_LONG, &vcpu->irq_summary); | 1869 | set_bit(irq->irq / BITS_PER_LONG, &vcpu->irq_summary); |
@@ -1771,17 +1873,12 @@ static int kvm_dev_ioctl_interrupt(struct kvm *kvm, struct kvm_interrupt *irq) | |||
1771 | return 0; | 1873 | return 0; |
1772 | } | 1874 | } |
1773 | 1875 | ||
1774 | static int kvm_dev_ioctl_debug_guest(struct kvm *kvm, | 1876 | static int kvm_vcpu_ioctl_debug_guest(struct kvm_vcpu *vcpu, |
1775 | struct kvm_debug_guest *dbg) | 1877 | struct kvm_debug_guest *dbg) |
1776 | { | 1878 | { |
1777 | struct kvm_vcpu *vcpu; | ||
1778 | int r; | 1879 | int r; |
1779 | 1880 | ||
1780 | if (!valid_vcpu(dbg->vcpu)) | 1881 | vcpu_load(vcpu); |
1781 | return -EINVAL; | ||
1782 | vcpu = vcpu_load(kvm, dbg->vcpu); | ||
1783 | if (!vcpu) | ||
1784 | return -ENOENT; | ||
1785 | 1882 | ||
1786 | r = kvm_arch_ops->set_guest_debug(vcpu, dbg); | 1883 | r = kvm_arch_ops->set_guest_debug(vcpu, dbg); |
1787 | 1884 | ||
@@ -1790,30 +1887,129 @@ static int kvm_dev_ioctl_debug_guest(struct kvm *kvm, | |||
1790 | return r; | 1887 | return r; |
1791 | } | 1888 | } |
1792 | 1889 | ||
1793 | static long kvm_dev_ioctl(struct file *filp, | 1890 | static int kvm_vcpu_release(struct inode *inode, struct file *filp) |
1794 | unsigned int ioctl, unsigned long arg) | ||
1795 | { | 1891 | { |
1796 | struct kvm *kvm = filp->private_data; | 1892 | struct kvm_vcpu *vcpu = filp->private_data; |
1893 | |||
1894 | fput(vcpu->kvm->filp); | ||
1895 | return 0; | ||
1896 | } | ||
1897 | |||
1898 | static struct file_operations kvm_vcpu_fops = { | ||
1899 | .release = kvm_vcpu_release, | ||
1900 | .unlocked_ioctl = kvm_vcpu_ioctl, | ||
1901 | .compat_ioctl = kvm_vcpu_ioctl, | ||
1902 | }; | ||
1903 | |||
1904 | /* | ||
1905 | * Allocates an inode for the vcpu. | ||
1906 | */ | ||
1907 | static int create_vcpu_fd(struct kvm_vcpu *vcpu) | ||
1908 | { | ||
1909 | int fd, r; | ||
1910 | struct inode *inode; | ||
1911 | struct file *file; | ||
1912 | |||
1913 | atomic_inc(&vcpu->kvm->filp->f_count); | ||
1914 | inode = kvmfs_inode(&kvm_vcpu_fops); | ||
1915 | if (IS_ERR(inode)) { | ||
1916 | r = PTR_ERR(inode); | ||
1917 | goto out1; | ||
1918 | } | ||
1919 | |||
1920 | file = kvmfs_file(inode, vcpu); | ||
1921 | if (IS_ERR(file)) { | ||
1922 | r = PTR_ERR(file); | ||
1923 | goto out2; | ||
1924 | } | ||
1925 | |||
1926 | r = get_unused_fd(); | ||
1927 | if (r < 0) | ||
1928 | goto out3; | ||
1929 | fd = r; | ||
1930 | fd_install(fd, file); | ||
1931 | |||
1932 | return fd; | ||
1933 | |||
1934 | out3: | ||
1935 | fput(file); | ||
1936 | out2: | ||
1937 | iput(inode); | ||
1938 | out1: | ||
1939 | fput(vcpu->kvm->filp); | ||
1940 | return r; | ||
1941 | } | ||
1942 | |||
1943 | /* | ||
1944 | * Creates some virtual cpus. Good luck creating more than one. | ||
1945 | */ | ||
1946 | static int kvm_vm_ioctl_create_vcpu(struct kvm *kvm, int n) | ||
1947 | { | ||
1948 | int r; | ||
1949 | struct kvm_vcpu *vcpu; | ||
1950 | |||
1951 | r = -EINVAL; | ||
1952 | if (!valid_vcpu(n)) | ||
1953 | goto out; | ||
1954 | |||
1955 | vcpu = &kvm->vcpus[n]; | ||
1956 | |||
1957 | mutex_lock(&vcpu->mutex); | ||
1958 | |||
1959 | if (vcpu->vmcs) { | ||
1960 | mutex_unlock(&vcpu->mutex); | ||
1961 | return -EEXIST; | ||
1962 | } | ||
1963 | |||
1964 | vcpu->host_fx_image = (char*)ALIGN((hva_t)vcpu->fx_buf, | ||
1965 | FX_IMAGE_ALIGN); | ||
1966 | vcpu->guest_fx_image = vcpu->host_fx_image + FX_IMAGE_SIZE; | ||
1967 | |||
1968 | r = kvm_arch_ops->vcpu_create(vcpu); | ||
1969 | if (r < 0) | ||
1970 | goto out_free_vcpus; | ||
1971 | |||
1972 | r = kvm_mmu_create(vcpu); | ||
1973 | if (r < 0) | ||
1974 | goto out_free_vcpus; | ||
1975 | |||
1976 | kvm_arch_ops->vcpu_load(vcpu); | ||
1977 | r = kvm_mmu_setup(vcpu); | ||
1978 | if (r >= 0) | ||
1979 | r = kvm_arch_ops->vcpu_setup(vcpu); | ||
1980 | vcpu_put(vcpu); | ||
1981 | |||
1982 | if (r < 0) | ||
1983 | goto out_free_vcpus; | ||
1984 | |||
1985 | r = create_vcpu_fd(vcpu); | ||
1986 | if (r < 0) | ||
1987 | goto out_free_vcpus; | ||
1988 | |||
1989 | return r; | ||
1990 | |||
1991 | out_free_vcpus: | ||
1992 | kvm_free_vcpu(vcpu); | ||
1993 | mutex_unlock(&vcpu->mutex); | ||
1994 | out: | ||
1995 | return r; | ||
1996 | } | ||
1997 | |||
1998 | static long kvm_vcpu_ioctl(struct file *filp, | ||
1999 | unsigned int ioctl, unsigned long arg) | ||
2000 | { | ||
2001 | struct kvm_vcpu *vcpu = filp->private_data; | ||
1797 | void __user *argp = (void __user *)arg; | 2002 | void __user *argp = (void __user *)arg; |
1798 | int r = -EINVAL; | 2003 | int r = -EINVAL; |
1799 | 2004 | ||
1800 | switch (ioctl) { | 2005 | switch (ioctl) { |
1801 | case KVM_GET_API_VERSION: | ||
1802 | r = KVM_API_VERSION; | ||
1803 | break; | ||
1804 | case KVM_CREATE_VCPU: { | ||
1805 | r = kvm_dev_ioctl_create_vcpu(kvm, arg); | ||
1806 | if (r) | ||
1807 | goto out; | ||
1808 | break; | ||
1809 | } | ||
1810 | case KVM_RUN: { | 2006 | case KVM_RUN: { |
1811 | struct kvm_run kvm_run; | 2007 | struct kvm_run kvm_run; |
1812 | 2008 | ||
1813 | r = -EFAULT; | 2009 | r = -EFAULT; |
1814 | if (copy_from_user(&kvm_run, argp, sizeof kvm_run)) | 2010 | if (copy_from_user(&kvm_run, argp, sizeof kvm_run)) |
1815 | goto out; | 2011 | goto out; |
1816 | r = kvm_dev_ioctl_run(kvm, &kvm_run); | 2012 | r = kvm_vcpu_ioctl_run(vcpu, &kvm_run); |
1817 | if (r < 0 && r != -EINTR) | 2013 | if (r < 0 && r != -EINTR) |
1818 | goto out; | 2014 | goto out; |
1819 | if (copy_to_user(argp, &kvm_run, sizeof kvm_run)) { | 2015 | if (copy_to_user(argp, &kvm_run, sizeof kvm_run)) { |
@@ -1825,10 +2021,8 @@ static long kvm_dev_ioctl(struct file *filp, | |||
1825 | case KVM_GET_REGS: { | 2021 | case KVM_GET_REGS: { |
1826 | struct kvm_regs kvm_regs; | 2022 | struct kvm_regs kvm_regs; |
1827 | 2023 | ||
1828 | r = -EFAULT; | 2024 | memset(&kvm_regs, 0, sizeof kvm_regs); |
1829 | if (copy_from_user(&kvm_regs, argp, sizeof kvm_regs)) | 2025 | r = kvm_vcpu_ioctl_get_regs(vcpu, &kvm_regs); |
1830 | goto out; | ||
1831 | r = kvm_dev_ioctl_get_regs(kvm, &kvm_regs); | ||
1832 | if (r) | 2026 | if (r) |
1833 | goto out; | 2027 | goto out; |
1834 | r = -EFAULT; | 2028 | r = -EFAULT; |
@@ -1843,7 +2037,7 @@ static long kvm_dev_ioctl(struct file *filp, | |||
1843 | r = -EFAULT; | 2037 | r = -EFAULT; |
1844 | if (copy_from_user(&kvm_regs, argp, sizeof kvm_regs)) | 2038 | if (copy_from_user(&kvm_regs, argp, sizeof kvm_regs)) |
1845 | goto out; | 2039 | goto out; |
1846 | r = kvm_dev_ioctl_set_regs(kvm, &kvm_regs); | 2040 | r = kvm_vcpu_ioctl_set_regs(vcpu, &kvm_regs); |
1847 | if (r) | 2041 | if (r) |
1848 | goto out; | 2042 | goto out; |
1849 | r = 0; | 2043 | r = 0; |
@@ -1852,10 +2046,8 @@ static long kvm_dev_ioctl(struct file *filp, | |||
1852 | case KVM_GET_SREGS: { | 2046 | case KVM_GET_SREGS: { |
1853 | struct kvm_sregs kvm_sregs; | 2047 | struct kvm_sregs kvm_sregs; |
1854 | 2048 | ||
1855 | r = -EFAULT; | 2049 | memset(&kvm_sregs, 0, sizeof kvm_sregs); |
1856 | if (copy_from_user(&kvm_sregs, argp, sizeof kvm_sregs)) | 2050 | r = kvm_vcpu_ioctl_get_sregs(vcpu, &kvm_sregs); |
1857 | goto out; | ||
1858 | r = kvm_dev_ioctl_get_sregs(kvm, &kvm_sregs); | ||
1859 | if (r) | 2051 | if (r) |
1860 | goto out; | 2052 | goto out; |
1861 | r = -EFAULT; | 2053 | r = -EFAULT; |
@@ -1870,7 +2062,7 @@ static long kvm_dev_ioctl(struct file *filp, | |||
1870 | r = -EFAULT; | 2062 | r = -EFAULT; |
1871 | if (copy_from_user(&kvm_sregs, argp, sizeof kvm_sregs)) | 2063 | if (copy_from_user(&kvm_sregs, argp, sizeof kvm_sregs)) |
1872 | goto out; | 2064 | goto out; |
1873 | r = kvm_dev_ioctl_set_sregs(kvm, &kvm_sregs); | 2065 | r = kvm_vcpu_ioctl_set_sregs(vcpu, &kvm_sregs); |
1874 | if (r) | 2066 | if (r) |
1875 | goto out; | 2067 | goto out; |
1876 | r = 0; | 2068 | r = 0; |
@@ -1882,7 +2074,7 @@ static long kvm_dev_ioctl(struct file *filp, | |||
1882 | r = -EFAULT; | 2074 | r = -EFAULT; |
1883 | if (copy_from_user(&tr, argp, sizeof tr)) | 2075 | if (copy_from_user(&tr, argp, sizeof tr)) |
1884 | goto out; | 2076 | goto out; |
1885 | r = kvm_dev_ioctl_translate(kvm, &tr); | 2077 | r = kvm_vcpu_ioctl_translate(vcpu, &tr); |
1886 | if (r) | 2078 | if (r) |
1887 | goto out; | 2079 | goto out; |
1888 | r = -EFAULT; | 2080 | r = -EFAULT; |
@@ -1897,7 +2089,7 @@ static long kvm_dev_ioctl(struct file *filp, | |||
1897 | r = -EFAULT; | 2089 | r = -EFAULT; |
1898 | if (copy_from_user(&irq, argp, sizeof irq)) | 2090 | if (copy_from_user(&irq, argp, sizeof irq)) |
1899 | goto out; | 2091 | goto out; |
1900 | r = kvm_dev_ioctl_interrupt(kvm, &irq); | 2092 | r = kvm_vcpu_ioctl_interrupt(vcpu, &irq); |
1901 | if (r) | 2093 | if (r) |
1902 | goto out; | 2094 | goto out; |
1903 | r = 0; | 2095 | r = 0; |
@@ -1909,19 +2101,45 @@ static long kvm_dev_ioctl(struct file *filp, | |||
1909 | r = -EFAULT; | 2101 | r = -EFAULT; |
1910 | if (copy_from_user(&dbg, argp, sizeof dbg)) | 2102 | if (copy_from_user(&dbg, argp, sizeof dbg)) |
1911 | goto out; | 2103 | goto out; |
1912 | r = kvm_dev_ioctl_debug_guest(kvm, &dbg); | 2104 | r = kvm_vcpu_ioctl_debug_guest(vcpu, &dbg); |
1913 | if (r) | 2105 | if (r) |
1914 | goto out; | 2106 | goto out; |
1915 | r = 0; | 2107 | r = 0; |
1916 | break; | 2108 | break; |
1917 | } | 2109 | } |
2110 | case KVM_GET_MSRS: | ||
2111 | r = msr_io(vcpu, argp, get_msr, 1); | ||
2112 | break; | ||
2113 | case KVM_SET_MSRS: | ||
2114 | r = msr_io(vcpu, argp, do_set_msr, 0); | ||
2115 | break; | ||
2116 | default: | ||
2117 | ; | ||
2118 | } | ||
2119 | out: | ||
2120 | return r; | ||
2121 | } | ||
2122 | |||
2123 | static long kvm_vm_ioctl(struct file *filp, | ||
2124 | unsigned int ioctl, unsigned long arg) | ||
2125 | { | ||
2126 | struct kvm *kvm = filp->private_data; | ||
2127 | void __user *argp = (void __user *)arg; | ||
2128 | int r = -EINVAL; | ||
2129 | |||
2130 | switch (ioctl) { | ||
2131 | case KVM_CREATE_VCPU: | ||
2132 | r = kvm_vm_ioctl_create_vcpu(kvm, arg); | ||
2133 | if (r < 0) | ||
2134 | goto out; | ||
2135 | break; | ||
1918 | case KVM_SET_MEMORY_REGION: { | 2136 | case KVM_SET_MEMORY_REGION: { |
1919 | struct kvm_memory_region kvm_mem; | 2137 | struct kvm_memory_region kvm_mem; |
1920 | 2138 | ||
1921 | r = -EFAULT; | 2139 | r = -EFAULT; |
1922 | if (copy_from_user(&kvm_mem, argp, sizeof kvm_mem)) | 2140 | if (copy_from_user(&kvm_mem, argp, sizeof kvm_mem)) |
1923 | goto out; | 2141 | goto out; |
1924 | r = kvm_dev_ioctl_set_memory_region(kvm, &kvm_mem); | 2142 | r = kvm_vm_ioctl_set_memory_region(kvm, &kvm_mem); |
1925 | if (r) | 2143 | if (r) |
1926 | goto out; | 2144 | goto out; |
1927 | break; | 2145 | break; |
@@ -1932,16 +2150,112 @@ static long kvm_dev_ioctl(struct file *filp, | |||
1932 | r = -EFAULT; | 2150 | r = -EFAULT; |
1933 | if (copy_from_user(&log, argp, sizeof log)) | 2151 | if (copy_from_user(&log, argp, sizeof log)) |
1934 | goto out; | 2152 | goto out; |
1935 | r = kvm_dev_ioctl_get_dirty_log(kvm, &log); | 2153 | r = kvm_vm_ioctl_get_dirty_log(kvm, &log); |
1936 | if (r) | 2154 | if (r) |
1937 | goto out; | 2155 | goto out; |
1938 | break; | 2156 | break; |
1939 | } | 2157 | } |
1940 | case KVM_GET_MSRS: | 2158 | default: |
1941 | r = msr_io(kvm, argp, get_msr, 1); | 2159 | ; |
2160 | } | ||
2161 | out: | ||
2162 | return r; | ||
2163 | } | ||
2164 | |||
2165 | static struct page *kvm_vm_nopage(struct vm_area_struct *vma, | ||
2166 | unsigned long address, | ||
2167 | int *type) | ||
2168 | { | ||
2169 | struct kvm *kvm = vma->vm_file->private_data; | ||
2170 | unsigned long pgoff; | ||
2171 | struct kvm_memory_slot *slot; | ||
2172 | struct page *page; | ||
2173 | |||
2174 | *type = VM_FAULT_MINOR; | ||
2175 | pgoff = ((address - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff; | ||
2176 | slot = gfn_to_memslot(kvm, pgoff); | ||
2177 | if (!slot) | ||
2178 | return NOPAGE_SIGBUS; | ||
2179 | page = gfn_to_page(slot, pgoff); | ||
2180 | if (!page) | ||
2181 | return NOPAGE_SIGBUS; | ||
2182 | get_page(page); | ||
2183 | return page; | ||
2184 | } | ||
2185 | |||
2186 | static struct vm_operations_struct kvm_vm_vm_ops = { | ||
2187 | .nopage = kvm_vm_nopage, | ||
2188 | }; | ||
2189 | |||
2190 | static int kvm_vm_mmap(struct file *file, struct vm_area_struct *vma) | ||
2191 | { | ||
2192 | vma->vm_ops = &kvm_vm_vm_ops; | ||
2193 | return 0; | ||
2194 | } | ||
2195 | |||
2196 | static struct file_operations kvm_vm_fops = { | ||
2197 | .release = kvm_vm_release, | ||
2198 | .unlocked_ioctl = kvm_vm_ioctl, | ||
2199 | .compat_ioctl = kvm_vm_ioctl, | ||
2200 | .mmap = kvm_vm_mmap, | ||
2201 | }; | ||
2202 | |||
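kvm_vm_fops moves guest-memory mmap() from the /dev/kvm character device to the per-VM fd: kvm_vm_nopage() translates the file offset, in pages, into the guest frame number backing it. Under that interface a launcher registers memory with KVM_SET_MEMORY_REGION and then maps the VM fd at the matching guest-physical offset; a sketch assuming the pre-userspace-memory API of this era:

    #include <sys/ioctl.h>
    #include <sys/mman.h>
    #include <linux/kvm.h>

    /* Register and map 'size' bytes of guest RAM starting at guest physical 0. */
    static void *map_guest_ram(int vm_fd, unsigned long size)
    {
            struct kvm_memory_region mem = {
                    .slot            = 0,
                    .guest_phys_addr = 0,
                    .memory_size     = size,
            };

            if (ioctl(vm_fd, KVM_SET_MEMORY_REGION, &mem) < 0)
                    return MAP_FAILED;

            /* File offset == guest physical address, per kvm_vm_nopage(). */
            return mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED, vm_fd, 0);
    }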
2203 | static int kvm_dev_ioctl_create_vm(void) | ||
2204 | { | ||
2205 | int fd, r; | ||
2206 | struct inode *inode; | ||
2207 | struct file *file; | ||
2208 | struct kvm *kvm; | ||
2209 | |||
2210 | inode = kvmfs_inode(&kvm_vm_fops); | ||
2211 | if (IS_ERR(inode)) { | ||
2212 | r = PTR_ERR(inode); | ||
2213 | goto out1; | ||
2214 | } | ||
2215 | |||
2216 | kvm = kvm_create_vm(); | ||
2217 | if (IS_ERR(kvm)) { | ||
2218 | r = PTR_ERR(kvm); | ||
2219 | goto out2; | ||
2220 | } | ||
2221 | |||
2222 | file = kvmfs_file(inode, kvm); | ||
2223 | if (IS_ERR(file)) { | ||
2224 | r = PTR_ERR(file); | ||
2225 | goto out3; | ||
2226 | } | ||
2227 | kvm->filp = file; | ||
2228 | |||
2229 | r = get_unused_fd(); | ||
2230 | if (r < 0) | ||
2231 | goto out4; | ||
2232 | fd = r; | ||
2233 | fd_install(fd, file); | ||
2234 | |||
2235 | return fd; | ||
2236 | |||
2237 | out4: | ||
2238 | fput(file); | ||
2239 | out3: | ||
2240 | kvm_destroy_vm(kvm); | ||
2241 | out2: | ||
2242 | iput(inode); | ||
2243 | out1: | ||
2244 | return r; | ||
2245 | } | ||
2246 | |||
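Together with create_vcpu_fd() above, this turns the control interface into a three-level fd hierarchy: /dev/kvm hands out VM fds via KVM_CREATE_VM, each VM fd hands out vcpu fds via KVM_CREATE_VCPU, and kvmfs supplies the anonymous inodes behind the last two. A userspace sketch of the resulting open sequence:

    #include <fcntl.h>
    #include <unistd.h>
    #include <sys/ioctl.h>
    #include <linux/kvm.h>

    static int create_vm_and_vcpu(int *vm_fd, int *vcpu_fd)
    {
            int kvm_fd = open("/dev/kvm", O_RDWR);

            if (kvm_fd < 0)
                    return -1;
            if (ioctl(kvm_fd, KVM_GET_API_VERSION, 0) != KVM_API_VERSION)
                    goto fail;

            *vm_fd = ioctl(kvm_fd, KVM_CREATE_VM, 0);      /* system-wide ioctl */
            if (*vm_fd < 0)
                    goto fail;

            *vcpu_fd = ioctl(*vm_fd, KVM_CREATE_VCPU, 0);  /* VM ioctl, vcpu slot 0 */
            if (*vcpu_fd < 0) {
                    close(*vm_fd);
                    goto fail;
            }

            return 0;                                      /* keep kvm_fd open as well */
    fail:
            close(kvm_fd);
            return -1;
    }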
2247 | static long kvm_dev_ioctl(struct file *filp, | ||
2248 | unsigned int ioctl, unsigned long arg) | ||
2249 | { | ||
2250 | void __user *argp = (void __user *)arg; | ||
2251 | int r = -EINVAL; | ||
2252 | |||
2253 | switch (ioctl) { | ||
2254 | case KVM_GET_API_VERSION: | ||
2255 | r = KVM_API_VERSION; | ||
1942 | break; | 2256 | break; |
1943 | case KVM_SET_MSRS: | 2257 | case KVM_CREATE_VM: |
1944 | r = msr_io(kvm, argp, do_set_msr, 0); | 2258 | r = kvm_dev_ioctl_create_vm(); |
1945 | break; | 2259 | break; |
1946 | case KVM_GET_MSR_INDEX_LIST: { | 2260 | case KVM_GET_MSR_INDEX_LIST: { |
1947 | struct kvm_msr_list __user *user_msr_list = argp; | 2261 | struct kvm_msr_list __user *user_msr_list = argp; |
@@ -1977,43 +2291,11 @@ out: | |||
1977 | return r; | 2291 | return r; |
1978 | } | 2292 | } |
1979 | 2293 | ||
1980 | static struct page *kvm_dev_nopage(struct vm_area_struct *vma, | ||
1981 | unsigned long address, | ||
1982 | int *type) | ||
1983 | { | ||
1984 | struct kvm *kvm = vma->vm_file->private_data; | ||
1985 | unsigned long pgoff; | ||
1986 | struct kvm_memory_slot *slot; | ||
1987 | struct page *page; | ||
1988 | |||
1989 | *type = VM_FAULT_MINOR; | ||
1990 | pgoff = ((address - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff; | ||
1991 | slot = gfn_to_memslot(kvm, pgoff); | ||
1992 | if (!slot) | ||
1993 | return NOPAGE_SIGBUS; | ||
1994 | page = gfn_to_page(slot, pgoff); | ||
1995 | if (!page) | ||
1996 | return NOPAGE_SIGBUS; | ||
1997 | get_page(page); | ||
1998 | return page; | ||
1999 | } | ||
2000 | |||
2001 | static struct vm_operations_struct kvm_dev_vm_ops = { | ||
2002 | .nopage = kvm_dev_nopage, | ||
2003 | }; | ||
2004 | |||
2005 | static int kvm_dev_mmap(struct file *file, struct vm_area_struct *vma) | ||
2006 | { | ||
2007 | vma->vm_ops = &kvm_dev_vm_ops; | ||
2008 | return 0; | ||
2009 | } | ||
2010 | |||
2011 | static struct file_operations kvm_chardev_ops = { | 2294 | static struct file_operations kvm_chardev_ops = { |
2012 | .open = kvm_dev_open, | 2295 | .open = kvm_dev_open, |
2013 | .release = kvm_dev_release, | 2296 | .release = kvm_dev_release, |
2014 | .unlocked_ioctl = kvm_dev_ioctl, | 2297 | .unlocked_ioctl = kvm_dev_ioctl, |
2015 | .compat_ioctl = kvm_dev_ioctl, | 2298 | .compat_ioctl = kvm_dev_ioctl, |
2016 | .mmap = kvm_dev_mmap, | ||
2017 | }; | 2299 | }; |
2018 | 2300 | ||
2019 | static struct miscdevice kvm_dev = { | 2301 | static struct miscdevice kvm_dev = { |
@@ -2080,13 +2362,17 @@ static int kvm_cpu_hotplug(struct notifier_block *notifier, unsigned long val, | |||
2080 | int cpu = (long)v; | 2362 | int cpu = (long)v; |
2081 | 2363 | ||
2082 | switch (val) { | 2364 | switch (val) { |
2083 | case CPU_DEAD: | 2365 | case CPU_DOWN_PREPARE: |
2084 | case CPU_UP_CANCELED: | 2366 | case CPU_UP_CANCELED: |
2367 | printk(KERN_INFO "kvm: disabling virtualization on CPU%d\n", | ||
2368 | cpu); | ||
2085 | decache_vcpus_on_cpu(cpu); | 2369 | decache_vcpus_on_cpu(cpu); |
2086 | smp_call_function_single(cpu, kvm_arch_ops->hardware_disable, | 2370 | smp_call_function_single(cpu, kvm_arch_ops->hardware_disable, |
2087 | NULL, 0, 1); | 2371 | NULL, 0, 1); |
2088 | break; | 2372 | break; |
2089 | case CPU_UP_PREPARE: | 2373 | case CPU_ONLINE: |
2374 | printk(KERN_INFO "kvm: enabling virtualization on CPU%d\n", | ||
2375 | cpu); | ||
2090 | smp_call_function_single(cpu, kvm_arch_ops->hardware_enable, | 2376 | smp_call_function_single(cpu, kvm_arch_ops->hardware_enable, |
2091 | NULL, 0, 1); | 2377 | NULL, 0, 1); |
2092 | break; | 2378 | break; |
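Moving the disable side from CPU_DEAD to CPU_DOWN_PREPARE matters because hardware_disable runs via an IPI: at DOWN_PREPARE the CPU is still online and can service smp_call_function_single(), whereas at CPU_DEAD it is already gone; CPU_ONLINE likewise re-enables only once the CPU is fully up. The same shape in a stand-alone notifier sketch, with placeholder callbacks:

    #include <linux/cpu.h>
    #include <linux/notifier.h>
    #include <linux/smp.h>

    static void my_hw_enable(void *junk)  { /* arch-specific enable goes here */ }
    static void my_hw_disable(void *junk) { /* arch-specific disable goes here */ }

    static int my_cpu_hotplug(struct notifier_block *nb, unsigned long val, void *v)
    {
            int cpu = (long)v;

            switch (val) {
            case CPU_DOWN_PREPARE:
            case CPU_UP_CANCELED:
                    /* CPU still online: safe to send it an IPI. */
                    smp_call_function_single(cpu, my_hw_disable, NULL, 0, 1);
                    break;
            case CPU_ONLINE:
                    smp_call_function_single(cpu, my_hw_enable, NULL, 0, 1);
                    break;
            }
            return NOTIFY_OK;
    }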
@@ -2121,13 +2407,13 @@ static void kvm_exit_debug(void) | |||
2121 | static int kvm_suspend(struct sys_device *dev, pm_message_t state) | 2407 | static int kvm_suspend(struct sys_device *dev, pm_message_t state) |
2122 | { | 2408 | { |
2123 | decache_vcpus_on_cpu(raw_smp_processor_id()); | 2409 | decache_vcpus_on_cpu(raw_smp_processor_id()); |
2124 | on_each_cpu(kvm_arch_ops->hardware_disable, 0, 0, 1); | 2410 | on_each_cpu(kvm_arch_ops->hardware_disable, NULL, 0, 1); |
2125 | return 0; | 2411 | return 0; |
2126 | } | 2412 | } |
2127 | 2413 | ||
2128 | static int kvm_resume(struct sys_device *dev) | 2414 | static int kvm_resume(struct sys_device *dev) |
2129 | { | 2415 | { |
2130 | on_each_cpu(kvm_arch_ops->hardware_enable, 0, 0, 1); | 2416 | on_each_cpu(kvm_arch_ops->hardware_enable, NULL, 0, 1); |
2131 | return 0; | 2417 | return 0; |
2132 | } | 2418 | } |
2133 | 2419 | ||
@@ -2144,6 +2430,18 @@ static struct sys_device kvm_sysdev = { | |||
2144 | 2430 | ||
2145 | hpa_t bad_page_address; | 2431 | hpa_t bad_page_address; |
2146 | 2432 | ||
2433 | static int kvmfs_get_sb(struct file_system_type *fs_type, int flags, | ||
2434 | const char *dev_name, void *data, struct vfsmount *mnt) | ||
2435 | { | ||
2436 | return get_sb_pseudo(fs_type, "kvm:", NULL, KVMFS_SUPER_MAGIC, mnt); | ||
2437 | } | ||
2438 | |||
2439 | static struct file_system_type kvm_fs_type = { | ||
2440 | .name = "kvmfs", | ||
2441 | .get_sb = kvmfs_get_sb, | ||
2442 | .kill_sb = kill_anon_super, | ||
2443 | }; | ||
2444 | |||
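kvmfs exists only to mint inodes for the VM and vcpu files: get_sb_pseudo() produces an internal, unmountable superblock, and kern_mount() in kvm_init() below pins it for the module's lifetime. The same pattern for a hypothetical "foofs" (the magic number and names are made up; kvmfs_inode()/kvmfs_file() come from elsewhere in this series):

    #include <linux/err.h>
    #include <linux/fs.h>
    #include <linux/init.h>
    #include <linux/mount.h>

    #define FOOFS_SUPER_MAGIC 0xf00f00f0          /* any otherwise unused magic */

    static struct vfsmount *foofs_mnt;

    static int foofs_get_sb(struct file_system_type *fs_type, int flags,
                            const char *dev_name, void *data, struct vfsmount *mnt)
    {
            return get_sb_pseudo(fs_type, "foo:", NULL, FOOFS_SUPER_MAGIC, mnt);
    }

    static struct file_system_type foo_fs_type = {
            .name    = "foofs",
            .get_sb  = foofs_get_sb,
            .kill_sb = kill_anon_super,
    };

    static int __init foofs_init(void)
    {
            int r = register_filesystem(&foo_fs_type);

            if (r)
                    return r;
            foofs_mnt = kern_mount(&foo_fs_type);
            if (IS_ERR(foofs_mnt)) {
                    unregister_filesystem(&foo_fs_type);
                    return PTR_ERR(foofs_mnt);
            }
            return 0;
    }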
2147 | int kvm_init_arch(struct kvm_arch_ops *ops, struct module *module) | 2445 | int kvm_init_arch(struct kvm_arch_ops *ops, struct module *module) |
2148 | { | 2446 | { |
2149 | int r; | 2447 | int r; |
@@ -2220,8 +2518,16 @@ void kvm_exit_arch(void) | |||
2220 | static __init int kvm_init(void) | 2518 | static __init int kvm_init(void) |
2221 | { | 2519 | { |
2222 | static struct page *bad_page; | 2520 | static struct page *bad_page; |
2223 | int r = 0; | 2521 | int r; |
2522 | |||
2523 | r = register_filesystem(&kvm_fs_type); | ||
2524 | if (r) | ||
2525 | goto out3; | ||
2224 | 2526 | ||
2527 | kvmfs_mnt = kern_mount(&kvm_fs_type); | ||
2528 | r = PTR_ERR(kvmfs_mnt); | ||
2529 | if (IS_ERR(kvmfs_mnt)) | ||
2530 | goto out2; | ||
2225 | kvm_init_debug(); | 2531 | kvm_init_debug(); |
2226 | 2532 | ||
2227 | kvm_init_msr_list(); | 2533 | kvm_init_msr_list(); |
@@ -2234,10 +2540,14 @@ static __init int kvm_init(void) | |||
2234 | bad_page_address = page_to_pfn(bad_page) << PAGE_SHIFT; | 2540 | bad_page_address = page_to_pfn(bad_page) << PAGE_SHIFT; |
2235 | memset(__va(bad_page_address), 0, PAGE_SIZE); | 2541 | memset(__va(bad_page_address), 0, PAGE_SIZE); |
2236 | 2542 | ||
2237 | return r; | 2543 | return 0; |
2238 | 2544 | ||
2239 | out: | 2545 | out: |
2240 | kvm_exit_debug(); | 2546 | kvm_exit_debug(); |
2547 | mntput(kvmfs_mnt); | ||
2548 | out2: | ||
2549 | unregister_filesystem(&kvm_fs_type); | ||
2550 | out3: | ||
2241 | return r; | 2551 | return r; |
2242 | } | 2552 | } |
2243 | 2553 | ||
@@ -2245,6 +2555,8 @@ static __exit void kvm_exit(void) | |||
2245 | { | 2555 | { |
2246 | kvm_exit_debug(); | 2556 | kvm_exit_debug(); |
2247 | __free_page(pfn_to_page(bad_page_address >> PAGE_SHIFT)); | 2557 | __free_page(pfn_to_page(bad_page_address >> PAGE_SHIFT)); |
2558 | mntput(kvmfs_mnt); | ||
2559 | unregister_filesystem(&kvm_fs_type); | ||
2248 | } | 2560 | } |
2249 | 2561 | ||
2250 | module_init(kvm_init) | 2562 | module_init(kvm_init) |
diff --git a/drivers/kvm/kvm_svm.h b/drivers/kvm/kvm_svm.h index 74cc862f4935..624f1ca48657 100644 --- a/drivers/kvm/kvm_svm.h +++ b/drivers/kvm/kvm_svm.h | |||
@@ -1,6 +1,7 @@ | |||
1 | #ifndef __KVM_SVM_H | 1 | #ifndef __KVM_SVM_H |
2 | #define __KVM_SVM_H | 2 | #define __KVM_SVM_H |
3 | 3 | ||
4 | #include <linux/kernel.h> | ||
4 | #include <linux/types.h> | 5 | #include <linux/types.h> |
5 | #include <linux/list.h> | 6 | #include <linux/list.h> |
6 | #include <asm/msr.h> | 7 | #include <asm/msr.h> |
@@ -18,7 +19,7 @@ static const u32 host_save_msrs[] = { | |||
18 | MSR_IA32_LASTBRANCHTOIP, MSR_IA32_LASTINTFROMIP,MSR_IA32_LASTINTTOIP,*/ | 19 | MSR_IA32_LASTBRANCHTOIP, MSR_IA32_LASTINTFROMIP,MSR_IA32_LASTINTTOIP,*/ |
19 | }; | 20 | }; |
20 | 21 | ||
21 | #define NR_HOST_SAVE_MSRS (sizeof(host_save_msrs) / sizeof(*host_save_msrs)) | 22 | #define NR_HOST_SAVE_MSRS ARRAY_SIZE(host_save_msrs) |
22 | #define NUM_DB_REGS 4 | 23 | #define NUM_DB_REGS 4 |
23 | 24 | ||
24 | struct vcpu_svm { | 25 | struct vcpu_svm { |
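The new <linux/kernel.h> include is what provides ARRAY_SIZE(); the open-coded sizeof division it replaces is equivalent, just easier to get wrong once the element type changes. For reference, the macro is essentially:

    #define ARRAY_SIZE(arr) (sizeof(arr) / sizeof((arr)[0]))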
diff --git a/drivers/kvm/mmu.c b/drivers/kvm/mmu.c index be793770f31b..a1a93368f314 100644 --- a/drivers/kvm/mmu.c +++ b/drivers/kvm/mmu.c | |||
@@ -298,18 +298,18 @@ static void rmap_add(struct kvm_vcpu *vcpu, u64 *spte) | |||
298 | if (!is_rmap_pte(*spte)) | 298 | if (!is_rmap_pte(*spte)) |
299 | return; | 299 | return; |
300 | page = pfn_to_page((*spte & PT64_BASE_ADDR_MASK) >> PAGE_SHIFT); | 300 | page = pfn_to_page((*spte & PT64_BASE_ADDR_MASK) >> PAGE_SHIFT); |
301 | if (!page->private) { | 301 | if (!page_private(page)) { |
302 | rmap_printk("rmap_add: %p %llx 0->1\n", spte, *spte); | 302 | rmap_printk("rmap_add: %p %llx 0->1\n", spte, *spte); |
303 | page->private = (unsigned long)spte; | 303 | set_page_private(page,(unsigned long)spte); |
304 | } else if (!(page->private & 1)) { | 304 | } else if (!(page_private(page) & 1)) { |
305 | rmap_printk("rmap_add: %p %llx 1->many\n", spte, *spte); | 305 | rmap_printk("rmap_add: %p %llx 1->many\n", spte, *spte); |
306 | desc = mmu_alloc_rmap_desc(vcpu); | 306 | desc = mmu_alloc_rmap_desc(vcpu); |
307 | desc->shadow_ptes[0] = (u64 *)page->private; | 307 | desc->shadow_ptes[0] = (u64 *)page_private(page); |
308 | desc->shadow_ptes[1] = spte; | 308 | desc->shadow_ptes[1] = spte; |
309 | page->private = (unsigned long)desc | 1; | 309 | set_page_private(page,(unsigned long)desc | 1); |
310 | } else { | 310 | } else { |
311 | rmap_printk("rmap_add: %p %llx many->many\n", spte, *spte); | 311 | rmap_printk("rmap_add: %p %llx many->many\n", spte, *spte); |
312 | desc = (struct kvm_rmap_desc *)(page->private & ~1ul); | 312 | desc = (struct kvm_rmap_desc *)(page_private(page) & ~1ul); |
313 | while (desc->shadow_ptes[RMAP_EXT-1] && desc->more) | 313 | while (desc->shadow_ptes[RMAP_EXT-1] && desc->more) |
314 | desc = desc->more; | 314 | desc = desc->more; |
315 | if (desc->shadow_ptes[RMAP_EXT-1]) { | 315 | if (desc->shadow_ptes[RMAP_EXT-1]) { |
@@ -337,12 +337,12 @@ static void rmap_desc_remove_entry(struct kvm_vcpu *vcpu, | |||
337 | if (j != 0) | 337 | if (j != 0) |
338 | return; | 338 | return; |
339 | if (!prev_desc && !desc->more) | 339 | if (!prev_desc && !desc->more) |
340 | page->private = (unsigned long)desc->shadow_ptes[0]; | 340 | set_page_private(page,(unsigned long)desc->shadow_ptes[0]); |
341 | else | 341 | else |
342 | if (prev_desc) | 342 | if (prev_desc) |
343 | prev_desc->more = desc->more; | 343 | prev_desc->more = desc->more; |
344 | else | 344 | else |
345 | page->private = (unsigned long)desc->more | 1; | 345 | set_page_private(page,(unsigned long)desc->more | 1); |
346 | mmu_free_rmap_desc(vcpu, desc); | 346 | mmu_free_rmap_desc(vcpu, desc); |
347 | } | 347 | } |
348 | 348 | ||
@@ -356,20 +356,20 @@ static void rmap_remove(struct kvm_vcpu *vcpu, u64 *spte) | |||
356 | if (!is_rmap_pte(*spte)) | 356 | if (!is_rmap_pte(*spte)) |
357 | return; | 357 | return; |
358 | page = pfn_to_page((*spte & PT64_BASE_ADDR_MASK) >> PAGE_SHIFT); | 358 | page = pfn_to_page((*spte & PT64_BASE_ADDR_MASK) >> PAGE_SHIFT); |
359 | if (!page->private) { | 359 | if (!page_private(page)) { |
360 | printk(KERN_ERR "rmap_remove: %p %llx 0->BUG\n", spte, *spte); | 360 | printk(KERN_ERR "rmap_remove: %p %llx 0->BUG\n", spte, *spte); |
361 | BUG(); | 361 | BUG(); |
362 | } else if (!(page->private & 1)) { | 362 | } else if (!(page_private(page) & 1)) { |
363 | rmap_printk("rmap_remove: %p %llx 1->0\n", spte, *spte); | 363 | rmap_printk("rmap_remove: %p %llx 1->0\n", spte, *spte); |
364 | if ((u64 *)page->private != spte) { | 364 | if ((u64 *)page_private(page) != spte) { |
365 | printk(KERN_ERR "rmap_remove: %p %llx 1->BUG\n", | 365 | printk(KERN_ERR "rmap_remove: %p %llx 1->BUG\n", |
366 | spte, *spte); | 366 | spte, *spte); |
367 | BUG(); | 367 | BUG(); |
368 | } | 368 | } |
369 | page->private = 0; | 369 | set_page_private(page,0); |
370 | } else { | 370 | } else { |
371 | rmap_printk("rmap_remove: %p %llx many->many\n", spte, *spte); | 371 | rmap_printk("rmap_remove: %p %llx many->many\n", spte, *spte); |
372 | desc = (struct kvm_rmap_desc *)(page->private & ~1ul); | 372 | desc = (struct kvm_rmap_desc *)(page_private(page) & ~1ul); |
373 | prev_desc = NULL; | 373 | prev_desc = NULL; |
374 | while (desc) { | 374 | while (desc) { |
375 | for (i = 0; i < RMAP_EXT && desc->shadow_ptes[i]; ++i) | 375 | for (i = 0; i < RMAP_EXT && desc->shadow_ptes[i]; ++i) |
@@ -398,11 +398,11 @@ static void rmap_write_protect(struct kvm_vcpu *vcpu, u64 gfn) | |||
398 | BUG_ON(!slot); | 398 | BUG_ON(!slot); |
399 | page = gfn_to_page(slot, gfn); | 399 | page = gfn_to_page(slot, gfn); |
400 | 400 | ||
401 | while (page->private) { | 401 | while (page_private(page)) { |
402 | if (!(page->private & 1)) | 402 | if (!(page_private(page) & 1)) |
403 | spte = (u64 *)page->private; | 403 | spte = (u64 *)page_private(page); |
404 | else { | 404 | else { |
405 | desc = (struct kvm_rmap_desc *)(page->private & ~1ul); | 405 | desc = (struct kvm_rmap_desc *)(page_private(page) & ~1ul); |
406 | spte = desc->shadow_ptes[0]; | 406 | spte = desc->shadow_ptes[0]; |
407 | } | 407 | } |
408 | BUG_ON(!spte); | 408 | BUG_ON(!spte); |
@@ -1218,7 +1218,7 @@ static int alloc_mmu_pages(struct kvm_vcpu *vcpu) | |||
1218 | INIT_LIST_HEAD(&page_header->link); | 1218 | INIT_LIST_HEAD(&page_header->link); |
1219 | if ((page = alloc_page(GFP_KERNEL)) == NULL) | 1219 | if ((page = alloc_page(GFP_KERNEL)) == NULL) |
1220 | goto error_1; | 1220 | goto error_1; |
1221 | page->private = (unsigned long)page_header; | 1221 | set_page_private(page, (unsigned long)page_header); |
1222 | page_header->page_hpa = (hpa_t)page_to_pfn(page) << PAGE_SHIFT; | 1222 | page_header->page_hpa = (hpa_t)page_to_pfn(page) << PAGE_SHIFT; |
1223 | memset(__va(page_header->page_hpa), 0, PAGE_SIZE); | 1223 | memset(__va(page_header->page_hpa), 0, PAGE_SIZE); |
1224 | list_add(&page_header->link, &vcpu->free_pages); | 1224 | list_add(&page_header->link, &vcpu->free_pages); |
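mmu.c overloads page->private as either a single spte pointer or a tagged pointer to an rmap descriptor; switching to the page_private()/set_page_private() accessors keeps this code compiling and greppable now that direct access to the field is being phased out. The accessors are thin wrappers, roughly:

    /* from <linux/mm.h>, approximately */
    #define page_private(page)          ((page)->private)
    #define set_page_private(page, v)   ((page)->private = (v))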
diff --git a/drivers/kvm/paging_tmpl.h b/drivers/kvm/paging_tmpl.h index b6b90e9e1301..f3bcee904651 100644 --- a/drivers/kvm/paging_tmpl.h +++ b/drivers/kvm/paging_tmpl.h | |||
@@ -128,8 +128,10 @@ static int FNAME(walk_addr)(struct guest_walker *walker, | |||
128 | goto access_error; | 128 | goto access_error; |
129 | #endif | 129 | #endif |
130 | 130 | ||
131 | if (!(*ptep & PT_ACCESSED_MASK)) | 131 | if (!(*ptep & PT_ACCESSED_MASK)) { |
132 | *ptep |= PT_ACCESSED_MASK; /* avoid rmw */ | 132 | mark_page_dirty(vcpu->kvm, table_gfn); |
133 | *ptep |= PT_ACCESSED_MASK; | ||
134 | } | ||
133 | 135 | ||
134 | if (walker->level == PT_PAGE_TABLE_LEVEL) { | 136 | if (walker->level == PT_PAGE_TABLE_LEVEL) { |
135 | walker->gfn = (*ptep & PT_BASE_ADDR_MASK) | 137 | walker->gfn = (*ptep & PT_BASE_ADDR_MASK) |
@@ -185,6 +187,12 @@ static void FNAME(release_walker)(struct guest_walker *walker) | |||
185 | kunmap_atomic(walker->table, KM_USER0); | 187 | kunmap_atomic(walker->table, KM_USER0); |
186 | } | 188 | } |
187 | 189 | ||
190 | static void FNAME(mark_pagetable_dirty)(struct kvm *kvm, | ||
191 | struct guest_walker *walker) | ||
192 | { | ||
193 | mark_page_dirty(kvm, walker->table_gfn[walker->level - 1]); | ||
194 | } | ||
195 | |||
188 | static void FNAME(set_pte)(struct kvm_vcpu *vcpu, u64 guest_pte, | 196 | static void FNAME(set_pte)(struct kvm_vcpu *vcpu, u64 guest_pte, |
189 | u64 *shadow_pte, u64 access_bits, gfn_t gfn) | 197 | u64 *shadow_pte, u64 access_bits, gfn_t gfn) |
190 | { | 198 | { |
@@ -348,12 +356,15 @@ static int FNAME(fix_write_pf)(struct kvm_vcpu *vcpu, | |||
348 | } else if (kvm_mmu_lookup_page(vcpu, gfn)) { | 356 | } else if (kvm_mmu_lookup_page(vcpu, gfn)) { |
349 | pgprintk("%s: found shadow page for %lx, marking ro\n", | 357 | pgprintk("%s: found shadow page for %lx, marking ro\n", |
350 | __FUNCTION__, gfn); | 358 | __FUNCTION__, gfn); |
359 | mark_page_dirty(vcpu->kvm, gfn); | ||
360 | FNAME(mark_pagetable_dirty)(vcpu->kvm, walker); | ||
351 | *guest_ent |= PT_DIRTY_MASK; | 361 | *guest_ent |= PT_DIRTY_MASK; |
352 | *write_pt = 1; | 362 | *write_pt = 1; |
353 | return 0; | 363 | return 0; |
354 | } | 364 | } |
355 | mark_page_dirty(vcpu->kvm, gfn); | 365 | mark_page_dirty(vcpu->kvm, gfn); |
356 | *shadow_ent |= PT_WRITABLE_MASK; | 366 | *shadow_ent |= PT_WRITABLE_MASK; |
367 | FNAME(mark_pagetable_dirty)(vcpu->kvm, walker); | ||
357 | *guest_ent |= PT_DIRTY_MASK; | 368 | *guest_ent |= PT_DIRTY_MASK; |
358 | rmap_add(vcpu, shadow_ent); | 369 | rmap_add(vcpu, shadow_ent); |
359 | 370 | ||
@@ -430,9 +441,8 @@ static int FNAME(page_fault)(struct kvm_vcpu *vcpu, gva_t addr, | |||
430 | /* | 441 | /* |
431 | * mmio: emulate if accessible, otherwise its a guest fault. | 442 | * mmio: emulate if accessible, otherwise its a guest fault. |
432 | */ | 443 | */ |
433 | if (is_io_pte(*shadow_pte)) { | 444 | if (is_io_pte(*shadow_pte)) |
434 | return 1; | 445 | return 1; |
435 | } | ||
436 | 446 | ||
437 | ++kvm_stat.pf_fixed; | 447 | ++kvm_stat.pf_fixed; |
438 | kvm_mmu_audit(vcpu, "post page fault (fixed)"); | 448 | kvm_mmu_audit(vcpu, "post page fault (fixed)"); |
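The added mark_page_dirty() calls cover writes the hypervisor performs on the guest's behalf, setting accessed and dirty bits in guest page tables, which would otherwise be invisible to dirty logging and leave stale page-table pages behind during live migration. A sketch of what such a helper has to do; the slot field names are assumptions based on the memslot usage visible elsewhere in this patch:

    /* Record that guest frame 'gfn' was modified by the host. */
    static void mark_page_dirty(struct kvm *kvm, gfn_t gfn)
    {
            struct kvm_memory_slot *slot = gfn_to_memslot(kvm, gfn);

            if (slot && slot->dirty_bitmap) {
                    unsigned long rel_gfn = gfn - slot->base_gfn;

                    set_bit(rel_gfn, slot->dirty_bitmap);  /* reported via KVM_GET_DIRTY_LOG */
            }
    }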
diff --git a/drivers/kvm/svm.c b/drivers/kvm/svm.c index 83da4ea150a3..3d8ea7ac2ecc 100644 --- a/drivers/kvm/svm.c +++ b/drivers/kvm/svm.c | |||
@@ -15,6 +15,7 @@ | |||
15 | */ | 15 | */ |
16 | 16 | ||
17 | #include <linux/module.h> | 17 | #include <linux/module.h> |
18 | #include <linux/kernel.h> | ||
18 | #include <linux/vmalloc.h> | 19 | #include <linux/vmalloc.h> |
19 | #include <linux/highmem.h> | 20 | #include <linux/highmem.h> |
20 | #include <linux/profile.h> | 21 | #include <linux/profile.h> |
@@ -75,7 +76,7 @@ struct svm_init_data { | |||
75 | 76 | ||
76 | static u32 msrpm_ranges[] = {0, 0xc0000000, 0xc0010000}; | 77 | static u32 msrpm_ranges[] = {0, 0xc0000000, 0xc0010000}; |
77 | 78 | ||
78 | #define NUM_MSR_MAPS (sizeof(msrpm_ranges) / sizeof(*msrpm_ranges)) | 79 | #define NUM_MSR_MAPS ARRAY_SIZE(msrpm_ranges) |
79 | #define MSRS_RANGE_SIZE 2048 | 80 | #define MSRS_RANGE_SIZE 2048 |
80 | #define MSRS_IN_RANGE (MSRS_RANGE_SIZE * 8 / 2) | 81 | #define MSRS_IN_RANGE (MSRS_RANGE_SIZE * 8 / 2) |
81 | 82 | ||
@@ -485,6 +486,7 @@ static void init_vmcb(struct vmcb *vmcb) | |||
485 | 486 | ||
486 | control->intercept = (1ULL << INTERCEPT_INTR) | | 487 | control->intercept = (1ULL << INTERCEPT_INTR) | |
487 | (1ULL << INTERCEPT_NMI) | | 488 | (1ULL << INTERCEPT_NMI) | |
489 | (1ULL << INTERCEPT_SMI) | | ||
488 | /* | 490 | /* |
489 | * selective cr0 intercept bug? | 491 | * selective cr0 intercept bug? |
490 | * 0: 0f 22 d8 mov %eax,%cr3 | 492 | * 0: 0f 22 d8 mov %eax,%cr3 |
@@ -553,7 +555,7 @@ static void init_vmcb(struct vmcb *vmcb) | |||
553 | * cr0 val on cpu init should be 0x60000010, we enable cpu | 555 | * cr0 val on cpu init should be 0x60000010, we enable cpu |
554 | * cache by default. the orderly way is to enable cache in bios. | 556 | * cache by default. the orderly way is to enable cache in bios. |
555 | */ | 557 | */ |
556 | save->cr0 = 0x00000010 | CR0_PG_MASK; | 558 | save->cr0 = 0x00000010 | CR0_PG_MASK | CR0_WP_MASK; |
557 | save->cr4 = CR4_PAE_MASK; | 559 | save->cr4 = CR4_PAE_MASK; |
558 | /* rdx = ?? */ | 560 | /* rdx = ?? */ |
559 | } | 561 | } |
@@ -598,10 +600,9 @@ static void svm_free_vcpu(struct kvm_vcpu *vcpu) | |||
598 | kfree(vcpu->svm); | 600 | kfree(vcpu->svm); |
599 | } | 601 | } |
600 | 602 | ||
601 | static struct kvm_vcpu *svm_vcpu_load(struct kvm_vcpu *vcpu) | 603 | static void svm_vcpu_load(struct kvm_vcpu *vcpu) |
602 | { | 604 | { |
603 | get_cpu(); | 605 | get_cpu(); |
604 | return vcpu; | ||
605 | } | 606 | } |
606 | 607 | ||
607 | static void svm_vcpu_put(struct kvm_vcpu *vcpu) | 608 | static void svm_vcpu_put(struct kvm_vcpu *vcpu) |
@@ -1042,22 +1043,22 @@ static int io_interception(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) | |||
1042 | 1043 | ||
1043 | addr_mask = io_adress(vcpu, _in, &kvm_run->io.address); | 1044 | addr_mask = io_adress(vcpu, _in, &kvm_run->io.address); |
1044 | if (!addr_mask) { | 1045 | if (!addr_mask) { |
1045 | printk(KERN_DEBUG "%s: get io address failed\n", __FUNCTION__); | 1046 | printk(KERN_DEBUG "%s: get io address failed\n", |
1047 | __FUNCTION__); | ||
1046 | return 1; | 1048 | return 1; |
1047 | } | 1049 | } |
1048 | 1050 | ||
1049 | if (kvm_run->io.rep) { | 1051 | if (kvm_run->io.rep) { |
1050 | kvm_run->io.count = vcpu->regs[VCPU_REGS_RCX] & addr_mask; | 1052 | kvm_run->io.count |
1053 | = vcpu->regs[VCPU_REGS_RCX] & addr_mask; | ||
1051 | kvm_run->io.string_down = (vcpu->svm->vmcb->save.rflags | 1054 | kvm_run->io.string_down = (vcpu->svm->vmcb->save.rflags |
1052 | & X86_EFLAGS_DF) != 0; | 1055 | & X86_EFLAGS_DF) != 0; |
1053 | } | 1056 | } |
1054 | } else { | 1057 | } else |
1055 | kvm_run->io.value = vcpu->svm->vmcb->save.rax; | 1058 | kvm_run->io.value = vcpu->svm->vmcb->save.rax; |
1056 | } | ||
1057 | return 0; | 1059 | return 0; |
1058 | } | 1060 | } |
1059 | 1061 | ||
1060 | |||
1061 | static int nop_on_interception(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) | 1062 | static int nop_on_interception(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) |
1062 | { | 1063 | { |
1063 | return 1; | 1064 | return 1; |
@@ -1075,6 +1076,12 @@ static int halt_interception(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) | |||
1075 | return 0; | 1076 | return 0; |
1076 | } | 1077 | } |
1077 | 1078 | ||
1079 | static int vmmcall_interception(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) | ||
1080 | { | ||
1081 | vcpu->svm->vmcb->save.rip += 3; | ||
1082 | return kvm_hypercall(vcpu, kvm_run); | ||
1083 | } | ||
1084 | |||
1078 | static int invalid_op_interception(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) | 1085 | static int invalid_op_interception(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) |
1079 | { | 1086 | { |
1080 | inject_ud(vcpu); | 1087 | inject_ud(vcpu); |
@@ -1275,7 +1282,7 @@ static int (*svm_exit_handlers[])(struct kvm_vcpu *vcpu, | |||
1275 | [SVM_EXIT_TASK_SWITCH] = task_switch_interception, | 1282 | [SVM_EXIT_TASK_SWITCH] = task_switch_interception, |
1276 | [SVM_EXIT_SHUTDOWN] = shutdown_interception, | 1283 | [SVM_EXIT_SHUTDOWN] = shutdown_interception, |
1277 | [SVM_EXIT_VMRUN] = invalid_op_interception, | 1284 | [SVM_EXIT_VMRUN] = invalid_op_interception, |
1278 | [SVM_EXIT_VMMCALL] = invalid_op_interception, | 1285 | [SVM_EXIT_VMMCALL] = vmmcall_interception, |
1279 | [SVM_EXIT_VMLOAD] = invalid_op_interception, | 1286 | [SVM_EXIT_VMLOAD] = invalid_op_interception, |
1280 | [SVM_EXIT_VMSAVE] = invalid_op_interception, | 1287 | [SVM_EXIT_VMSAVE] = invalid_op_interception, |
1281 | [SVM_EXIT_STGI] = invalid_op_interception, | 1288 | [SVM_EXIT_STGI] = invalid_op_interception, |
@@ -1297,7 +1304,7 @@ static int handle_exit(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) | |||
1297 | __FUNCTION__, vcpu->svm->vmcb->control.exit_int_info, | 1304 | __FUNCTION__, vcpu->svm->vmcb->control.exit_int_info, |
1298 | exit_code); | 1305 | exit_code); |
1299 | 1306 | ||
1300 | if (exit_code >= sizeof(svm_exit_handlers) / sizeof(*svm_exit_handlers) | 1307 | if (exit_code >= ARRAY_SIZE(svm_exit_handlers) |
1301 | || svm_exit_handlers[exit_code] == 0) { | 1308 | || svm_exit_handlers[exit_code] == 0) { |
1302 | kvm_run->exit_reason = KVM_EXIT_UNKNOWN; | 1309 | kvm_run->exit_reason = KVM_EXIT_UNKNOWN; |
1303 | printk(KERN_ERR "%s: 0x%x @ 0x%llx cr0 0x%lx rflags 0x%llx\n", | 1310 | printk(KERN_ERR "%s: 0x%x @ 0x%llx cr0 0x%lx rflags 0x%llx\n", |
@@ -1668,6 +1675,18 @@ static int is_disabled(void) | |||
1668 | return 0; | 1675 | return 0; |
1669 | } | 1676 | } |
1670 | 1677 | ||
1678 | static void | ||
1679 | svm_patch_hypercall(struct kvm_vcpu *vcpu, unsigned char *hypercall) | ||
1680 | { | ||
1681 | /* | ||
1682 | * Patch in the VMMCALL instruction: | ||
1683 | */ | ||
1684 | hypercall[0] = 0x0f; | ||
1685 | hypercall[1] = 0x01; | ||
1686 | hypercall[2] = 0xd9; | ||
1687 | hypercall[3] = 0xc3; | ||
1688 | } | ||
1689 | |||
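svm_patch_hypercall() writes 0f 01 d9 (VMMCALL) followed by c3 (RET) over a stub supplied by the guest, and the VMX counterpart later in this diff writes 0f 01 c1 (VMCALL) the same way; both instructions are three bytes, which is why vmmcall_interception() above and handle_vmcall() in vmx.c advance the guest RIP by 3 before calling kvm_hypercall(). For reference, the two byte sequences the hooks emit:

    /* what the guest's hypercall stub contains after patching */
    static const unsigned char svm_hypercall_insn[4] = { 0x0f, 0x01, 0xd9, 0xc3 }; /* vmmcall; ret */
    static const unsigned char vmx_hypercall_insn[4] = { 0x0f, 0x01, 0xc1, 0xc3 }; /* vmcall;  ret  */

The guest simply calls into that (executable) buffer; how the buffer is registered with the host is part of the paravirt interface and is not shown in this hunk.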
1671 | static struct kvm_arch_ops svm_arch_ops = { | 1690 | static struct kvm_arch_ops svm_arch_ops = { |
1672 | .cpu_has_kvm_support = has_svm, | 1691 | .cpu_has_kvm_support = has_svm, |
1673 | .disabled_by_bios = is_disabled, | 1692 | .disabled_by_bios = is_disabled, |
@@ -1716,6 +1735,7 @@ static struct kvm_arch_ops svm_arch_ops = { | |||
1716 | .run = svm_vcpu_run, | 1735 | .run = svm_vcpu_run, |
1717 | .skip_emulated_instruction = skip_emulated_instruction, | 1736 | .skip_emulated_instruction = skip_emulated_instruction, |
1718 | .vcpu_setup = svm_vcpu_setup, | 1737 | .vcpu_setup = svm_vcpu_setup, |
1738 | .patch_hypercall = svm_patch_hypercall, | ||
1719 | }; | 1739 | }; |
1720 | 1740 | ||
1721 | static int __init svm_init(void) | 1741 | static int __init svm_init(void) |
diff --git a/drivers/kvm/vmx.c b/drivers/kvm/vmx.c index fd4e91734388..c07178e61122 100644 --- a/drivers/kvm/vmx.c +++ b/drivers/kvm/vmx.c | |||
@@ -19,6 +19,7 @@ | |||
19 | #include "vmx.h" | 19 | #include "vmx.h" |
20 | #include "kvm_vmx.h" | 20 | #include "kvm_vmx.h" |
21 | #include <linux/module.h> | 21 | #include <linux/module.h> |
22 | #include <linux/kernel.h> | ||
22 | #include <linux/mm.h> | 23 | #include <linux/mm.h> |
23 | #include <linux/highmem.h> | 24 | #include <linux/highmem.h> |
24 | #include <linux/profile.h> | 25 | #include <linux/profile.h> |
@@ -27,7 +28,6 @@ | |||
27 | 28 | ||
28 | #include "segment_descriptor.h" | 29 | #include "segment_descriptor.h" |
29 | 30 | ||
30 | |||
31 | MODULE_AUTHOR("Qumranet"); | 31 | MODULE_AUTHOR("Qumranet"); |
32 | MODULE_LICENSE("GPL"); | 32 | MODULE_LICENSE("GPL"); |
33 | 33 | ||
@@ -76,7 +76,7 @@ static const u32 vmx_msr_index[] = { | |||
76 | #endif | 76 | #endif |
77 | MSR_EFER, MSR_K6_STAR, | 77 | MSR_EFER, MSR_K6_STAR, |
78 | }; | 78 | }; |
79 | #define NR_VMX_MSR (sizeof(vmx_msr_index) / sizeof(*vmx_msr_index)) | 79 | #define NR_VMX_MSR ARRAY_SIZE(vmx_msr_index) |
80 | 80 | ||
81 | static inline int is_page_fault(u32 intr_info) | 81 | static inline int is_page_fault(u32 intr_info) |
82 | { | 82 | { |
@@ -204,7 +204,7 @@ static void vmcs_write64(unsigned long field, u64 value) | |||
204 | * Switches to specified vcpu, until a matching vcpu_put(), but assumes | 204 | * Switches to specified vcpu, until a matching vcpu_put(), but assumes |
205 | * vcpu mutex is already taken. | 205 | * vcpu mutex is already taken. |
206 | */ | 206 | */ |
207 | static struct kvm_vcpu *vmx_vcpu_load(struct kvm_vcpu *vcpu) | 207 | static void vmx_vcpu_load(struct kvm_vcpu *vcpu) |
208 | { | 208 | { |
209 | u64 phys_addr = __pa(vcpu->vmcs); | 209 | u64 phys_addr = __pa(vcpu->vmcs); |
210 | int cpu; | 210 | int cpu; |
@@ -242,7 +242,6 @@ static struct kvm_vcpu *vmx_vcpu_load(struct kvm_vcpu *vcpu) | |||
242 | rdmsrl(MSR_IA32_SYSENTER_ESP, sysenter_esp); | 242 | rdmsrl(MSR_IA32_SYSENTER_ESP, sysenter_esp); |
243 | vmcs_writel(HOST_IA32_SYSENTER_ESP, sysenter_esp); /* 22.2.3 */ | 243 | vmcs_writel(HOST_IA32_SYSENTER_ESP, sysenter_esp); /* 22.2.3 */ |
244 | } | 244 | } |
245 | return vcpu; | ||
246 | } | 245 | } |
247 | 246 | ||
248 | static void vmx_vcpu_put(struct kvm_vcpu *vcpu) | 247 | static void vmx_vcpu_put(struct kvm_vcpu *vcpu) |
@@ -418,10 +417,9 @@ static int vmx_set_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 data) | |||
418 | case MSR_IA32_SYSENTER_ESP: | 417 | case MSR_IA32_SYSENTER_ESP: |
419 | vmcs_write32(GUEST_SYSENTER_ESP, data); | 418 | vmcs_write32(GUEST_SYSENTER_ESP, data); |
420 | break; | 419 | break; |
421 | case MSR_IA32_TIME_STAMP_COUNTER: { | 420 | case MSR_IA32_TIME_STAMP_COUNTER: |
422 | guest_write_tsc(data); | 421 | guest_write_tsc(data); |
423 | break; | 422 | break; |
424 | } | ||
425 | default: | 423 | default: |
426 | msr = find_msr_entry(vcpu, msr_index); | 424 | msr = find_msr_entry(vcpu, msr_index); |
427 | if (msr) { | 425 | if (msr) { |
@@ -793,6 +791,9 @@ static void vmx_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0) | |||
793 | */ | 791 | */ |
794 | static void vmx_set_cr0_no_modeswitch(struct kvm_vcpu *vcpu, unsigned long cr0) | 792 | static void vmx_set_cr0_no_modeswitch(struct kvm_vcpu *vcpu, unsigned long cr0) |
795 | { | 793 | { |
794 | if (!vcpu->rmode.active && !(cr0 & CR0_PE_MASK)) | ||
795 | enter_rmode(vcpu); | ||
796 | |||
796 | vcpu->rmode.active = ((cr0 & CR0_PE_MASK) == 0); | 797 | vcpu->rmode.active = ((cr0 & CR0_PE_MASK) == 0); |
797 | update_exception_bitmap(vcpu); | 798 | update_exception_bitmap(vcpu); |
798 | vmcs_writel(CR0_READ_SHADOW, cr0); | 799 | vmcs_writel(CR0_READ_SHADOW, cr0); |
@@ -1467,6 +1468,18 @@ static int handle_io(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) | |||
1467 | return 0; | 1468 | return 0; |
1468 | } | 1469 | } |
1469 | 1470 | ||
1471 | static void | ||
1472 | vmx_patch_hypercall(struct kvm_vcpu *vcpu, unsigned char *hypercall) | ||
1473 | { | ||
1474 | /* | ||
1475 | * Patch in the VMCALL instruction: | ||
1476 | */ | ||
1477 | hypercall[0] = 0x0f; | ||
1478 | hypercall[1] = 0x01; | ||
1479 | hypercall[2] = 0xc1; | ||
1480 | hypercall[3] = 0xc3; | ||
1481 | } | ||
1482 | |||
1470 | static int handle_cr(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) | 1483 | static int handle_cr(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) |
1471 | { | 1484 | { |
1472 | u64 exit_qualification; | 1485 | u64 exit_qualification; |
@@ -1643,6 +1656,12 @@ static int handle_halt(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) | |||
1643 | return 0; | 1656 | return 0; |
1644 | } | 1657 | } |
1645 | 1658 | ||
1659 | static int handle_vmcall(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) | ||
1660 | { | ||
1661 | vmcs_writel(GUEST_RIP, vmcs_readl(GUEST_RIP)+3); | ||
1662 | return kvm_hypercall(vcpu, kvm_run); | ||
1663 | } | ||
1664 | |||
1646 | /* | 1665 | /* |
1647 | * The exit handlers return 1 if the exit was handled fully and guest execution | 1666 | * The exit handlers return 1 if the exit was handled fully and guest execution |
1648 | * may resume. Otherwise they set the kvm_run parameter to indicate what needs | 1667 | * may resume. Otherwise they set the kvm_run parameter to indicate what needs |
@@ -1661,6 +1680,7 @@ static int (*kvm_vmx_exit_handlers[])(struct kvm_vcpu *vcpu, | |||
1661 | [EXIT_REASON_MSR_WRITE] = handle_wrmsr, | 1680 | [EXIT_REASON_MSR_WRITE] = handle_wrmsr, |
1662 | [EXIT_REASON_PENDING_INTERRUPT] = handle_interrupt_window, | 1681 | [EXIT_REASON_PENDING_INTERRUPT] = handle_interrupt_window, |
1663 | [EXIT_REASON_HLT] = handle_halt, | 1682 | [EXIT_REASON_HLT] = handle_halt, |
1683 | [EXIT_REASON_VMCALL] = handle_vmcall, | ||
1664 | }; | 1684 | }; |
1665 | 1685 | ||
1666 | static const int kvm_vmx_max_exit_handlers = | 1686 | static const int kvm_vmx_max_exit_handlers = |
@@ -2062,6 +2082,7 @@ static struct kvm_arch_ops vmx_arch_ops = { | |||
2062 | .run = vmx_vcpu_run, | 2082 | .run = vmx_vcpu_run, |
2063 | .skip_emulated_instruction = skip_emulated_instruction, | 2083 | .skip_emulated_instruction = skip_emulated_instruction, |
2064 | .vcpu_setup = vmx_vcpu_setup, | 2084 | .vcpu_setup = vmx_vcpu_setup, |
2085 | .patch_hypercall = vmx_patch_hypercall, | ||
2065 | }; | 2086 | }; |
2066 | 2087 | ||
2067 | static int __init vmx_init(void) | 2088 | static int __init vmx_init(void) |
diff --git a/drivers/media/video/cx88/cx88-blackbird.c b/drivers/media/video/cx88/cx88-blackbird.c index a1be1e279df4..b0466b88f52c 100644 --- a/drivers/media/video/cx88/cx88-blackbird.c +++ b/drivers/media/video/cx88/cx88-blackbird.c | |||
@@ -56,7 +56,8 @@ MODULE_PARM_DESC(debug,"enable debug messages [blackbird]"); | |||
56 | 56 | ||
57 | /* ------------------------------------------------------------------ */ | 57 | /* ------------------------------------------------------------------ */ |
58 | 58 | ||
59 | #define BLACKBIRD_FIRM_IMAGE_SIZE 256*1024 | 59 | #define OLD_BLACKBIRD_FIRM_IMAGE_SIZE 262144 |
60 | #define BLACKBIRD_FIRM_IMAGE_SIZE 376836 | ||
60 | 61 | ||
61 | /* defines below are from ivtv-driver.h */ | 62 | /* defines below are from ivtv-driver.h */ |
62 | 63 | ||
@@ -404,7 +405,7 @@ static int blackbird_find_mailbox(struct cx8802_dev *dev) | |||
404 | u32 value; | 405 | u32 value; |
405 | int i; | 406 | int i; |
406 | 407 | ||
407 | for (i = 0; i < BLACKBIRD_FIRM_IMAGE_SIZE; i++) { | 408 | for (i = 0; i < dev->fw_size; i++) { |
408 | memory_read(dev->core, i, &value); | 409 | memory_read(dev->core, i, &value); |
409 | if (value == signature[signaturecnt]) | 410 | if (value == signature[signaturecnt]) |
410 | signaturecnt++; | 411 | signaturecnt++; |
@@ -452,12 +453,15 @@ static int blackbird_load_firmware(struct cx8802_dev *dev) | |||
452 | return -1; | 453 | return -1; |
453 | } | 454 | } |
454 | 455 | ||
455 | if (firmware->size != BLACKBIRD_FIRM_IMAGE_SIZE) { | 456 | if ((firmware->size != BLACKBIRD_FIRM_IMAGE_SIZE) && |
456 | dprintk(0, "ERROR: Firmware size mismatch (have %zd, expected %d)\n", | 457 | (firmware->size != OLD_BLACKBIRD_FIRM_IMAGE_SIZE)) { |
457 | firmware->size, BLACKBIRD_FIRM_IMAGE_SIZE); | 458 | dprintk(0, "ERROR: Firmware size mismatch (have %zd, expected %d or %d)\n", |
459 | firmware->size, BLACKBIRD_FIRM_IMAGE_SIZE, | ||
460 | OLD_BLACKBIRD_FIRM_IMAGE_SIZE); | ||
458 | release_firmware(firmware); | 461 | release_firmware(firmware); |
459 | return -1; | 462 | return -1; |
460 | } | 463 | } |
464 | dev->fw_size = firmware->size; | ||
461 | 465 | ||
462 | if (0 != memcmp(firmware->data, magic, 8)) { | 466 | if (0 != memcmp(firmware->data, magic, 8)) { |
463 | dprintk(0, "ERROR: Firmware magic mismatch, wrong file?\n"); | 467 | dprintk(0, "ERROR: Firmware magic mismatch, wrong file?\n"); |
diff --git a/drivers/media/video/cx88/cx88.h b/drivers/media/video/cx88/cx88.h index d2ecfba9bb4d..a4f7befda5b0 100644 --- a/drivers/media/video/cx88/cx88.h +++ b/drivers/media/video/cx88/cx88.h | |||
@@ -463,6 +463,7 @@ struct cx8802_dev { | |||
463 | u32 mailbox; | 463 | u32 mailbox; |
464 | int width; | 464 | int width; |
465 | int height; | 465 | int height; |
466 | int fw_size; | ||
466 | 467 | ||
467 | #if defined(CONFIG_VIDEO_BUF_DVB) || defined(CONFIG_VIDEO_BUF_DVB_MODULE) | 468 | #if defined(CONFIG_VIDEO_BUF_DVB) || defined(CONFIG_VIDEO_BUF_DVB_MODULE) |
468 | /* for dvb only */ | 469 | /* for dvb only */ |
diff --git a/drivers/media/video/pvrusb2/pvrusb2-hdw.c b/drivers/media/video/pvrusb2/pvrusb2-hdw.c index 1cd4bb3ae260..1ff5138e4bb6 100644 --- a/drivers/media/video/pvrusb2/pvrusb2-hdw.c +++ b/drivers/media/video/pvrusb2/pvrusb2-hdw.c | |||
@@ -1268,7 +1268,7 @@ int pvr2_upload_firmware2(struct pvr2_hdw *hdw) | |||
1268 | if (fw_len % sizeof(u32)) { | 1268 | if (fw_len % sizeof(u32)) { |
1269 | pvr2_trace(PVR2_TRACE_ERROR_LEGS, | 1269 | pvr2_trace(PVR2_TRACE_ERROR_LEGS, |
1270 | "size of %s firmware" | 1270 | "size of %s firmware" |
1271 | " must be a multiple of %u bytes", | 1271 | " must be a multiple of %zu bytes", |
1272 | fw_files[fwidx],sizeof(u32)); | 1272 | fw_files[fwidx],sizeof(u32)); |
1273 | release_firmware(fw_entry); | 1273 | release_firmware(fw_entry); |
1274 | return -1; | 1274 | return -1; |
diff --git a/drivers/net/8139cp.c b/drivers/net/8139cp.c index 6f93a765e564..12c8453f44bc 100644 --- a/drivers/net/8139cp.c +++ b/drivers/net/8139cp.c | |||
@@ -448,8 +448,7 @@ static void cp_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid) | |||
448 | spin_lock_irqsave(&cp->lock, flags); | 448 | spin_lock_irqsave(&cp->lock, flags); |
449 | cp->cpcmd &= ~RxVlanOn; | 449 | cp->cpcmd &= ~RxVlanOn; |
450 | cpw16(CpCmd, cp->cpcmd); | 450 | cpw16(CpCmd, cp->cpcmd); |
451 | if (cp->vlgrp) | 451 | vlan_group_set_device(cp->vlgrp, vid, NULL); |
452 | cp->vlgrp->vlan_devices[vid] = NULL; | ||
453 | spin_unlock_irqrestore(&cp->lock, flags); | 452 | spin_unlock_irqrestore(&cp->lock, flags); |
454 | } | 453 | } |
455 | #endif /* CP_VLAN_TAG_USED */ | 454 | #endif /* CP_VLAN_TAG_USED */ |
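This hunk and the remaining network-driver hunks below all make the same conversion: direct writes to vlgrp->vlan_devices[] become vlan_group_set_device()/vlan_group_get_device() calls, and the drivers' own "if (vlgrp)" guards go away because the setter tolerates a NULL group. A simplified sketch of the accessors; the real helpers introduced alongside this conversion also split the device table into smaller chunks, which this sketch flattens:

    /* roughly what <linux/if_vlan.h> provides after this series */
    static inline struct net_device *vlan_group_get_device(struct vlan_group *vg,
                                                           int vlan_id)
    {
            return vg ? vg->vlan_devices[vlan_id] : NULL;
    }

    static inline void vlan_group_set_device(struct vlan_group *vg, int vlan_id,
                                             struct net_device *dev)
    {
            if (vg)
                    vg->vlan_devices[vlan_id] = dev;
    }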
diff --git a/drivers/net/acenic.c b/drivers/net/acenic.c index 33c6645455ae..7138e0e025bc 100644 --- a/drivers/net/acenic.c +++ b/drivers/net/acenic.c | |||
@@ -2293,10 +2293,7 @@ static void ace_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid) | |||
2293 | 2293 | ||
2294 | local_irq_save(flags); | 2294 | local_irq_save(flags); |
2295 | ace_mask_irq(dev); | 2295 | ace_mask_irq(dev); |
2296 | 2296 | vlan_group_set_device(ap->vlgrp, vid, NULL); | |
2297 | if (ap->vlgrp) | ||
2298 | ap->vlgrp->vlan_devices[vid] = NULL; | ||
2299 | |||
2300 | ace_unmask_irq(dev); | 2297 | ace_unmask_irq(dev); |
2301 | local_irq_restore(flags); | 2298 | local_irq_restore(flags); |
2302 | } | 2299 | } |
diff --git a/drivers/net/amd8111e.c b/drivers/net/amd8111e.c index 9c399aaefbdd..962c954c2d56 100644 --- a/drivers/net/amd8111e.c +++ b/drivers/net/amd8111e.c | |||
@@ -1737,8 +1737,7 @@ static void amd8111e_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid | |||
1737 | { | 1737 | { |
1738 | struct amd8111e_priv *lp = netdev_priv(dev); | 1738 | struct amd8111e_priv *lp = netdev_priv(dev); |
1739 | spin_lock_irq(&lp->lock); | 1739 | spin_lock_irq(&lp->lock); |
1740 | if (lp->vlgrp) | 1740 | vlan_group_set_device(lp->vlgrp, vid, NULL); |
1741 | lp->vlgrp->vlan_devices[vid] = NULL; | ||
1742 | spin_unlock_irq(&lp->lock); | 1741 | spin_unlock_irq(&lp->lock); |
1743 | } | 1742 | } |
1744 | #endif | 1743 | #endif |
diff --git a/drivers/net/atl1/atl1_main.c b/drivers/net/atl1/atl1_main.c index 65673485bb6b..88d4f70035bb 100644 --- a/drivers/net/atl1/atl1_main.c +++ b/drivers/net/atl1/atl1_main.c | |||
@@ -1252,8 +1252,7 @@ static void atl1_vlan_rx_kill_vid(struct net_device *netdev, u16 vid) | |||
1252 | 1252 | ||
1253 | spin_lock_irqsave(&adapter->lock, flags); | 1253 | spin_lock_irqsave(&adapter->lock, flags); |
1254 | /* atl1_irq_disable(adapter); */ | 1254 | /* atl1_irq_disable(adapter); */ |
1255 | if (adapter->vlgrp) | 1255 | vlan_group_set_device(adapter->vlgrp, vid, NULL); |
1256 | adapter->vlgrp->vlan_devices[vid] = NULL; | ||
1257 | /* atl1_irq_enable(adapter); */ | 1256 | /* atl1_irq_enable(adapter); */ |
1258 | spin_unlock_irqrestore(&adapter->lock, flags); | 1257 | spin_unlock_irqrestore(&adapter->lock, flags); |
1259 | /* We don't do Vlan filtering */ | 1258 | /* We don't do Vlan filtering */ |
@@ -1266,7 +1265,7 @@ static void atl1_restore_vlan(struct atl1_adapter *adapter) | |||
1266 | if (adapter->vlgrp) { | 1265 | if (adapter->vlgrp) { |
1267 | u16 vid; | 1266 | u16 vid; |
1268 | for (vid = 0; vid < VLAN_GROUP_ARRAY_LEN; vid++) { | 1267 | for (vid = 0; vid < VLAN_GROUP_ARRAY_LEN; vid++) { |
1269 | if (!adapter->vlgrp->vlan_devices[vid]) | 1268 | if (!vlan_group_get_device(adapter->vlgrp, vid)) |
1270 | continue; | 1269 | continue; |
1271 | atl1_vlan_rx_add_vid(adapter->netdev, vid); | 1270 | atl1_vlan_rx_add_vid(adapter->netdev, vid); |
1272 | } | 1271 | } |
diff --git a/drivers/net/bnx2.c b/drivers/net/bnx2.c index 5a96d7611af1..c12e5ea61819 100644 --- a/drivers/net/bnx2.c +++ b/drivers/net/bnx2.c | |||
@@ -4467,9 +4467,7 @@ bnx2_vlan_rx_kill_vid(struct net_device *dev, uint16_t vid) | |||
4467 | struct bnx2 *bp = netdev_priv(dev); | 4467 | struct bnx2 *bp = netdev_priv(dev); |
4468 | 4468 | ||
4469 | bnx2_netif_stop(bp); | 4469 | bnx2_netif_stop(bp); |
4470 | 4470 | vlan_group_set_device(bp->vlgrp, vid, NULL); | |
4471 | if (bp->vlgrp) | ||
4472 | bp->vlgrp->vlan_devices[vid] = NULL; | ||
4473 | bnx2_set_rx_mode(dev); | 4471 | bnx2_set_rx_mode(dev); |
4474 | 4472 | ||
4475 | bnx2_netif_start(bp); | 4473 | bnx2_netif_start(bp); |
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c index a7c8f98a890c..ea73ebff4387 100644 --- a/drivers/net/bonding/bond_main.c +++ b/drivers/net/bonding/bond_main.c | |||
@@ -488,9 +488,9 @@ static void bond_vlan_rx_kill_vid(struct net_device *bond_dev, uint16_t vid) | |||
488 | /* Save and then restore vlan_dev in the grp array, | 488 | /* Save and then restore vlan_dev in the grp array, |
489 | * since the slave's driver might clear it. | 489 | * since the slave's driver might clear it. |
490 | */ | 490 | */ |
491 | vlan_dev = bond->vlgrp->vlan_devices[vid]; | 491 | vlan_dev = vlan_group_get_device(bond->vlgrp, vid); |
492 | slave_dev->vlan_rx_kill_vid(slave_dev, vid); | 492 | slave_dev->vlan_rx_kill_vid(slave_dev, vid); |
493 | bond->vlgrp->vlan_devices[vid] = vlan_dev; | 493 | vlan_group_set_device(bond->vlgrp, vid, vlan_dev); |
494 | } | 494 | } |
495 | } | 495 | } |
496 | 496 | ||
@@ -550,9 +550,9 @@ static void bond_del_vlans_from_slave(struct bonding *bond, struct net_device *s | |||
550 | /* Save and then restore vlan_dev in the grp array, | 550 | /* Save and then restore vlan_dev in the grp array, |
551 | * since the slave's driver might clear it. | 551 | * since the slave's driver might clear it. |
552 | */ | 552 | */ |
553 | vlan_dev = bond->vlgrp->vlan_devices[vlan->vlan_id]; | 553 | vlan_dev = vlan_group_get_device(bond->vlgrp, vlan->vlan_id); |
554 | slave_dev->vlan_rx_kill_vid(slave_dev, vlan->vlan_id); | 554 | slave_dev->vlan_rx_kill_vid(slave_dev, vlan->vlan_id); |
555 | bond->vlgrp->vlan_devices[vlan->vlan_id] = vlan_dev; | 555 | vlan_group_set_device(bond->vlgrp, vlan->vlan_id, vlan_dev); |
556 | } | 556 | } |
557 | 557 | ||
558 | unreg: | 558 | unreg: |
@@ -2397,7 +2397,7 @@ static void bond_arp_send_all(struct bonding *bond, struct slave *slave) | |||
2397 | vlan_id = 0; | 2397 | vlan_id = 0; |
2398 | list_for_each_entry_safe(vlan, vlan_next, &bond->vlan_list, | 2398 | list_for_each_entry_safe(vlan, vlan_next, &bond->vlan_list, |
2399 | vlan_list) { | 2399 | vlan_list) { |
2400 | vlan_dev = bond->vlgrp->vlan_devices[vlan->vlan_id]; | 2400 | vlan_dev = vlan_group_get_device(bond->vlgrp, vlan->vlan_id); |
2401 | if (vlan_dev == rt->u.dst.dev) { | 2401 | if (vlan_dev == rt->u.dst.dev) { |
2402 | vlan_id = vlan->vlan_id; | 2402 | vlan_id = vlan->vlan_id; |
2403 | dprintk("basa: vlan match on %s %d\n", | 2403 | dprintk("basa: vlan match on %s %d\n", |
@@ -2444,7 +2444,7 @@ static void bond_send_gratuitous_arp(struct bonding *bond) | |||
2444 | } | 2444 | } |
2445 | 2445 | ||
2446 | list_for_each_entry(vlan, &bond->vlan_list, vlan_list) { | 2446 | list_for_each_entry(vlan, &bond->vlan_list, vlan_list) { |
2447 | vlan_dev = bond->vlgrp->vlan_devices[vlan->vlan_id]; | 2447 | vlan_dev = vlan_group_get_device(bond->vlgrp, vlan->vlan_id); |
2448 | if (vlan->vlan_ip) { | 2448 | if (vlan->vlan_ip) { |
2449 | bond_arp_send(slave->dev, ARPOP_REPLY, vlan->vlan_ip, | 2449 | bond_arp_send(slave->dev, ARPOP_REPLY, vlan->vlan_ip, |
2450 | vlan->vlan_ip, vlan->vlan_id); | 2450 | vlan->vlan_ip, vlan->vlan_id); |
@@ -3371,7 +3371,7 @@ static int bond_inetaddr_event(struct notifier_block *this, unsigned long event, | |||
3371 | 3371 | ||
3372 | list_for_each_entry_safe(vlan, vlan_next, &bond->vlan_list, | 3372 | list_for_each_entry_safe(vlan, vlan_next, &bond->vlan_list, |
3373 | vlan_list) { | 3373 | vlan_list) { |
3374 | vlan_dev = bond->vlgrp->vlan_devices[vlan->vlan_id]; | 3374 | vlan_dev = vlan_group_get_device(bond->vlgrp, vlan->vlan_id); |
3375 | if (vlan_dev == event_dev) { | 3375 | if (vlan_dev == event_dev) { |
3376 | switch (event) { | 3376 | switch (event) { |
3377 | case NETDEV_UP: | 3377 | case NETDEV_UP: |
diff --git a/drivers/net/chelsio/cxgb2.c b/drivers/net/chelsio/cxgb2.c index 7d0f24f69777..125c9b105869 100644 --- a/drivers/net/chelsio/cxgb2.c +++ b/drivers/net/chelsio/cxgb2.c | |||
@@ -889,8 +889,7 @@ static void vlan_rx_kill_vid(struct net_device *dev, unsigned short vid) | |||
889 | struct adapter *adapter = dev->priv; | 889 | struct adapter *adapter = dev->priv; |
890 | 890 | ||
891 | spin_lock_irq(&adapter->async_lock); | 891 | spin_lock_irq(&adapter->async_lock); |
892 | if (adapter->vlan_grp) | 892 | vlan_group_set_device(adapter->vlan_grp, vid, NULL); |
893 | adapter->vlan_grp->vlan_devices[vid] = NULL; | ||
894 | spin_unlock_irq(&adapter->async_lock); | 893 | spin_unlock_irq(&adapter->async_lock); |
895 | } | 894 | } |
896 | #endif | 895 | #endif |
diff --git a/drivers/net/cxgb3/cxgb3_offload.c b/drivers/net/cxgb3/cxgb3_offload.c index b2cf5f6feb4a..f6ed033efb56 100644 --- a/drivers/net/cxgb3/cxgb3_offload.c +++ b/drivers/net/cxgb3/cxgb3_offload.c | |||
@@ -160,14 +160,16 @@ static struct net_device *get_iff_from_mac(struct adapter *adapter, | |||
160 | int i; | 160 | int i; |
161 | 161 | ||
162 | for_each_port(adapter, i) { | 162 | for_each_port(adapter, i) { |
163 | const struct vlan_group *grp; | 163 | struct vlan_group *grp; |
164 | struct net_device *dev = adapter->port[i]; | 164 | struct net_device *dev = adapter->port[i]; |
165 | const struct port_info *p = netdev_priv(dev); | 165 | const struct port_info *p = netdev_priv(dev); |
166 | 166 | ||
167 | if (!memcmp(dev->dev_addr, mac, ETH_ALEN)) { | 167 | if (!memcmp(dev->dev_addr, mac, ETH_ALEN)) { |
168 | if (vlan && vlan != VLAN_VID_MASK) { | 168 | if (vlan && vlan != VLAN_VID_MASK) { |
169 | grp = p->vlan_grp; | 169 | grp = p->vlan_grp; |
170 | dev = grp ? grp->vlan_devices[vlan] : NULL; | 170 | dev = NULL; |
171 | if (grp) | ||
172 | dev = vlan_group_get_device(grp, vlan); | ||
171 | } else | 173 | } else |
172 | while (dev->master) | 174 | while (dev->master) |
173 | dev = dev->master; | 175 | dev = dev->master; |
diff --git a/drivers/net/e1000/e1000_main.c b/drivers/net/e1000/e1000_main.c index 98215fdd7d10..1d08e937af82 100644 --- a/drivers/net/e1000/e1000_main.c +++ b/drivers/net/e1000/e1000_main.c | |||
@@ -376,7 +376,7 @@ e1000_update_mng_vlan(struct e1000_adapter *adapter) | |||
376 | uint16_t vid = adapter->hw.mng_cookie.vlan_id; | 376 | uint16_t vid = adapter->hw.mng_cookie.vlan_id; |
377 | uint16_t old_vid = adapter->mng_vlan_id; | 377 | uint16_t old_vid = adapter->mng_vlan_id; |
378 | if (adapter->vlgrp) { | 378 | if (adapter->vlgrp) { |
379 | if (!adapter->vlgrp->vlan_devices[vid]) { | 379 | if (!vlan_group_get_device(adapter->vlgrp, vid)) { |
380 | if (adapter->hw.mng_cookie.status & | 380 | if (adapter->hw.mng_cookie.status & |
381 | E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT) { | 381 | E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT) { |
382 | e1000_vlan_rx_add_vid(netdev, vid); | 382 | e1000_vlan_rx_add_vid(netdev, vid); |
@@ -386,7 +386,7 @@ e1000_update_mng_vlan(struct e1000_adapter *adapter) | |||
386 | 386 | ||
387 | if ((old_vid != (uint16_t)E1000_MNG_VLAN_NONE) && | 387 | if ((old_vid != (uint16_t)E1000_MNG_VLAN_NONE) && |
388 | (vid != old_vid) && | 388 | (vid != old_vid) && |
389 | !adapter->vlgrp->vlan_devices[old_vid]) | 389 | !vlan_group_get_device(adapter->vlgrp, old_vid)) |
390 | e1000_vlan_rx_kill_vid(netdev, old_vid); | 390 | e1000_vlan_rx_kill_vid(netdev, old_vid); |
391 | } else | 391 | } else |
392 | adapter->mng_vlan_id = vid; | 392 | adapter->mng_vlan_id = vid; |
@@ -1482,7 +1482,7 @@ e1000_close(struct net_device *netdev) | |||
1482 | if ((adapter->hw.mng_cookie.status & | 1482 | if ((adapter->hw.mng_cookie.status & |
1483 | E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT) && | 1483 | E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT) && |
1484 | !(adapter->vlgrp && | 1484 | !(adapter->vlgrp && |
1485 | adapter->vlgrp->vlan_devices[adapter->mng_vlan_id])) { | 1485 | vlan_group_get_device(adapter->vlgrp, adapter->mng_vlan_id))) { |
1486 | e1000_vlan_rx_kill_vid(netdev, adapter->mng_vlan_id); | 1486 | e1000_vlan_rx_kill_vid(netdev, adapter->mng_vlan_id); |
1487 | } | 1487 | } |
1488 | 1488 | ||
@@ -4998,10 +4998,7 @@ e1000_vlan_rx_kill_vid(struct net_device *netdev, uint16_t vid) | |||
4998 | uint32_t vfta, index; | 4998 | uint32_t vfta, index; |
4999 | 4999 | ||
5000 | e1000_irq_disable(adapter); | 5000 | e1000_irq_disable(adapter); |
5001 | 5001 | vlan_group_set_device(adapter->vlgrp, vid, NULL); | |
5002 | if (adapter->vlgrp) | ||
5003 | adapter->vlgrp->vlan_devices[vid] = NULL; | ||
5004 | |||
5005 | e1000_irq_enable(adapter); | 5002 | e1000_irq_enable(adapter); |
5006 | 5003 | ||
5007 | if ((adapter->hw.mng_cookie.status & | 5004 | if ((adapter->hw.mng_cookie.status & |
@@ -5027,7 +5024,7 @@ e1000_restore_vlan(struct e1000_adapter *adapter) | |||
5027 | if (adapter->vlgrp) { | 5024 | if (adapter->vlgrp) { |
5028 | uint16_t vid; | 5025 | uint16_t vid; |
5029 | for (vid = 0; vid < VLAN_GROUP_ARRAY_LEN; vid++) { | 5026 | for (vid = 0; vid < VLAN_GROUP_ARRAY_LEN; vid++) { |
5030 | if (!adapter->vlgrp->vlan_devices[vid]) | 5027 | if (!vlan_group_get_device(adapter->vlgrp, vid)) |
5031 | continue; | 5028 | continue; |
5032 | e1000_vlan_rx_add_vid(adapter->netdev, vid); | 5029 | e1000_vlan_rx_add_vid(adapter->netdev, vid); |
5033 | } | 5030 | } |
diff --git a/drivers/net/ehea/ehea_main.c b/drivers/net/ehea/ehea_main.c index 88ad1c8bcee4..0e4042bc0a48 100644 --- a/drivers/net/ehea/ehea_main.c +++ b/drivers/net/ehea/ehea_main.c | |||
@@ -1939,8 +1939,7 @@ static void ehea_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid) | |||
1939 | int index; | 1939 | int index; |
1940 | u64 hret; | 1940 | u64 hret; |
1941 | 1941 | ||
1942 | if (port->vgrp) | 1942 | vlan_group_set_device(port->vgrp, vid, NULL); |
1943 | port->vgrp->vlan_devices[vid] = NULL; | ||
1944 | 1943 | ||
1945 | cb1 = kzalloc(PAGE_SIZE, GFP_KERNEL); | 1944 | cb1 = kzalloc(PAGE_SIZE, GFP_KERNEL); |
1946 | if (!cb1) { | 1945 | if (!cb1) { |
diff --git a/drivers/net/gianfar.c b/drivers/net/gianfar.c index 1f83988a6a64..02b61b85b62c 100644 --- a/drivers/net/gianfar.c +++ b/drivers/net/gianfar.c | |||
@@ -1132,8 +1132,7 @@ static void gfar_vlan_rx_kill_vid(struct net_device *dev, uint16_t vid) | |||
1132 | 1132 | ||
1133 | spin_lock_irqsave(&priv->rxlock, flags); | 1133 | spin_lock_irqsave(&priv->rxlock, flags); |
1134 | 1134 | ||
1135 | if (priv->vlgrp) | 1135 | vlan_group_set_device(priv->vlgrp, vid, NULL); |
1136 | priv->vlgrp->vlan_devices[vid] = NULL; | ||
1137 | 1136 | ||
1138 | spin_unlock_irqrestore(&priv->rxlock, flags); | 1137 | spin_unlock_irqrestore(&priv->rxlock, flags); |
1139 | } | 1138 | } |
diff --git a/drivers/net/ixgb/ixgb_main.c b/drivers/net/ixgb/ixgb_main.c index 0c3682889344..afc2ec72529e 100644 --- a/drivers/net/ixgb/ixgb_main.c +++ b/drivers/net/ixgb/ixgb_main.c | |||
@@ -2213,8 +2213,7 @@ ixgb_vlan_rx_kill_vid(struct net_device *netdev, uint16_t vid) | |||
2213 | 2213 | ||
2214 | ixgb_irq_disable(adapter); | 2214 | ixgb_irq_disable(adapter); |
2215 | 2215 | ||
2216 | if(adapter->vlgrp) | 2216 | vlan_group_set_device(adapter->vlgrp, vid, NULL); |
2217 | adapter->vlgrp->vlan_devices[vid] = NULL; | ||
2218 | 2217 | ||
2219 | ixgb_irq_enable(adapter); | 2218 | ixgb_irq_enable(adapter); |
2220 | 2219 | ||
@@ -2234,7 +2233,7 @@ ixgb_restore_vlan(struct ixgb_adapter *adapter) | |||
2234 | if(adapter->vlgrp) { | 2233 | if(adapter->vlgrp) { |
2235 | uint16_t vid; | 2234 | uint16_t vid; |
2236 | for(vid = 0; vid < VLAN_GROUP_ARRAY_LEN; vid++) { | 2235 | for(vid = 0; vid < VLAN_GROUP_ARRAY_LEN; vid++) { |
2237 | if(!adapter->vlgrp->vlan_devices[vid]) | 2236 | if(!vlan_group_get_device(adapter->vlgrp, vid)) |
2238 | continue; | 2237 | continue; |
2239 | ixgb_vlan_rx_add_vid(adapter->netdev, vid); | 2238 | ixgb_vlan_rx_add_vid(adapter->netdev, vid); |
2240 | } | 2239 | } |
diff --git a/drivers/net/ns83820.c b/drivers/net/ns83820.c index 568daeb3e9d8..9ec6e9e54f47 100644 --- a/drivers/net/ns83820.c +++ b/drivers/net/ns83820.c | |||
@@ -514,8 +514,7 @@ static void ns83820_vlan_rx_kill_vid(struct net_device *ndev, unsigned short vid | |||
514 | 514 | ||
515 | spin_lock_irq(&dev->misc_lock); | 515 | spin_lock_irq(&dev->misc_lock); |
516 | spin_lock(&dev->tx_lock); | 516 | spin_lock(&dev->tx_lock); |
517 | if (dev->vlgrp) | 517 | vlan_group_set_device(dev->vlgrp, vid, NULL); |
518 | dev->vlgrp->vlan_devices[vid] = NULL; | ||
519 | spin_unlock(&dev->tx_lock); | 518 | spin_unlock(&dev->tx_lock); |
520 | spin_unlock_irq(&dev->misc_lock); | 519 | spin_unlock_irq(&dev->misc_lock); |
521 | } | 520 | } |
diff --git a/drivers/net/pppoe.c b/drivers/net/pppoe.c index 860bb0f60f68..86e56f1f2f0b 100644 --- a/drivers/net/pppoe.c +++ b/drivers/net/pppoe.c | |||
@@ -7,6 +7,12 @@ | |||
7 | * | 7 | * |
8 | * Version: 0.7.0 | 8 | * Version: 0.7.0 |
9 | * | 9 | * |
10 | * 070228 : Fix to allow multiple sessions with same remote MAC and same | ||
11 | * session id by including the local device ifindex in the | ||
12 | * tuple identifying a session. This also ensures packets can't | ||
13 | * be injected into a session from interfaces other than the one | ||
14 | * specified by userspace. Florian Zumbiehl <florz@florz.de> | ||
15 | * (Oh, BTW, this one is YYMMDD, in case you were wondering ...) | ||
10 | * 220102 : Fix module use count on failure in pppoe_create, pppox_sk -acme | 16 | * 220102 : Fix module use count on failure in pppoe_create, pppox_sk -acme |
11 | * 030700 : Fixed connect logic to allow for disconnect. | 17 | * 030700 : Fixed connect logic to allow for disconnect. |
12 | * 270700 : Fixed potential SMP problems; we must protect against | 18 | * 270700 : Fixed potential SMP problems; we must protect against |
@@ -127,14 +133,14 @@ static struct pppox_sock *item_hash_table[PPPOE_HASH_SIZE]; | |||
127 | * Set/get/delete/rehash items (internal versions) | 133 | * Set/get/delete/rehash items (internal versions) |
128 | * | 134 | * |
129 | **********************************************************************/ | 135 | **********************************************************************/ |
130 | static struct pppox_sock *__get_item(unsigned long sid, unsigned char *addr) | 136 | static struct pppox_sock *__get_item(unsigned long sid, unsigned char *addr, int ifindex) |
131 | { | 137 | { |
132 | int hash = hash_item(sid, addr); | 138 | int hash = hash_item(sid, addr); |
133 | struct pppox_sock *ret; | 139 | struct pppox_sock *ret; |
134 | 140 | ||
135 | ret = item_hash_table[hash]; | 141 | ret = item_hash_table[hash]; |
136 | 142 | ||
137 | while (ret && !cmp_addr(&ret->pppoe_pa, sid, addr)) | 143 | while (ret && !(cmp_addr(&ret->pppoe_pa, sid, addr) && ret->pppoe_dev->ifindex == ifindex)) |
138 | ret = ret->next; | 144 | ret = ret->next; |
139 | 145 | ||
140 | return ret; | 146 | return ret; |
@@ -147,21 +153,19 @@ static int __set_item(struct pppox_sock *po) | |||
147 | 153 | ||
148 | ret = item_hash_table[hash]; | 154 | ret = item_hash_table[hash]; |
149 | while (ret) { | 155 | while (ret) { |
150 | if (cmp_2_addr(&ret->pppoe_pa, &po->pppoe_pa)) | 156 | if (cmp_2_addr(&ret->pppoe_pa, &po->pppoe_pa) && ret->pppoe_dev->ifindex == po->pppoe_dev->ifindex) |
151 | return -EALREADY; | 157 | return -EALREADY; |
152 | 158 | ||
153 | ret = ret->next; | 159 | ret = ret->next; |
154 | } | 160 | } |
155 | 161 | ||
156 | if (!ret) { | 162 | po->next = item_hash_table[hash]; |
157 | po->next = item_hash_table[hash]; | 163 | item_hash_table[hash] = po; |
158 | item_hash_table[hash] = po; | ||
159 | } | ||
160 | 164 | ||
161 | return 0; | 165 | return 0; |
162 | } | 166 | } |
163 | 167 | ||
164 | static struct pppox_sock *__delete_item(unsigned long sid, char *addr) | 168 | static struct pppox_sock *__delete_item(unsigned long sid, char *addr, int ifindex) |
165 | { | 169 | { |
166 | int hash = hash_item(sid, addr); | 170 | int hash = hash_item(sid, addr); |
167 | struct pppox_sock *ret, **src; | 171 | struct pppox_sock *ret, **src; |
@@ -170,7 +174,7 @@ static struct pppox_sock *__delete_item(unsigned long sid, char *addr) | |||
170 | src = &item_hash_table[hash]; | 174 | src = &item_hash_table[hash]; |
171 | 175 | ||
172 | while (ret) { | 176 | while (ret) { |
173 | if (cmp_addr(&ret->pppoe_pa, sid, addr)) { | 177 | if (cmp_addr(&ret->pppoe_pa, sid, addr) && ret->pppoe_dev->ifindex == ifindex) { |
174 | *src = ret->next; | 178 | *src = ret->next; |
175 | break; | 179 | break; |
176 | } | 180 | } |
@@ -188,12 +192,12 @@ static struct pppox_sock *__delete_item(unsigned long sid, char *addr) | |||
188 | * | 192 | * |
189 | **********************************************************************/ | 193 | **********************************************************************/ |
190 | static inline struct pppox_sock *get_item(unsigned long sid, | 194 | static inline struct pppox_sock *get_item(unsigned long sid, |
191 | unsigned char *addr) | 195 | unsigned char *addr, int ifindex) |
192 | { | 196 | { |
193 | struct pppox_sock *po; | 197 | struct pppox_sock *po; |
194 | 198 | ||
195 | read_lock_bh(&pppoe_hash_lock); | 199 | read_lock_bh(&pppoe_hash_lock); |
196 | po = __get_item(sid, addr); | 200 | po = __get_item(sid, addr, ifindex); |
197 | if (po) | 201 | if (po) |
198 | sock_hold(sk_pppox(po)); | 202 | sock_hold(sk_pppox(po)); |
199 | read_unlock_bh(&pppoe_hash_lock); | 203 | read_unlock_bh(&pppoe_hash_lock); |
@@ -203,7 +207,15 @@ static inline struct pppox_sock *get_item(unsigned long sid, | |||
203 | 207 | ||
204 | static inline struct pppox_sock *get_item_by_addr(struct sockaddr_pppox *sp) | 208 | static inline struct pppox_sock *get_item_by_addr(struct sockaddr_pppox *sp) |
205 | { | 209 | { |
206 | return get_item(sp->sa_addr.pppoe.sid, sp->sa_addr.pppoe.remote); | 210 | struct net_device *dev = NULL; |
211 | int ifindex; | ||
212 | |||
213 | dev = dev_get_by_name(sp->sa_addr.pppoe.dev); | ||
214 | if(!dev) | ||
215 | return NULL; | ||
216 | ifindex = dev->ifindex; | ||
217 | dev_put(dev); | ||
218 | return get_item(sp->sa_addr.pppoe.sid, sp->sa_addr.pppoe.remote, ifindex); | ||
207 | } | 219 | } |
208 | 220 | ||
209 | static inline int set_item(struct pppox_sock *po) | 221 | static inline int set_item(struct pppox_sock *po) |
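The pppoe.c hunks above implement the 070228 changelog entry: a session is now identified by the triple (session id, remote MAC, local ifindex) rather than by (session id, remote MAC) alone, and get_item_by_addr() resolves the ifindex from the device name supplied by userspace before doing the lookup. A hedged sketch of the match predicate that __get_item()/__delete_item() now open-code (the helper name is illustrative; cmp_addr() is the existing macro used above):

        /* Illustrative only: the tuple that now uniquely identifies a session. */
        static int pppoe_session_matches(struct pppox_sock *po, unsigned long sid,
                                         unsigned char *remote, int ifindex)
        {
                return cmp_addr(&po->pppoe_pa, sid, remote) &&
                       po->pppoe_dev->ifindex == ifindex;
        }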
@@ -220,12 +232,12 @@ static inline int set_item(struct pppox_sock *po) | |||
220 | return i; | 232 | return i; |
221 | } | 233 | } |
222 | 234 | ||
223 | static inline struct pppox_sock *delete_item(unsigned long sid, char *addr) | 235 | static inline struct pppox_sock *delete_item(unsigned long sid, char *addr, int ifindex) |
224 | { | 236 | { |
225 | struct pppox_sock *ret; | 237 | struct pppox_sock *ret; |
226 | 238 | ||
227 | write_lock_bh(&pppoe_hash_lock); | 239 | write_lock_bh(&pppoe_hash_lock); |
228 | ret = __delete_item(sid, addr); | 240 | ret = __delete_item(sid, addr, ifindex); |
229 | write_unlock_bh(&pppoe_hash_lock); | 241 | write_unlock_bh(&pppoe_hash_lock); |
230 | 242 | ||
231 | return ret; | 243 | return ret; |
@@ -391,7 +403,7 @@ static int pppoe_rcv(struct sk_buff *skb, | |||
391 | 403 | ||
392 | ph = (struct pppoe_hdr *) skb->nh.raw; | 404 | ph = (struct pppoe_hdr *) skb->nh.raw; |
393 | 405 | ||
394 | po = get_item((unsigned long) ph->sid, eth_hdr(skb)->h_source); | 406 | po = get_item((unsigned long) ph->sid, eth_hdr(skb)->h_source, dev->ifindex); |
395 | if (po != NULL) | 407 | if (po != NULL) |
396 | return sk_receive_skb(sk_pppox(po), skb, 0); | 408 | return sk_receive_skb(sk_pppox(po), skb, 0); |
397 | drop: | 409 | drop: |
@@ -425,7 +437,7 @@ static int pppoe_disc_rcv(struct sk_buff *skb, | |||
425 | if (ph->code != PADT_CODE) | 437 | if (ph->code != PADT_CODE) |
426 | goto abort; | 438 | goto abort; |
427 | 439 | ||
428 | po = get_item((unsigned long) ph->sid, eth_hdr(skb)->h_source); | 440 | po = get_item((unsigned long) ph->sid, eth_hdr(skb)->h_source, dev->ifindex); |
429 | if (po) { | 441 | if (po) { |
430 | struct sock *sk = sk_pppox(po); | 442 | struct sock *sk = sk_pppox(po); |
431 | 443 | ||
@@ -517,7 +529,7 @@ static int pppoe_release(struct socket *sock) | |||
517 | 529 | ||
518 | po = pppox_sk(sk); | 530 | po = pppox_sk(sk); |
519 | if (po->pppoe_pa.sid) { | 531 | if (po->pppoe_pa.sid) { |
520 | delete_item(po->pppoe_pa.sid, po->pppoe_pa.remote); | 532 | delete_item(po->pppoe_pa.sid, po->pppoe_pa.remote, po->pppoe_dev->ifindex); |
521 | } | 533 | } |
522 | 534 | ||
523 | if (po->pppoe_dev) | 535 | if (po->pppoe_dev) |
@@ -539,7 +551,7 @@ static int pppoe_connect(struct socket *sock, struct sockaddr *uservaddr, | |||
539 | int sockaddr_len, int flags) | 551 | int sockaddr_len, int flags) |
540 | { | 552 | { |
541 | struct sock *sk = sock->sk; | 553 | struct sock *sk = sock->sk; |
542 | struct net_device *dev = NULL; | 554 | struct net_device *dev; |
543 | struct sockaddr_pppox *sp = (struct sockaddr_pppox *) uservaddr; | 555 | struct sockaddr_pppox *sp = (struct sockaddr_pppox *) uservaddr; |
544 | struct pppox_sock *po = pppox_sk(sk); | 556 | struct pppox_sock *po = pppox_sk(sk); |
545 | int error; | 557 | int error; |
@@ -565,7 +577,7 @@ static int pppoe_connect(struct socket *sock, struct sockaddr *uservaddr, | |||
565 | pppox_unbind_sock(sk); | 577 | pppox_unbind_sock(sk); |
566 | 578 | ||
567 | /* Delete the old binding */ | 579 | /* Delete the old binding */ |
568 | delete_item(po->pppoe_pa.sid,po->pppoe_pa.remote); | 580 | delete_item(po->pppoe_pa.sid,po->pppoe_pa.remote,po->pppoe_dev->ifindex); |
569 | 581 | ||
570 | if(po->pppoe_dev) | 582 | if(po->pppoe_dev) |
571 | dev_put(po->pppoe_dev); | 583 | dev_put(po->pppoe_dev); |
@@ -705,7 +717,7 @@ static int pppoe_ioctl(struct socket *sock, unsigned int cmd, | |||
705 | break; | 717 | break; |
706 | 718 | ||
707 | /* PPPoE address from the user specifies an outbound | 719 | /* PPPoE address from the user specifies an outbound |
708 | PPPoE address to which frames are forwarded to */ | 720 | PPPoE address which frames are forwarded to */ |
709 | err = -EFAULT; | 721 | err = -EFAULT; |
710 | if (copy_from_user(&po->pppoe_relay, | 722 | if (copy_from_user(&po->pppoe_relay, |
711 | (void __user *)arg, | 723 | (void __user *)arg, |
diff --git a/drivers/net/r8169.c b/drivers/net/r8169.c index 13cf06ee97f7..15d954e50cae 100644 --- a/drivers/net/r8169.c +++ b/drivers/net/r8169.c | |||
@@ -890,8 +890,7 @@ static void rtl8169_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid) | |||
890 | unsigned long flags; | 890 | unsigned long flags; |
891 | 891 | ||
892 | spin_lock_irqsave(&tp->lock, flags); | 892 | spin_lock_irqsave(&tp->lock, flags); |
893 | if (tp->vlgrp) | 893 | vlan_group_set_device(tp->vlgrp, vid, NULL); |
894 | tp->vlgrp->vlan_devices[vid] = NULL; | ||
895 | spin_unlock_irqrestore(&tp->lock, flags); | 894 | spin_unlock_irqrestore(&tp->lock, flags); |
896 | } | 895 | } |
897 | 896 | ||
diff --git a/drivers/net/s2io.c b/drivers/net/s2io.c index 44bb2395af84..46ebf141ee5a 100644 --- a/drivers/net/s2io.c +++ b/drivers/net/s2io.c | |||
@@ -325,8 +325,7 @@ static void s2io_vlan_rx_kill_vid(struct net_device *dev, unsigned long vid) | |||
325 | unsigned long flags; | 325 | unsigned long flags; |
326 | 326 | ||
327 | spin_lock_irqsave(&nic->tx_lock, flags); | 327 | spin_lock_irqsave(&nic->tx_lock, flags); |
328 | if (nic->vlgrp) | 328 | vlan_group_set_device(nic->vlgrp, vid, NULL); |
329 | nic->vlgrp->vlan_devices[vid] = NULL; | ||
330 | spin_unlock_irqrestore(&nic->tx_lock, flags); | 329 | spin_unlock_irqrestore(&nic->tx_lock, flags); |
331 | } | 330 | } |
332 | 331 | ||
diff --git a/drivers/net/sky2.c b/drivers/net/sky2.c index 52edbd7ac17e..53839979cfb8 100644 --- a/drivers/net/sky2.c +++ b/drivers/net/sky2.c | |||
@@ -1053,8 +1053,7 @@ static void sky2_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid) | |||
1053 | 1053 | ||
1054 | sky2_write32(hw, SK_REG(port, RX_GMF_CTRL_T), RX_VLAN_STRIP_OFF); | 1054 | sky2_write32(hw, SK_REG(port, RX_GMF_CTRL_T), RX_VLAN_STRIP_OFF); |
1055 | sky2_write32(hw, SK_REG(port, TX_GMF_CTRL_T), TX_VLAN_TAG_OFF); | 1055 | sky2_write32(hw, SK_REG(port, TX_GMF_CTRL_T), TX_VLAN_TAG_OFF); |
1056 | if (sky2->vlgrp) | 1056 | vlan_group_set_device(sky2->vlgrp, vid, NULL); |
1057 | sky2->vlgrp->vlan_devices[vid] = NULL; | ||
1058 | 1057 | ||
1059 | netif_tx_unlock_bh(dev); | 1058 | netif_tx_unlock_bh(dev); |
1060 | } | 1059 | } |
diff --git a/drivers/net/starfire.c b/drivers/net/starfire.c index bf873ea25797..8bba2e3da7e1 100644 --- a/drivers/net/starfire.c +++ b/drivers/net/starfire.c | |||
@@ -677,8 +677,7 @@ static void netdev_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid) | |||
677 | spin_lock(&np->lock); | 677 | spin_lock(&np->lock); |
678 | if (debug > 1) | 678 | if (debug > 1) |
679 | printk("%s: removing vlanid %d from vlan filter\n", dev->name, vid); | 679 | printk("%s: removing vlanid %d from vlan filter\n", dev->name, vid); |
680 | if (np->vlgrp) | 680 | vlan_group_set_device(np->vlgrp, vid, NULL); |
681 | np->vlgrp->vlan_devices[vid] = NULL; | ||
682 | set_rx_mode(dev); | 681 | set_rx_mode(dev); |
683 | spin_unlock(&np->lock); | 682 | spin_unlock(&np->lock); |
684 | } | 683 | } |
@@ -1738,7 +1737,7 @@ static void set_rx_mode(struct net_device *dev) | |||
1738 | int vlan_count = 0; | 1737 | int vlan_count = 0; |
1739 | void __iomem *filter_addr = ioaddr + HashTable + 8; | 1738 | void __iomem *filter_addr = ioaddr + HashTable + 8; |
1740 | for (i = 0; i < VLAN_VID_MASK; i++) { | 1739 | for (i = 0; i < VLAN_VID_MASK; i++) { |
1741 | if (np->vlgrp->vlan_devices[i]) { | 1740 | if (vlan_group_get_device(np->vlgrp, i)) { |
1742 | if (vlan_count >= 32) | 1741 | if (vlan_count >= 32) |
1743 | break; | 1742 | break; |
1744 | writew(cpu_to_be16(i), filter_addr); | 1743 | writew(cpu_to_be16(i), filter_addr); |
diff --git a/drivers/net/tg3.c b/drivers/net/tg3.c index 26c6ac48288c..8c8f9f4d47a5 100644 --- a/drivers/net/tg3.c +++ b/drivers/net/tg3.c | |||
@@ -9114,8 +9114,7 @@ static void tg3_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid) | |||
9114 | tg3_netif_stop(tp); | 9114 | tg3_netif_stop(tp); |
9115 | 9115 | ||
9116 | tg3_full_lock(tp, 0); | 9116 | tg3_full_lock(tp, 0); |
9117 | if (tp->vlgrp) | 9117 | vlan_group_set_device(tp->vlgrp, vid, NULL); |
9118 | tp->vlgrp->vlan_devices[vid] = NULL; | ||
9119 | tg3_full_unlock(tp); | 9118 | tg3_full_unlock(tp); |
9120 | 9119 | ||
9121 | if (netif_running(dev)) | 9120 | if (netif_running(dev)) |
diff --git a/drivers/net/typhoon.c b/drivers/net/typhoon.c index 9781b16bb8b6..0d91d094edd9 100644 --- a/drivers/net/typhoon.c +++ b/drivers/net/typhoon.c | |||
@@ -746,8 +746,7 @@ typhoon_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid) | |||
746 | { | 746 | { |
747 | struct typhoon *tp = netdev_priv(dev); | 747 | struct typhoon *tp = netdev_priv(dev); |
748 | spin_lock_bh(&tp->state_lock); | 748 | spin_lock_bh(&tp->state_lock); |
749 | if(tp->vlgrp) | 749 | vlan_group_set_device(tp->vlgrp, vid, NULL); |
750 | tp->vlgrp->vlan_devices[vid] = NULL; | ||
751 | spin_unlock_bh(&tp->state_lock); | 750 | spin_unlock_bh(&tp->state_lock); |
752 | } | 751 | } |
753 | 752 | ||
diff --git a/drivers/net/wan/hdlc.c b/drivers/net/wan/hdlc.c index 9040d7cf651e..65ad2e24caf0 100644 --- a/drivers/net/wan/hdlc.c +++ b/drivers/net/wan/hdlc.c | |||
@@ -38,7 +38,7 @@ | |||
38 | #include <linux/hdlc.h> | 38 | #include <linux/hdlc.h> |
39 | 39 | ||
40 | 40 | ||
41 | static const char* version = "HDLC support module revision 1.20"; | 41 | static const char* version = "HDLC support module revision 1.21"; |
42 | 42 | ||
43 | #undef DEBUG_LINK | 43 | #undef DEBUG_LINK |
44 | 44 | ||
@@ -222,19 +222,31 @@ int hdlc_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) | |||
222 | return -EINVAL; | 222 | return -EINVAL; |
223 | } | 223 | } |
224 | 224 | ||
225 | static void hdlc_setup_dev(struct net_device *dev) | ||
226 | { | ||
227 | /* Re-init all variables changed by HDLC protocol drivers, | ||
228 | * including ether_setup() called from hdlc_raw_eth.c. | ||
229 | */ | ||
230 | dev->get_stats = hdlc_get_stats; | ||
231 | dev->flags = IFF_POINTOPOINT | IFF_NOARP; | ||
232 | dev->mtu = HDLC_MAX_MTU; | ||
233 | dev->type = ARPHRD_RAWHDLC; | ||
234 | dev->hard_header_len = 16; | ||
235 | dev->addr_len = 0; | ||
236 | dev->hard_header = NULL; | ||
237 | dev->rebuild_header = NULL; | ||
238 | dev->set_mac_address = NULL; | ||
239 | dev->hard_header_cache = NULL; | ||
240 | dev->header_cache_update = NULL; | ||
241 | dev->change_mtu = hdlc_change_mtu; | ||
242 | dev->hard_header_parse = NULL; | ||
243 | } | ||
244 | |||
225 | static void hdlc_setup(struct net_device *dev) | 245 | static void hdlc_setup(struct net_device *dev) |
226 | { | 246 | { |
227 | hdlc_device *hdlc = dev_to_hdlc(dev); | 247 | hdlc_device *hdlc = dev_to_hdlc(dev); |
228 | 248 | ||
229 | dev->get_stats = hdlc_get_stats; | 249 | hdlc_setup_dev(dev); |
230 | dev->change_mtu = hdlc_change_mtu; | ||
231 | dev->mtu = HDLC_MAX_MTU; | ||
232 | |||
233 | dev->type = ARPHRD_RAWHDLC; | ||
234 | dev->hard_header_len = 16; | ||
235 | |||
236 | dev->flags = IFF_POINTOPOINT | IFF_NOARP; | ||
237 | |||
238 | hdlc->carrier = 1; | 250 | hdlc->carrier = 1; |
239 | hdlc->open = 0; | 251 | hdlc->open = 0; |
240 | spin_lock_init(&hdlc->state_lock); | 252 | spin_lock_init(&hdlc->state_lock); |
@@ -294,6 +306,7 @@ void detach_hdlc_protocol(struct net_device *dev) | |||
294 | } | 306 | } |
295 | kfree(hdlc->state); | 307 | kfree(hdlc->state); |
296 | hdlc->state = NULL; | 308 | hdlc->state = NULL; |
309 | hdlc_setup_dev(dev); | ||
297 | } | 310 | } |
298 | 311 | ||
299 | 312 | ||
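Moving the device re-initialization into hdlc_setup_dev() and calling it from detach_hdlc_protocol() is what allows the per-protocol ioctl handlers in the hunks that follow (hdlc_cisco, hdlc_fr, hdlc_ppp, hdlc_raw, hdlc_x25) to stop resetting hard_header, flags and addr_len themselves: anything a protocol may have changed is put back to raw-HDLC defaults when the protocol is detached. After the cleanup a protocol ioctl only sets what actually differs from those defaults, e.g. (condensed from the hdlc_cisco hunk below, not additional code):

        dev->hard_start_xmit = hdlc->xmit;
        dev->hard_header = cisco_hard_header;   /* protocol-specific framing */
        dev->type = ARPHRD_CISCO;
        netif_dormant_on(dev);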
diff --git a/drivers/net/wan/hdlc_cisco.c b/drivers/net/wan/hdlc_cisco.c index b0bc5ddcf1b1..c9664fd8a917 100644 --- a/drivers/net/wan/hdlc_cisco.c +++ b/drivers/net/wan/hdlc_cisco.c | |||
@@ -365,10 +365,7 @@ static int cisco_ioctl(struct net_device *dev, struct ifreq *ifr) | |||
365 | memcpy(&state(hdlc)->settings, &new_settings, size); | 365 | memcpy(&state(hdlc)->settings, &new_settings, size); |
366 | dev->hard_start_xmit = hdlc->xmit; | 366 | dev->hard_start_xmit = hdlc->xmit; |
367 | dev->hard_header = cisco_hard_header; | 367 | dev->hard_header = cisco_hard_header; |
368 | dev->hard_header_cache = NULL; | ||
369 | dev->type = ARPHRD_CISCO; | 368 | dev->type = ARPHRD_CISCO; |
370 | dev->flags = IFF_POINTOPOINT | IFF_NOARP; | ||
371 | dev->addr_len = 0; | ||
372 | netif_dormant_on(dev); | 369 | netif_dormant_on(dev); |
373 | return 0; | 370 | return 0; |
374 | } | 371 | } |
diff --git a/drivers/net/wan/hdlc_fr.c b/drivers/net/wan/hdlc_fr.c index b45ab680d2d6..c6c3c757d6f1 100644 --- a/drivers/net/wan/hdlc_fr.c +++ b/drivers/net/wan/hdlc_fr.c | |||
@@ -1289,10 +1289,7 @@ static int fr_ioctl(struct net_device *dev, struct ifreq *ifr) | |||
1289 | memcpy(&state(hdlc)->settings, &new_settings, size); | 1289 | memcpy(&state(hdlc)->settings, &new_settings, size); |
1290 | 1290 | ||
1291 | dev->hard_start_xmit = hdlc->xmit; | 1291 | dev->hard_start_xmit = hdlc->xmit; |
1292 | dev->hard_header = NULL; | ||
1293 | dev->type = ARPHRD_FRAD; | 1292 | dev->type = ARPHRD_FRAD; |
1294 | dev->flags = IFF_POINTOPOINT | IFF_NOARP; | ||
1295 | dev->addr_len = 0; | ||
1296 | return 0; | 1293 | return 0; |
1297 | 1294 | ||
1298 | case IF_PROTO_FR_ADD_PVC: | 1295 | case IF_PROTO_FR_ADD_PVC: |
diff --git a/drivers/net/wan/hdlc_ppp.c b/drivers/net/wan/hdlc_ppp.c index e9f717070fde..4591437dd2f3 100644 --- a/drivers/net/wan/hdlc_ppp.c +++ b/drivers/net/wan/hdlc_ppp.c | |||
@@ -127,9 +127,7 @@ static int ppp_ioctl(struct net_device *dev, struct ifreq *ifr) | |||
127 | if (result) | 127 | if (result) |
128 | return result; | 128 | return result; |
129 | dev->hard_start_xmit = hdlc->xmit; | 129 | dev->hard_start_xmit = hdlc->xmit; |
130 | dev->hard_header = NULL; | ||
131 | dev->type = ARPHRD_PPP; | 130 | dev->type = ARPHRD_PPP; |
132 | dev->addr_len = 0; | ||
133 | netif_dormant_off(dev); | 131 | netif_dormant_off(dev); |
134 | return 0; | 132 | return 0; |
135 | } | 133 | } |
diff --git a/drivers/net/wan/hdlc_raw.c b/drivers/net/wan/hdlc_raw.c index fe3cae5c6b9d..e23bc6656267 100644 --- a/drivers/net/wan/hdlc_raw.c +++ b/drivers/net/wan/hdlc_raw.c | |||
@@ -88,10 +88,7 @@ static int raw_ioctl(struct net_device *dev, struct ifreq *ifr) | |||
88 | return result; | 88 | return result; |
89 | memcpy(hdlc->state, &new_settings, size); | 89 | memcpy(hdlc->state, &new_settings, size); |
90 | dev->hard_start_xmit = hdlc->xmit; | 90 | dev->hard_start_xmit = hdlc->xmit; |
91 | dev->hard_header = NULL; | ||
92 | dev->type = ARPHRD_RAWHDLC; | 91 | dev->type = ARPHRD_RAWHDLC; |
93 | dev->flags = IFF_POINTOPOINT | IFF_NOARP; | ||
94 | dev->addr_len = 0; | ||
95 | netif_dormant_off(dev); | 92 | netif_dormant_off(dev); |
96 | return 0; | 93 | return 0; |
97 | } | 94 | } |
diff --git a/drivers/net/wan/hdlc_x25.c b/drivers/net/wan/hdlc_x25.c index e4bb9f8ad433..cd7b22f50edc 100644 --- a/drivers/net/wan/hdlc_x25.c +++ b/drivers/net/wan/hdlc_x25.c | |||
@@ -215,9 +215,7 @@ static int x25_ioctl(struct net_device *dev, struct ifreq *ifr) | |||
215 | x25_rx, 0)) != 0) | 215 | x25_rx, 0)) != 0) |
216 | return result; | 216 | return result; |
217 | dev->hard_start_xmit = x25_xmit; | 217 | dev->hard_start_xmit = x25_xmit; |
218 | dev->hard_header = NULL; | ||
219 | dev->type = ARPHRD_X25; | 218 | dev->type = ARPHRD_X25; |
220 | dev->addr_len = 0; | ||
221 | netif_dormant_off(dev); | 219 | netif_dormant_off(dev); |
222 | return 0; | 220 | return 0; |
223 | } | 221 | } |
diff --git a/drivers/rtc/Kconfig b/drivers/rtc/Kconfig index deef29646e0e..95826b92ca4b 100644 --- a/drivers/rtc/Kconfig +++ b/drivers/rtc/Kconfig | |||
@@ -207,10 +207,12 @@ config RTC_DRV_PCF8563 | |||
207 | 207 | ||
208 | config RTC_DRV_PCF8583 | 208 | config RTC_DRV_PCF8583 |
209 | tristate "Philips PCF8583" | 209 | tristate "Philips PCF8583" |
210 | depends on RTC_CLASS && I2C | 210 | depends on RTC_CLASS && I2C && ARCH_RPC |
211 | help | 211 | help |
212 | If you say yes here you get support for the | 212 | If you say yes here you get support for the Philips PCF8583 |
213 | Philips PCF8583 RTC chip. | 213 | RTC chip found on Acorn RiscPCs. This driver supports the |
214 | platform specific method of retrieving the current year from | ||
215 | the RTC's SRAM. | ||
214 | 216 | ||
215 | This driver can also be built as a module. If so, the module | 217 | This driver can also be built as a module. If so, the module |
216 | will be called rtc-pcf8583. | 218 | will be called rtc-pcf8583. |
diff --git a/drivers/rtc/rtc-pcf8583.c b/drivers/rtc/rtc-pcf8583.c index 5875ebb8c79d..d48b03374586 100644 --- a/drivers/rtc/rtc-pcf8583.c +++ b/drivers/rtc/rtc-pcf8583.c | |||
@@ -40,7 +40,7 @@ struct pcf8583 { | |||
40 | #define CTRL_ALARM 0x02 | 40 | #define CTRL_ALARM 0x02 |
41 | #define CTRL_TIMER 0x01 | 41 | #define CTRL_TIMER 0x01 |
42 | 42 | ||
43 | static unsigned short normal_i2c[] = { I2C_CLIENT_END }; | 43 | static unsigned short normal_i2c[] = { 0x50, I2C_CLIENT_END }; |
44 | 44 | ||
45 | /* Module parameters */ | 45 | /* Module parameters */ |
46 | I2C_CLIENT_INSMOD; | 46 | I2C_CLIENT_INSMOD; |
@@ -81,11 +81,11 @@ static int pcf8583_get_datetime(struct i2c_client *client, struct rtc_time *dt) | |||
81 | buf[4] &= 0x3f; | 81 | buf[4] &= 0x3f; |
82 | buf[5] &= 0x1f; | 82 | buf[5] &= 0x1f; |
83 | 83 | ||
84 | dt->tm_sec = BCD_TO_BIN(buf[1]); | 84 | dt->tm_sec = BCD2BIN(buf[1]); |
85 | dt->tm_min = BCD_TO_BIN(buf[2]); | 85 | dt->tm_min = BCD2BIN(buf[2]); |
86 | dt->tm_hour = BCD_TO_BIN(buf[3]); | 86 | dt->tm_hour = BCD2BIN(buf[3]); |
87 | dt->tm_mday = BCD_TO_BIN(buf[4]); | 87 | dt->tm_mday = BCD2BIN(buf[4]); |
88 | dt->tm_mon = BCD_TO_BIN(buf[5]); | 88 | dt->tm_mon = BCD2BIN(buf[5]) - 1; |
89 | } | 89 | } |
90 | 90 | ||
91 | return ret == 2 ? 0 : -EIO; | 91 | return ret == 2 ? 0 : -EIO; |
@@ -99,14 +99,14 @@ static int pcf8583_set_datetime(struct i2c_client *client, struct rtc_time *dt, | |||
99 | buf[0] = 0; | 99 | buf[0] = 0; |
100 | buf[1] = get_ctrl(client) | 0x80; | 100 | buf[1] = get_ctrl(client) | 0x80; |
101 | buf[2] = 0; | 101 | buf[2] = 0; |
102 | buf[3] = BIN_TO_BCD(dt->tm_sec); | 102 | buf[3] = BIN2BCD(dt->tm_sec); |
103 | buf[4] = BIN_TO_BCD(dt->tm_min); | 103 | buf[4] = BIN2BCD(dt->tm_min); |
104 | buf[5] = BIN_TO_BCD(dt->tm_hour); | 104 | buf[5] = BIN2BCD(dt->tm_hour); |
105 | 105 | ||
106 | if (datetoo) { | 106 | if (datetoo) { |
107 | len = 8; | 107 | len = 8; |
108 | buf[6] = BIN_TO_BCD(dt->tm_mday) | (dt->tm_year << 6); | 108 | buf[6] = BIN2BCD(dt->tm_mday) | (dt->tm_year << 6); |
109 | buf[7] = BIN_TO_BCD(dt->tm_mon) | (dt->tm_wday << 5); | 109 | buf[7] = BIN2BCD(dt->tm_mon + 1) | (dt->tm_wday << 5); |
110 | } | 110 | } |
111 | 111 | ||
112 | ret = i2c_master_send(client, (char *)buf, len); | 112 | ret = i2c_master_send(client, (char *)buf, len); |
@@ -226,7 +226,7 @@ static int pcf8583_rtc_read_time(struct device *dev, struct rtc_time *tm) | |||
226 | */ | 226 | */ |
227 | year_offset += 4; | 227 | year_offset += 4; |
228 | 228 | ||
229 | tm->tm_year = real_year + year_offset + year[1] * 100; | 229 | tm->tm_year = (real_year + year_offset + year[1] * 100) - 1900; |
230 | 230 | ||
231 | return 0; | 231 | return 0; |
232 | } | 232 | } |
@@ -237,6 +237,7 @@ static int pcf8583_rtc_set_time(struct device *dev, struct rtc_time *tm) | |||
237 | unsigned char year[2], chk; | 237 | unsigned char year[2], chk; |
238 | struct rtc_mem cmos_year = { CMOS_YEAR, sizeof(year), year }; | 238 | struct rtc_mem cmos_year = { CMOS_YEAR, sizeof(year), year }; |
239 | struct rtc_mem cmos_check = { CMOS_CHECKSUM, 1, &chk }; | 239 | struct rtc_mem cmos_check = { CMOS_CHECKSUM, 1, &chk }; |
240 | unsigned int proper_year = tm->tm_year + 1900; | ||
240 | int ret; | 241 | int ret; |
241 | 242 | ||
242 | /* | 243 | /* |
@@ -258,8 +259,8 @@ static int pcf8583_rtc_set_time(struct device *dev, struct rtc_time *tm) | |||
258 | 259 | ||
259 | chk -= year[1] + year[0]; | 260 | chk -= year[1] + year[0]; |
260 | 261 | ||
261 | year[1] = tm->tm_year / 100; | 262 | year[1] = proper_year / 100; |
262 | year[0] = tm->tm_year % 100; | 263 | year[0] = proper_year % 100; |
263 | 264 | ||
264 | chk += year[1] + year[0]; | 265 | chk += year[1] + year[0]; |
265 | 266 | ||
diff --git a/drivers/s390/net/qeth_main.c b/drivers/s390/net/qeth_main.c index 2257e45594b3..d8a86f5af379 100644 --- a/drivers/s390/net/qeth_main.c +++ b/drivers/s390/net/qeth_main.c | |||
@@ -3654,7 +3654,7 @@ qeth_verify_vlan_dev(struct net_device *dev, struct qeth_card *card) | |||
3654 | return rc; | 3654 | return rc; |
3655 | 3655 | ||
3656 | for (i = 0; i < VLAN_GROUP_ARRAY_LEN; i++){ | 3656 | for (i = 0; i < VLAN_GROUP_ARRAY_LEN; i++){ |
3657 | if (vg->vlan_devices[i] == dev){ | 3657 | if (vlan_group_get_device(vg, i) == dev){ |
3658 | rc = QETH_VLAN_CARD; | 3658 | rc = QETH_VLAN_CARD; |
3659 | break; | 3659 | break; |
3660 | } | 3660 | } |
@@ -5261,7 +5261,7 @@ qeth_free_vlan_addresses4(struct qeth_card *card, unsigned short vid) | |||
5261 | QETH_DBF_TEXT(trace, 4, "frvaddr4"); | 5261 | QETH_DBF_TEXT(trace, 4, "frvaddr4"); |
5262 | 5262 | ||
5263 | rcu_read_lock(); | 5263 | rcu_read_lock(); |
5264 | in_dev = __in_dev_get_rcu(card->vlangrp->vlan_devices[vid]); | 5264 | in_dev = __in_dev_get_rcu(vlan_group_get_device(card->vlangrp, vid)); |
5265 | if (!in_dev) | 5265 | if (!in_dev) |
5266 | goto out; | 5266 | goto out; |
5267 | for (ifa = in_dev->ifa_list; ifa; ifa = ifa->ifa_next) { | 5267 | for (ifa = in_dev->ifa_list; ifa; ifa = ifa->ifa_next) { |
@@ -5288,7 +5288,7 @@ qeth_free_vlan_addresses6(struct qeth_card *card, unsigned short vid) | |||
5288 | 5288 | ||
5289 | QETH_DBF_TEXT(trace, 4, "frvaddr6"); | 5289 | QETH_DBF_TEXT(trace, 4, "frvaddr6"); |
5290 | 5290 | ||
5291 | in6_dev = in6_dev_get(card->vlangrp->vlan_devices[vid]); | 5291 | in6_dev = in6_dev_get(vlan_group_get_device(card->vlangrp, vid)); |
5292 | if (!in6_dev) | 5292 | if (!in6_dev) |
5293 | return; | 5293 | return; |
5294 | for (ifa = in6_dev->addr_list; ifa; ifa = ifa->lst_next){ | 5294 | for (ifa = in6_dev->addr_list; ifa; ifa = ifa->lst_next){ |
@@ -5360,7 +5360,7 @@ qeth_layer2_process_vlans(struct qeth_card *card, int clear) | |||
5360 | if (!card->vlangrp) | 5360 | if (!card->vlangrp) |
5361 | return; | 5361 | return; |
5362 | for (i = 0; i < VLAN_GROUP_ARRAY_LEN; i++) { | 5362 | for (i = 0; i < VLAN_GROUP_ARRAY_LEN; i++) { |
5363 | if (card->vlangrp->vlan_devices[i] == NULL) | 5363 | if (vlan_group_get_device(card->vlangrp, i) == NULL) |
5364 | continue; | 5364 | continue; |
5365 | if (clear) | 5365 | if (clear) |
5366 | qeth_layer2_send_setdelvlan(card, i, IPA_CMD_DELVLAN); | 5366 | qeth_layer2_send_setdelvlan(card, i, IPA_CMD_DELVLAN); |
@@ -5398,8 +5398,7 @@ qeth_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid) | |||
5398 | spin_lock_irqsave(&card->vlanlock, flags); | 5398 | spin_lock_irqsave(&card->vlanlock, flags); |
5399 | /* unregister IP addresses of vlan device */ | 5399 | /* unregister IP addresses of vlan device */ |
5400 | qeth_free_vlan_addresses(card, vid); | 5400 | qeth_free_vlan_addresses(card, vid); |
5401 | if (card->vlangrp) | 5401 | vlan_group_set_device(card->vlangrp, vid, NULL); |
5402 | card->vlangrp->vlan_devices[vid] = NULL; | ||
5403 | spin_unlock_irqrestore(&card->vlanlock, flags); | 5402 | spin_unlock_irqrestore(&card->vlanlock, flags); |
5404 | if (card->options.layer2) | 5403 | if (card->options.layer2) |
5405 | qeth_layer2_send_setdelvlan(card, vid, IPA_CMD_DELVLAN); | 5404 | qeth_layer2_send_setdelvlan(card, vid, IPA_CMD_DELVLAN); |
@@ -5662,10 +5661,11 @@ qeth_add_vlan_mc(struct qeth_card *card) | |||
5662 | 5661 | ||
5663 | vg = card->vlangrp; | 5662 | vg = card->vlangrp; |
5664 | for (i = 0; i < VLAN_GROUP_ARRAY_LEN; i++) { | 5663 | for (i = 0; i < VLAN_GROUP_ARRAY_LEN; i++) { |
5665 | if (vg->vlan_devices[i] == NULL || | 5664 | struct net_device *netdev = vlan_group_get_device(vg, i); |
5666 | !(vg->vlan_devices[i]->flags & IFF_UP)) | 5665 | if (netdev == NULL || |
5666 | !(netdev->flags & IFF_UP)) | ||
5667 | continue; | 5667 | continue; |
5668 | in_dev = in_dev_get(vg->vlan_devices[i]); | 5668 | in_dev = in_dev_get(netdev); |
5669 | if (!in_dev) | 5669 | if (!in_dev) |
5670 | continue; | 5670 | continue; |
5671 | read_lock(&in_dev->mc_list_lock); | 5671 | read_lock(&in_dev->mc_list_lock); |
@@ -5749,10 +5749,11 @@ qeth_add_vlan_mc6(struct qeth_card *card) | |||
5749 | 5749 | ||
5750 | vg = card->vlangrp; | 5750 | vg = card->vlangrp; |
5751 | for (i = 0; i < VLAN_GROUP_ARRAY_LEN; i++) { | 5751 | for (i = 0; i < VLAN_GROUP_ARRAY_LEN; i++) { |
5752 | if (vg->vlan_devices[i] == NULL || | 5752 | struct net_device *netdev = vlan_group_get_device(vg, i); |
5753 | !(vg->vlan_devices[i]->flags & IFF_UP)) | 5753 | if (netdev == NULL || |
5754 | !(netdev->flags & IFF_UP)) | ||
5754 | continue; | 5755 | continue; |
5755 | in_dev = in6_dev_get(vg->vlan_devices[i]); | 5756 | in_dev = in6_dev_get(netdev); |
5756 | if (!in_dev) | 5757 | if (!in_dev) |
5757 | continue; | 5758 | continue; |
5758 | read_lock(&in_dev->lock); | 5759 | read_lock(&in_dev->lock); |
diff --git a/drivers/scsi/arm/cumana_2.c b/drivers/scsi/arm/cumana_2.c index d2d51dc51ab8..82add77ad131 100644 --- a/drivers/scsi/arm/cumana_2.c +++ b/drivers/scsi/arm/cumana_2.c | |||
@@ -178,10 +178,10 @@ cumanascsi_2_dma_setup(struct Scsi_Host *host, struct scsi_pointer *SCp, | |||
178 | dma_dir = DMA_MODE_READ, | 178 | dma_dir = DMA_MODE_READ, |
179 | alatch_dir = ALATCH_DMA_IN; | 179 | alatch_dir = ALATCH_DMA_IN; |
180 | 180 | ||
181 | dma_map_sg(dev, info->sg, bufs + 1, map_dir); | 181 | dma_map_sg(dev, info->sg, bufs, map_dir); |
182 | 182 | ||
183 | disable_dma(dmach); | 183 | disable_dma(dmach); |
184 | set_dma_sg(dmach, info->sg, bufs + 1); | 184 | set_dma_sg(dmach, info->sg, bufs); |
185 | writeb(alatch_dir, info->base + CUMANASCSI2_ALATCH); | 185 | writeb(alatch_dir, info->base + CUMANASCSI2_ALATCH); |
186 | set_dma_mode(dmach, dma_dir); | 186 | set_dma_mode(dmach, dma_dir); |
187 | enable_dma(dmach); | 187 | enable_dma(dmach); |
diff --git a/drivers/scsi/arm/eesox.c b/drivers/scsi/arm/eesox.c index d4136524fc46..ed06a8c19ad6 100644 --- a/drivers/scsi/arm/eesox.c +++ b/drivers/scsi/arm/eesox.c | |||
@@ -175,10 +175,10 @@ eesoxscsi_dma_setup(struct Scsi_Host *host, struct scsi_pointer *SCp, | |||
175 | map_dir = DMA_FROM_DEVICE, | 175 | map_dir = DMA_FROM_DEVICE, |
176 | dma_dir = DMA_MODE_READ; | 176 | dma_dir = DMA_MODE_READ; |
177 | 177 | ||
178 | dma_map_sg(dev, info->sg, bufs + 1, map_dir); | 178 | dma_map_sg(dev, info->sg, bufs, map_dir); |
179 | 179 | ||
180 | disable_dma(dmach); | 180 | disable_dma(dmach); |
181 | set_dma_sg(dmach, info->sg, bufs + 1); | 181 | set_dma_sg(dmach, info->sg, bufs); |
182 | set_dma_mode(dmach, dma_dir); | 182 | set_dma_mode(dmach, dma_dir); |
183 | enable_dma(dmach); | 183 | enable_dma(dmach); |
184 | return fasdma_real_all; | 184 | return fasdma_real_all; |
diff --git a/drivers/scsi/arm/fas216.c b/drivers/scsi/arm/fas216.c index 2969cc0ff259..fb5f20284389 100644 --- a/drivers/scsi/arm/fas216.c +++ b/drivers/scsi/arm/fas216.c | |||
@@ -633,7 +633,7 @@ static void fas216_updateptrs(FAS216_Info *info, int bytes_transferred) | |||
633 | 633 | ||
634 | BUG_ON(bytes_transferred < 0); | 634 | BUG_ON(bytes_transferred < 0); |
635 | 635 | ||
636 | info->SCpnt->request_bufflen -= bytes_transferred; | 636 | SCp->phase -= bytes_transferred; |
637 | 637 | ||
638 | while (bytes_transferred != 0) { | 638 | while (bytes_transferred != 0) { |
639 | if (SCp->this_residual > bytes_transferred) | 639 | if (SCp->this_residual > bytes_transferred) |
@@ -715,7 +715,7 @@ static void fas216_cleanuptransfer(FAS216_Info *info) | |||
715 | return; | 715 | return; |
716 | 716 | ||
717 | if (dmatype == fasdma_real_all) | 717 | if (dmatype == fasdma_real_all) |
718 | total = info->SCpnt->request_bufflen; | 718 | total = info->scsi.SCp.phase; |
719 | else | 719 | else |
720 | total = info->scsi.SCp.this_residual; | 720 | total = info->scsi.SCp.this_residual; |
721 | 721 | ||
@@ -753,7 +753,7 @@ static void fas216_transfer(FAS216_Info *info) | |||
753 | fas216_log(info, LOG_BUFFER, | 753 | fas216_log(info, LOG_BUFFER, |
754 | "starttransfer: buffer %p length 0x%06x reqlen 0x%06x", | 754 | "starttransfer: buffer %p length 0x%06x reqlen 0x%06x", |
755 | info->scsi.SCp.ptr, info->scsi.SCp.this_residual, | 755 | info->scsi.SCp.ptr, info->scsi.SCp.this_residual, |
756 | info->SCpnt->request_bufflen); | 756 | info->scsi.SCp.phase); |
757 | 757 | ||
758 | if (!info->scsi.SCp.ptr) { | 758 | if (!info->scsi.SCp.ptr) { |
759 | fas216_log(info, LOG_ERROR, "null buffer passed to " | 759 | fas216_log(info, LOG_ERROR, "null buffer passed to " |
@@ -784,7 +784,7 @@ static void fas216_transfer(FAS216_Info *info) | |||
784 | info->dma.transfer_type = dmatype; | 784 | info->dma.transfer_type = dmatype; |
785 | 785 | ||
786 | if (dmatype == fasdma_real_all) | 786 | if (dmatype == fasdma_real_all) |
787 | fas216_set_stc(info, info->SCpnt->request_bufflen); | 787 | fas216_set_stc(info, info->scsi.SCp.phase); |
788 | else | 788 | else |
789 | fas216_set_stc(info, info->scsi.SCp.this_residual); | 789 | fas216_set_stc(info, info->scsi.SCp.this_residual); |
790 | 790 | ||
@@ -2114,6 +2114,7 @@ request_sense: | |||
2114 | SCpnt->SCp.buffers_residual = 0; | 2114 | SCpnt->SCp.buffers_residual = 0; |
2115 | SCpnt->SCp.ptr = (char *)SCpnt->sense_buffer; | 2115 | SCpnt->SCp.ptr = (char *)SCpnt->sense_buffer; |
2116 | SCpnt->SCp.this_residual = sizeof(SCpnt->sense_buffer); | 2116 | SCpnt->SCp.this_residual = sizeof(SCpnt->sense_buffer); |
2117 | SCpnt->SCp.phase = sizeof(SCpnt->sense_buffer); | ||
2117 | SCpnt->SCp.Message = 0; | 2118 | SCpnt->SCp.Message = 0; |
2118 | SCpnt->SCp.Status = 0; | 2119 | SCpnt->SCp.Status = 0; |
2119 | SCpnt->request_bufflen = sizeof(SCpnt->sense_buffer); | 2120 | SCpnt->request_bufflen = sizeof(SCpnt->sense_buffer); |
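The fas216 hunks above stop reading the per-command total from scsi_cmnd.request_bufflen and instead keep it in scsi_pointer.phase, which init_SCp() seeds (see the arm/scsi.h hunk below) and fas216_updateptrs() decrements as data moves. A hedged sketch of that accounting, with the field roles as used above (illustrative, not the full driver logic):

        /* SCp.phase         = bytes still outstanding for the whole command  */
        /* SCp.this_residual = bytes still outstanding in the current segment */
        static void account_transfer(struct scsi_pointer *SCp, int bytes)
        {
                SCp->phase -= bytes;                    /* whole-command residual */
                while (bytes) {
                        int chunk = bytes < SCp->this_residual ?
                                        bytes : SCp->this_residual;
                        SCp->this_residual -= chunk;
                        SCp->ptr += chunk;
                        bytes -= chunk;
                        /* when this_residual reaches zero, fas216_updateptrs()
                         * steps to the next scatterlist entry via SCp.buffer
                         * and SCp.buffers_residual */
                }
        }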
diff --git a/drivers/scsi/arm/powertec.c b/drivers/scsi/arm/powertec.c index f9cd20bfb958..159047a34997 100644 --- a/drivers/scsi/arm/powertec.c +++ b/drivers/scsi/arm/powertec.c | |||
@@ -148,10 +148,10 @@ powertecscsi_dma_setup(struct Scsi_Host *host, struct scsi_pointer *SCp, | |||
148 | map_dir = DMA_FROM_DEVICE, | 148 | map_dir = DMA_FROM_DEVICE, |
149 | dma_dir = DMA_MODE_READ; | 149 | dma_dir = DMA_MODE_READ; |
150 | 150 | ||
151 | dma_map_sg(dev, info->sg, bufs + 1, map_dir); | 151 | dma_map_sg(dev, info->sg, bufs, map_dir); |
152 | 152 | ||
153 | disable_dma(dmach); | 153 | disable_dma(dmach); |
154 | set_dma_sg(dmach, info->sg, bufs + 1); | 154 | set_dma_sg(dmach, info->sg, bufs); |
155 | set_dma_mode(dmach, dma_dir); | 155 | set_dma_mode(dmach, dma_dir); |
156 | enable_dma(dmach); | 156 | enable_dma(dmach); |
157 | return fasdma_real_all; | 157 | return fasdma_real_all; |
@@ -342,6 +342,7 @@ powertecscsi_probe(struct expansion_card *ec, const struct ecard_id *id) | |||
342 | info->base = base; | 342 | info->base = base; |
343 | powertecscsi_terminator_ctl(host, term[ec->slot_no]); | 343 | powertecscsi_terminator_ctl(host, term[ec->slot_no]); |
344 | 344 | ||
345 | info->ec = ec; | ||
345 | info->info.scsi.io_base = base + POWERTEC_FAS216_OFFSET; | 346 | info->info.scsi.io_base = base + POWERTEC_FAS216_OFFSET; |
346 | info->info.scsi.io_shift = POWERTEC_FAS216_SHIFT; | 347 | info->info.scsi.io_shift = POWERTEC_FAS216_SHIFT; |
347 | info->info.scsi.irq = ec->irq; | 348 | info->info.scsi.irq = ec->irq; |
diff --git a/drivers/scsi/arm/scsi.h b/drivers/scsi/arm/scsi.h index 3a39579bd08e..21ba57155bea 100644 --- a/drivers/scsi/arm/scsi.h +++ b/drivers/scsi/arm/scsi.h | |||
@@ -80,6 +80,7 @@ static inline void init_SCp(struct scsi_cmnd *SCpnt) | |||
80 | (page_address(SCpnt->SCp.buffer->page) + | 80 | (page_address(SCpnt->SCp.buffer->page) + |
81 | SCpnt->SCp.buffer->offset); | 81 | SCpnt->SCp.buffer->offset); |
82 | SCpnt->SCp.this_residual = SCpnt->SCp.buffer->length; | 82 | SCpnt->SCp.this_residual = SCpnt->SCp.buffer->length; |
83 | SCpnt->SCp.phase = SCpnt->request_bufflen; | ||
83 | 84 | ||
84 | #ifdef BELT_AND_BRACES | 85 | #ifdef BELT_AND_BRACES |
85 | /* | 86 | /* |
@@ -98,6 +99,7 @@ static inline void init_SCp(struct scsi_cmnd *SCpnt) | |||
98 | } else { | 99 | } else { |
99 | SCpnt->SCp.ptr = (unsigned char *)SCpnt->request_buffer; | 100 | SCpnt->SCp.ptr = (unsigned char *)SCpnt->request_buffer; |
100 | SCpnt->SCp.this_residual = SCpnt->request_bufflen; | 101 | SCpnt->SCp.this_residual = SCpnt->request_bufflen; |
102 | SCpnt->SCp.phase = SCpnt->request_bufflen; | ||
101 | } | 103 | } |
102 | 104 | ||
103 | /* | 105 | /* |