Diffstat (limited to 'drivers/media')
-rw-r--r--  drivers/media/dvb-frontends/Kconfig        |   2
-rw-r--r--  drivers/media/pci/cobalt/Kconfig           |   1
-rw-r--r--  drivers/media/pci/cobalt/cobalt-irq.c      |   2
-rw-r--r--  drivers/media/pci/ivtv/ivtvfb.c            |  15
-rw-r--r--  drivers/media/pci/mantis/mantis_dma.c      |   5
-rw-r--r--  drivers/media/rc/ir-rc5-decoder.c          | 116
-rw-r--r--  drivers/media/rc/ir-rc6-decoder.c          | 122
-rw-r--r--  drivers/media/rc/nuvoton-cir.c             | 127
-rw-r--r--  drivers/media/rc/nuvoton-cir.h             |   1
-rw-r--r--  drivers/media/rc/rc-core-priv.h            |  36
-rw-r--r--  drivers/media/rc/rc-ir-raw.c               | 139
-rw-r--r--  drivers/media/rc/rc-loopback.c             |  36
-rw-r--r--  drivers/media/rc/rc-main.c                 |   7
-rw-r--r--  drivers/media/v4l2-core/videobuf2-core.c   |  40
14 files changed, 41 insertions, 608 deletions
diff --git a/drivers/media/dvb-frontends/Kconfig b/drivers/media/dvb-frontends/Kconfig
index 0d35f5850ff1..5ab90f36a6a6 100644
--- a/drivers/media/dvb-frontends/Kconfig
+++ b/drivers/media/dvb-frontends/Kconfig
@@ -240,7 +240,7 @@ config DVB_SI21XX
 
 config DVB_TS2020
         tristate "Montage Tehnology TS2020 based tuners"
-        depends on DVB_CORE
+        depends on DVB_CORE && I2C
         select REGMAP_I2C
         default m if !MEDIA_SUBDRV_AUTOSELECT
         help
diff --git a/drivers/media/pci/cobalt/Kconfig b/drivers/media/pci/cobalt/Kconfig
index 3be1b2c3c386..6a1c0089bb62 100644
--- a/drivers/media/pci/cobalt/Kconfig
+++ b/drivers/media/pci/cobalt/Kconfig
@@ -2,6 +2,7 @@ config VIDEO_COBALT
         tristate "Cisco Cobalt support"
         depends on VIDEO_V4L2 && I2C && MEDIA_CONTROLLER
         depends on PCI_MSI && MTD_COMPLEX_MAPPINGS && GPIOLIB
+        depends on SND
         select I2C_ALGOBIT
         select VIDEO_ADV7604
         select VIDEO_ADV7511
diff --git a/drivers/media/pci/cobalt/cobalt-irq.c b/drivers/media/pci/cobalt/cobalt-irq.c
index dd4bff9cf339..d1f5898d11ba 100644
--- a/drivers/media/pci/cobalt/cobalt-irq.c
+++ b/drivers/media/pci/cobalt/cobalt-irq.c
@@ -139,7 +139,7 @@ done:
            also know about dropped frames. */
         cb->vb.v4l2_buf.sequence = s->sequence++;
         vb2_buffer_done(&cb->vb, (skip || s->unstable_frame) ?
-                        VB2_BUF_STATE_QUEUED : VB2_BUF_STATE_DONE);
+                        VB2_BUF_STATE_REQUEUEING : VB2_BUF_STATE_DONE);
 }
 
 irqreturn_t cobalt_irq_handler(int irq, void *dev_id)
diff --git a/drivers/media/pci/ivtv/ivtvfb.c b/drivers/media/pci/ivtv/ivtvfb.c
index 4cb365d4ffdc..8b95eefb610b 100644
--- a/drivers/media/pci/ivtv/ivtvfb.c
+++ b/drivers/media/pci/ivtv/ivtvfb.c
@@ -38,6 +38,8 @@
     Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
  */
 
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
 #include <linux/module.h>
 #include <linux/kernel.h>
 #include <linux/fb.h>
@@ -1171,6 +1173,13 @@ static int ivtvfb_init_card(struct ivtv *itv)
 {
         int rc;
 
+#ifdef CONFIG_X86_64
+        if (pat_enabled()) {
+                pr_warn("ivtvfb needs PAT disabled, boot with nopat kernel parameter\n");
+                return -ENODEV;
+        }
+#endif
+
         if (itv->osd_info) {
                 IVTVFB_ERR("Card %d already initialised\n", ivtvfb_card_id);
                 return -EBUSY;
@@ -1265,12 +1274,6 @@ static int __init ivtvfb_init(void)
         int registered = 0;
         int err;
 
-#ifdef CONFIG_X86_64
-        if (WARN(pat_enabled(),
-                 "ivtvfb needs PAT disabled, boot with nopat kernel parameter\n")) {
-                return -ENODEV;
-        }
-#endif
 
         if (ivtvfb_card_id < -1 || ivtvfb_card_id >= IVTV_MAX_CARDS) {
                 printk(KERN_ERR "ivtvfb: ivtvfb_card_id parameter is out of range (valid range: -1 - %d)\n",
diff --git a/drivers/media/pci/mantis/mantis_dma.c b/drivers/media/pci/mantis/mantis_dma.c
index 1d59c7e039f7..87990ece5848 100644
--- a/drivers/media/pci/mantis/mantis_dma.c
+++ b/drivers/media/pci/mantis/mantis_dma.c
@@ -130,10 +130,11 @@ err:
 
 int mantis_dma_init(struct mantis_pci *mantis)
 {
-        int err = 0;
+        int err;
 
         dprintk(MANTIS_DEBUG, 1, "Mantis DMA init");
-        if (mantis_alloc_buffers(mantis) < 0) {
+        err = mantis_alloc_buffers(mantis);
+        if (err < 0) {
                 dprintk(MANTIS_ERROR, 1, "Error allocating DMA buffer");
 
                 /* Stop RISC Engine */
diff --git a/drivers/media/rc/ir-rc5-decoder.c b/drivers/media/rc/ir-rc5-decoder.c
index 8939ebd74391..84fa6e9b59a1 100644
--- a/drivers/media/rc/ir-rc5-decoder.c
+++ b/drivers/media/rc/ir-rc5-decoder.c
@@ -184,125 +184,9 @@ out:
         return -EINVAL;
 }
 
-static struct ir_raw_timings_manchester ir_rc5_timings = {
-        .leader = RC5_UNIT,
-        .pulse_space_start = 0,
-        .clock = RC5_UNIT,
-        .trailer_space = RC5_UNIT * 10,
-};
-
-static struct ir_raw_timings_manchester ir_rc5x_timings[2] = {
-        {
-                .leader = RC5_UNIT,
-                .pulse_space_start = 0,
-                .clock = RC5_UNIT,
-                .trailer_space = RC5X_SPACE,
-        },
-        {
-                .clock = RC5_UNIT,
-                .trailer_space = RC5_UNIT * 10,
-        },
-};
-
-static struct ir_raw_timings_manchester ir_rc5_sz_timings = {
-        .leader = RC5_UNIT,
-        .pulse_space_start = 0,
-        .clock = RC5_UNIT,
-        .trailer_space = RC5_UNIT * 10,
-};
-
-static int ir_rc5_validate_filter(const struct rc_scancode_filter *scancode,
-                                  unsigned int important_bits)
-{
-        /* all important bits of scancode should be set in mask */
-        if (~scancode->mask & important_bits)
-                return -EINVAL;
-        /* extra bits in mask should be zero in data */
-        if (scancode->mask & scancode->data & ~important_bits)
-                return -EINVAL;
-        return 0;
-}
-
-/**
- * ir_rc5_encode() - Encode a scancode as a stream of raw events
- *
- * @protocols: allowed protocols
- * @scancode:  scancode filter describing scancode (helps distinguish between
- *             protocol subtypes when scancode is ambiguous)
- * @events:    array of raw ir events to write into
- * @max:       maximum size of @events
- *
- * Returns:    The number of events written.
- *             -ENOBUFS if there isn't enough space in the array to fit the
- *             encoding. In this case all @max events will have been written.
- *             -EINVAL if the scancode is ambiguous or invalid.
- */
-static int ir_rc5_encode(u64 protocols,
-                         const struct rc_scancode_filter *scancode,
-                         struct ir_raw_event *events, unsigned int max)
-{
-        int ret;
-        struct ir_raw_event *e = events;
-        unsigned int data, xdata, command, commandx, system;
-
-        /* Detect protocol and convert scancode to raw data */
-        if (protocols & RC_BIT_RC5 &&
-            !ir_rc5_validate_filter(scancode, 0x1f7f)) {
-                /* decode scancode */
-                command = (scancode->data & 0x003f) >> 0;
-                commandx = (scancode->data & 0x0040) >> 6;
-                system = (scancode->data & 0x1f00) >> 8;
-                /* encode data */
-                data = !commandx << 12 | system << 6 | command;
-
-                /* Modulate the data */
-                ret = ir_raw_gen_manchester(&e, max, &ir_rc5_timings, RC5_NBITS,
-                                            data);
-                if (ret < 0)
-                        return ret;
-        } else if (protocols & RC_BIT_RC5X &&
-                   !ir_rc5_validate_filter(scancode, 0x1f7f3f)) {
-                /* decode scancode */
-                xdata = (scancode->data & 0x00003f) >> 0;
-                command = (scancode->data & 0x003f00) >> 8;
-                commandx = (scancode->data & 0x004000) >> 14;
-                system = (scancode->data & 0x1f0000) >> 16;
-                /* commandx and system overlap, bits must match when encoded */
-                if (commandx == (system & 0x1))
-                        return -EINVAL;
-                /* encode data */
-                data = 1 << 18 | system << 12 | command << 6 | xdata;
-
-                /* Modulate the data */
-                ret = ir_raw_gen_manchester(&e, max, &ir_rc5x_timings[0],
-                                            CHECK_RC5X_NBITS,
-                                            data >> (RC5X_NBITS-CHECK_RC5X_NBITS));
-                if (ret < 0)
-                        return ret;
-                ret = ir_raw_gen_manchester(&e, max - (e - events),
-                                            &ir_rc5x_timings[1],
-                                            RC5X_NBITS - CHECK_RC5X_NBITS,
-                                            data);
-                if (ret < 0)
-                        return ret;
-        } else if (protocols & RC_BIT_RC5_SZ &&
-                   !ir_rc5_validate_filter(scancode, 0x2fff)) {
-                /* RC5-SZ scancode is raw enough for Manchester as it is */
-                ret = ir_raw_gen_manchester(&e, max, &ir_rc5_sz_timings,
-                                            RC5_SZ_NBITS, scancode->data & 0x2fff);
-                if (ret < 0)
-                        return ret;
-        } else {
-                return -EINVAL;
-        }
-
-        return e - events;
-}
-
 static struct ir_raw_handler rc5_handler = {
         .protocols = RC_BIT_RC5 | RC_BIT_RC5X | RC_BIT_RC5_SZ,
         .decode = ir_rc5_decode,
-        .encode = ir_rc5_encode,
 };
 
 static int __init ir_rc5_decode_init(void)
diff --git a/drivers/media/rc/ir-rc6-decoder.c b/drivers/media/rc/ir-rc6-decoder.c
index f9c70baf6e0c..d16bc67af732 100644
--- a/drivers/media/rc/ir-rc6-decoder.c
+++ b/drivers/media/rc/ir-rc6-decoder.c
@@ -291,133 +291,11 @@ out:
         return -EINVAL;
 }
 
-static struct ir_raw_timings_manchester ir_rc6_timings[4] = {
-        {
-                .leader = RC6_PREFIX_PULSE,
-                .pulse_space_start = 0,
-                .clock = RC6_UNIT,
-                .invert = 1,
-                .trailer_space = RC6_PREFIX_SPACE,
-        },
-        {
-                .clock = RC6_UNIT,
-                .invert = 1,
-        },
-        {
-                .clock = RC6_UNIT * 2,
-                .invert = 1,
-        },
-        {
-                .clock = RC6_UNIT,
-                .invert = 1,
-                .trailer_space = RC6_SUFFIX_SPACE,
-        },
-};
-
-static int ir_rc6_validate_filter(const struct rc_scancode_filter *scancode,
-                                  unsigned int important_bits)
-{
-        /* all important bits of scancode should be set in mask */
-        if (~scancode->mask & important_bits)
-                return -EINVAL;
-        /* extra bits in mask should be zero in data */
-        if (scancode->mask & scancode->data & ~important_bits)
-                return -EINVAL;
-        return 0;
-}
-
-/**
- * ir_rc6_encode() - Encode a scancode as a stream of raw events
- *
- * @protocols: allowed protocols
- * @scancode:  scancode filter describing scancode (helps distinguish between
- *             protocol subtypes when scancode is ambiguous)
- * @events:    array of raw ir events to write into
- * @max:       maximum size of @events
- *
- * Returns:    The number of events written.
- *             -ENOBUFS if there isn't enough space in the array to fit the
- *             encoding. In this case all @max events will have been written.
- *             -EINVAL if the scancode is ambiguous or invalid.
- */
-static int ir_rc6_encode(u64 protocols,
-                         const struct rc_scancode_filter *scancode,
-                         struct ir_raw_event *events, unsigned int max)
-{
-        int ret;
-        struct ir_raw_event *e = events;
-
-        if (protocols & RC_BIT_RC6_0 &&
-            !ir_rc6_validate_filter(scancode, 0xffff)) {
-
-                /* Modulate the preamble */
-                ret = ir_raw_gen_manchester(&e, max, &ir_rc6_timings[0], 0, 0);
-                if (ret < 0)
-                        return ret;
-
-                /* Modulate the header (Start Bit & Mode-0) */
-                ret = ir_raw_gen_manchester(&e, max - (e - events),
-                                            &ir_rc6_timings[1],
-                                            RC6_HEADER_NBITS, (1 << 3));
-                if (ret < 0)
-                        return ret;
-
-                /* Modulate Trailer Bit */
-                ret = ir_raw_gen_manchester(&e, max - (e - events),
-                                            &ir_rc6_timings[2], 1, 0);
-                if (ret < 0)
-                        return ret;
-
-                /* Modulate rest of the data */
-                ret = ir_raw_gen_manchester(&e, max - (e - events),
-                                            &ir_rc6_timings[3], RC6_0_NBITS,
-                                            scancode->data);
-                if (ret < 0)
-                        return ret;
-
-        } else if (protocols & (RC_BIT_RC6_6A_20 | RC_BIT_RC6_6A_24 |
-                                RC_BIT_RC6_6A_32 | RC_BIT_RC6_MCE) &&
-                   !ir_rc6_validate_filter(scancode, 0x8fffffff)) {
-
-                /* Modulate the preamble */
-                ret = ir_raw_gen_manchester(&e, max, &ir_rc6_timings[0], 0, 0);
-                if (ret < 0)
-                        return ret;
-
-                /* Modulate the header (Start Bit & Header-version 6 */
-                ret = ir_raw_gen_manchester(&e, max - (e - events),
-                                            &ir_rc6_timings[1],
-                                            RC6_HEADER_NBITS, (1 << 3 | 6));
-                if (ret < 0)
-                        return ret;
-
-                /* Modulate Trailer Bit */
-                ret = ir_raw_gen_manchester(&e, max - (e - events),
-                                            &ir_rc6_timings[2], 1, 0);
-                if (ret < 0)
-                        return ret;
-
-                /* Modulate rest of the data */
-                ret = ir_raw_gen_manchester(&e, max - (e - events),
-                                            &ir_rc6_timings[3],
-                                            fls(scancode->mask),
-                                            scancode->data);
-                if (ret < 0)
-                        return ret;
-
-        } else {
-                return -EINVAL;
-        }
-
-        return e - events;
-}
-
 static struct ir_raw_handler rc6_handler = {
         .protocols = RC_BIT_RC6_0 | RC_BIT_RC6_6A_20 |
                      RC_BIT_RC6_6A_24 | RC_BIT_RC6_6A_32 |
                      RC_BIT_RC6_MCE,
         .decode = ir_rc6_decode,
-        .encode = ir_rc6_encode,
 };
 
 static int __init ir_rc6_decode_init(void)
diff --git a/drivers/media/rc/nuvoton-cir.c b/drivers/media/rc/nuvoton-cir.c
index baeb5971fd52..85af7a869167 100644
--- a/drivers/media/rc/nuvoton-cir.c
+++ b/drivers/media/rc/nuvoton-cir.c
@@ -526,130 +526,6 @@ static int nvt_set_tx_carrier(struct rc_dev *dev, u32 carrier)
         return 0;
 }
 
-static int nvt_write_wakeup_codes(struct rc_dev *dev,
-                                  const u8 *wakeup_sample_buf, int count)
-{
-        int i = 0;
-        u8 reg, reg_learn_mode;
-        unsigned long flags;
-        struct nvt_dev *nvt = dev->priv;
-
-        nvt_dbg_wake("writing wakeup samples");
-
-        reg = nvt_cir_wake_reg_read(nvt, CIR_WAKE_IRCON);
-        reg_learn_mode = reg & ~CIR_WAKE_IRCON_MODE0;
-        reg_learn_mode |= CIR_WAKE_IRCON_MODE1;
-
-        /* Lock the learn area to prevent racing with wake-isr */
-        spin_lock_irqsave(&nvt->nvt_lock, flags);
-
-        /* Enable fifo writes */
-        nvt_cir_wake_reg_write(nvt, reg_learn_mode, CIR_WAKE_IRCON);
-
-        /* Clear cir wake rx fifo */
-        nvt_clear_cir_wake_fifo(nvt);
-
-        if (count > WAKE_FIFO_LEN) {
-                nvt_dbg_wake("HW FIFO too small for all wake samples");
-                count = WAKE_FIFO_LEN;
-        }
-
-        if (count)
-                pr_info("Wake samples (%d) =", count);
-        else
-                pr_info("Wake sample fifo cleared");
-
-        /* Write wake samples to fifo */
-        for (i = 0; i < count; i++) {
-                pr_cont(" %02x", wakeup_sample_buf[i]);
-                nvt_cir_wake_reg_write(nvt, wakeup_sample_buf[i],
-                                       CIR_WAKE_WR_FIFO_DATA);
-        }
-        pr_cont("\n");
-
-        /* Switch cir to wakeup mode and disable fifo writing */
-        nvt_cir_wake_reg_write(nvt, reg, CIR_WAKE_IRCON);
-
-        /* Set number of bytes needed for wake */
-        nvt_cir_wake_reg_write(nvt, count ? count :
-                               CIR_WAKE_FIFO_CMP_BYTES,
-                               CIR_WAKE_FIFO_CMP_DEEP);
-
-        spin_unlock_irqrestore(&nvt->nvt_lock, flags);
-
-        return 0;
-}
-
-static int nvt_ir_raw_set_wakeup_filter(struct rc_dev *dev,
-                                        struct rc_scancode_filter *sc_filter)
-{
-        u8 *reg_buf;
-        u8 buf_val;
-        int i, ret, count;
-        unsigned int val;
-        struct ir_raw_event *raw;
-        bool complete;
-
-        /* Require both mask and data to be set before actually committing */
-        if (!sc_filter->mask || !sc_filter->data)
-                return 0;
-
-        raw = kmalloc_array(WAKE_FIFO_LEN, sizeof(*raw), GFP_KERNEL);
-        if (!raw)
-                return -ENOMEM;
-
-        ret = ir_raw_encode_scancode(dev->enabled_wakeup_protocols, sc_filter,
-                                     raw, WAKE_FIFO_LEN);
-        complete = (ret != -ENOBUFS);
-        if (!complete)
-                ret = WAKE_FIFO_LEN;
-        else if (ret < 0)
-                goto out_raw;
-
-        reg_buf = kmalloc_array(WAKE_FIFO_LEN, sizeof(*reg_buf), GFP_KERNEL);
-        if (!reg_buf) {
-                ret = -ENOMEM;
-                goto out_raw;
-        }
-
-        /* Inspect the ir samples */
-        for (i = 0, count = 0; i < ret && count < WAKE_FIFO_LEN; ++i) {
-                val = NS_TO_US((raw[i]).duration) / SAMPLE_PERIOD;
-
-                /* Split too large values into several smaller ones */
-                while (val > 0 && count < WAKE_FIFO_LEN) {
-
-                        /* Skip last value for better comparison tolerance */
-                        if (complete && i == ret - 1 && val < BUF_LEN_MASK)
-                                break;
-
-                        /* Clamp values to BUF_LEN_MASK at most */
-                        buf_val = (val > BUF_LEN_MASK) ? BUF_LEN_MASK : val;
-
-                        reg_buf[count] = buf_val;
-                        val -= buf_val;
-                        if ((raw[i]).pulse)
-                                reg_buf[count] |= BUF_PULSE_BIT;
-                        count++;
-                }
-        }
-
-        ret = nvt_write_wakeup_codes(dev, reg_buf, count);
-
-        kfree(reg_buf);
-out_raw:
-        kfree(raw);
-
-        return ret;
-}
-
-/* Dummy implementation. nuvoton is agnostic to the protocol used */
-static int nvt_ir_raw_change_wakeup_protocol(struct rc_dev *dev,
-                                             u64 *rc_type)
-{
-        return 0;
-}
-
 /*
  * nvt_tx_ir
  *
@@ -1167,14 +1043,11 @@ static int nvt_probe(struct pnp_dev *pdev, const struct pnp_device_id *dev_id)
         /* Set up the rc device */
         rdev->priv = nvt;
         rdev->driver_type = RC_DRIVER_IR_RAW;
-        rdev->encode_wakeup = true;
         rdev->allowed_protocols = RC_BIT_ALL;
         rdev->open = nvt_open;
         rdev->close = nvt_close;
         rdev->tx_ir = nvt_tx_ir;
         rdev->s_tx_carrier = nvt_set_tx_carrier;
-        rdev->s_wakeup_filter = nvt_ir_raw_set_wakeup_filter;
-        rdev->change_wakeup_protocol = nvt_ir_raw_change_wakeup_protocol;
         rdev->input_name = "Nuvoton w836x7hg Infrared Remote Transceiver";
         rdev->input_phys = "nuvoton/cir0";
         rdev->input_id.bustype = BUS_HOST;
diff --git a/drivers/media/rc/nuvoton-cir.h b/drivers/media/rc/nuvoton-cir.h
index 9d0e161c2a88..e1cf23c3875b 100644
--- a/drivers/media/rc/nuvoton-cir.h
+++ b/drivers/media/rc/nuvoton-cir.h
@@ -63,7 +63,6 @@ static int debug;
  */
 #define TX_BUF_LEN 256
 #define RX_BUF_LEN 32
-#define WAKE_FIFO_LEN 67
 
 struct nvt_dev {
         struct pnp_dev *pdev;
diff --git a/drivers/media/rc/rc-core-priv.h b/drivers/media/rc/rc-core-priv.h
index 4b994aa2f2a7..b68d4f762734 100644
--- a/drivers/media/rc/rc-core-priv.h
+++ b/drivers/media/rc/rc-core-priv.h
@@ -25,8 +25,6 @@ struct ir_raw_handler {
 
         u64 protocols; /* which are handled by this handler */
         int (*decode)(struct rc_dev *dev, struct ir_raw_event event);
-        int (*encode)(u64 protocols, const struct rc_scancode_filter *scancode,
-                      struct ir_raw_event *events, unsigned int max);
 
         /* These two should only be used by the lirc decoder */
         int (*raw_register)(struct rc_dev *dev);
@@ -152,44 +150,10 @@ static inline bool is_timing_event(struct ir_raw_event ev)
 #define TO_US(duration)  DIV_ROUND_CLOSEST((duration), 1000)
 #define TO_STR(is_pulse) ((is_pulse) ? "pulse" : "space")
 
-/* functions for IR encoders */
-
-static inline void init_ir_raw_event_duration(struct ir_raw_event *ev,
-                                              unsigned int pulse,
-                                              u32 duration)
-{
-        init_ir_raw_event(ev);
-        ev->duration = duration;
-        ev->pulse = pulse;
-}
-
-/**
- * struct ir_raw_timings_manchester - Manchester coding timings
- * @leader:            duration of leader pulse (if any) 0 if continuing
- *                     existing signal (see @pulse_space_start)
- * @pulse_space_start: 1 for starting with pulse (0 for starting with space)
- * @clock:             duration of each pulse/space in ns
- * @invert:            if set clock logic is inverted
- *                     (0 = space + pulse, 1 = pulse + space)
- * @trailer_space:     duration of trailer space in ns
- */
-struct ir_raw_timings_manchester {
-        unsigned int leader;
-        unsigned int pulse_space_start:1;
-        unsigned int clock;
-        unsigned int invert:1;
-        unsigned int trailer_space;
-};
-
-int ir_raw_gen_manchester(struct ir_raw_event **ev, unsigned int max,
-                          const struct ir_raw_timings_manchester *timings,
-                          unsigned int n, unsigned int data);
-
 /*
  * Routines from rc-raw.c to be used internally and by decoders
  */
 u64 ir_raw_get_allowed_protocols(void);
-u64 ir_raw_get_encode_protocols(void);
 int ir_raw_event_register(struct rc_dev *dev);
 void ir_raw_event_unregister(struct rc_dev *dev);
 int ir_raw_handler_register(struct ir_raw_handler *ir_raw_handler);
diff --git a/drivers/media/rc/rc-ir-raw.c b/drivers/media/rc/rc-ir-raw.c
index b9e4645c731c..b732ac6a26d8 100644
--- a/drivers/media/rc/rc-ir-raw.c
+++ b/drivers/media/rc/rc-ir-raw.c
@@ -30,7 +30,6 @@ static LIST_HEAD(ir_raw_client_list);
 static DEFINE_MUTEX(ir_raw_handler_lock);
 static LIST_HEAD(ir_raw_handler_list);
 static u64 available_protocols;
-static u64 encode_protocols;
 
 static int ir_raw_event_thread(void *data)
 {
@@ -241,146 +240,12 @@ ir_raw_get_allowed_protocols(void)
         return protocols;
 }
 
-/* used internally by the sysfs interface */
-u64
-ir_raw_get_encode_protocols(void)
-{
-        u64 protocols;
-
-        mutex_lock(&ir_raw_handler_lock);
-        protocols = encode_protocols;
-        mutex_unlock(&ir_raw_handler_lock);
-        return protocols;
-}
-
 static int change_protocol(struct rc_dev *dev, u64 *rc_type)
 {
         /* the caller will update dev->enabled_protocols */
         return 0;
 }
 
-/**
- * ir_raw_gen_manchester() - Encode data with Manchester (bi-phase) modulation.
- * @ev:       Pointer to pointer to next free event. *@ev is incremented for
- *            each raw event filled.
- * @max:      Maximum number of raw events to fill.
- * @timings:  Manchester modulation timings.
- * @n:        Number of bits of data.
- * @data:     Data bits to encode.
- *
- * Encodes the @n least significant bits of @data using Manchester (bi-phase)
- * modulation with the timing characteristics described by @timings, writing up
- * to @max raw IR events using the *@ev pointer.
- *
- * Returns:   0 on success.
- *            -ENOBUFS if there isn't enough space in the array to fit the
- *            full encoded data. In this case all @max events will have been
- *            written.
- */
-int ir_raw_gen_manchester(struct ir_raw_event **ev, unsigned int max,
-                          const struct ir_raw_timings_manchester *timings,
-                          unsigned int n, unsigned int data)
-{
-        bool need_pulse;
-        unsigned int i;
-        int ret = -ENOBUFS;
-
-        i = 1 << (n - 1);
-
-        if (timings->leader) {
-                if (!max--)
-                        return ret;
-                if (timings->pulse_space_start) {
-                        init_ir_raw_event_duration((*ev)++, 1, timings->leader);
-
-                        if (!max--)
-                                return ret;
-                        init_ir_raw_event_duration((*ev), 0, timings->leader);
-                } else {
-                        init_ir_raw_event_duration((*ev), 1, timings->leader);
-                }
-                i >>= 1;
-        } else {
-                /* continue existing signal */
-                --(*ev);
-        }
-        /* from here on *ev will point to the last event rather than the next */
-
-        while (n && i > 0) {
-                need_pulse = !(data & i);
-                if (timings->invert)
-                        need_pulse = !need_pulse;
-                if (need_pulse == !!(*ev)->pulse) {
-                        (*ev)->duration += timings->clock;
-                } else {
-                        if (!max--)
-                                goto nobufs;
-                        init_ir_raw_event_duration(++(*ev), need_pulse,
-                                                   timings->clock);
-                }
-
-                if (!max--)
-                        goto nobufs;
-                init_ir_raw_event_duration(++(*ev), !need_pulse,
-                                           timings->clock);
-                i >>= 1;
-        }
-
-        if (timings->trailer_space) {
-                if (!(*ev)->pulse)
-                        (*ev)->duration += timings->trailer_space;
-                else if (!max--)
-                        goto nobufs;
-                else
-                        init_ir_raw_event_duration(++(*ev), 0,
-                                                   timings->trailer_space);
-        }
-
-        ret = 0;
-nobufs:
-        /* point to the next event rather than last event before returning */
-        ++(*ev);
-        return ret;
-}
-EXPORT_SYMBOL(ir_raw_gen_manchester);
-
-/**
- * ir_raw_encode_scancode() - Encode a scancode as raw events
- *
- * @protocols: permitted protocols
- * @scancode:  scancode filter describing a single scancode
- * @events:    array of raw events to write into
- * @max:       max number of raw events
- *
- * Attempts to encode the scancode as raw events.
- *
- * Returns:    The number of events written.
- *             -ENOBUFS if there isn't enough space in the array to fit the
- *             encoding. In this case all @max events will have been written.
- *             -EINVAL if the scancode is ambiguous or invalid, or if no
- *             compatible encoder was found.
- */
-int ir_raw_encode_scancode(u64 protocols,
-                           const struct rc_scancode_filter *scancode,
-                           struct ir_raw_event *events, unsigned int max)
-{
-        struct ir_raw_handler *handler;
-        int ret = -EINVAL;
-
-        mutex_lock(&ir_raw_handler_lock);
-        list_for_each_entry(handler, &ir_raw_handler_list, list) {
-                if (handler->protocols & protocols && handler->encode) {
-                        ret = handler->encode(protocols, scancode, events, max);
-                        if (ret >= 0 || ret == -ENOBUFS)
-                                break;
-                }
-        }
-        mutex_unlock(&ir_raw_handler_lock);
-
-        return ret;
-}
-EXPORT_SYMBOL(ir_raw_encode_scancode);
-
 /*
  * Used to (un)register raw event clients
  */
@@ -463,8 +328,6 @@ int ir_raw_handler_register(struct ir_raw_handler *ir_raw_handler)
         list_for_each_entry(raw, &ir_raw_client_list, list)
                 ir_raw_handler->raw_register(raw->dev);
         available_protocols |= ir_raw_handler->protocols;
-        if (ir_raw_handler->encode)
-                encode_protocols |= ir_raw_handler->protocols;
         mutex_unlock(&ir_raw_handler_lock);
 
         return 0;
@@ -481,8 +344,6 @@ void ir_raw_handler_unregister(struct ir_raw_handler *ir_raw_handler)
         list_for_each_entry(raw, &ir_raw_client_list, list)
                 ir_raw_handler->raw_unregister(raw->dev);
         available_protocols &= ~ir_raw_handler->protocols;
-        if (ir_raw_handler->encode)
-                encode_protocols &= ~ir_raw_handler->protocols;
         mutex_unlock(&ir_raw_handler_lock);
 }
 EXPORT_SYMBOL(ir_raw_handler_unregister);
diff --git a/drivers/media/rc/rc-loopback.c b/drivers/media/rc/rc-loopback.c
index d8bdf63ce985..63dace8198b0 100644
--- a/drivers/media/rc/rc-loopback.c
+++ b/drivers/media/rc/rc-loopback.c
@@ -26,7 +26,6 @@
 #include <linux/device.h>
 #include <linux/module.h>
 #include <linux/sched.h>
-#include <linux/slab.h>
 #include <media/rc-core.h>
 
 #define DRIVER_NAME "rc-loopback"
@@ -177,39 +176,6 @@ static int loop_set_carrier_report(struct rc_dev *dev, int enable)
         return 0;
 }
 
-static int loop_set_wakeup_filter(struct rc_dev *dev,
-                                  struct rc_scancode_filter *sc_filter)
-{
-        static const unsigned int max = 512;
-        struct ir_raw_event *raw;
-        int ret;
-        int i;
-
-        /* fine to disable filter */
-        if (!sc_filter->mask)
-                return 0;
-
-        /* encode the specified filter and loop it back */
-        raw = kmalloc_array(max, sizeof(*raw), GFP_KERNEL);
-        ret = ir_raw_encode_scancode(dev->enabled_wakeup_protocols, sc_filter,
-                                     raw, max);
-        /* still loop back the partial raw IR even if it's incomplete */
-        if (ret == -ENOBUFS)
-                ret = max;
-        if (ret >= 0) {
-                /* do the loopback */
-                for (i = 0; i < ret; ++i)
-                        ir_raw_event_store(dev, &raw[i]);
-                ir_raw_event_handle(dev);
-
-                ret = 0;
-        }
-
-        kfree(raw);
-
-        return ret;
-}
-
 static int __init loop_init(void)
 {
         struct rc_dev *rc;
@@ -229,7 +195,6 @@ static int __init loop_init(void)
         rc->map_name = RC_MAP_EMPTY;
         rc->priv = &loopdev;
         rc->driver_type = RC_DRIVER_IR_RAW;
-        rc->encode_wakeup = true;
         rc->allowed_protocols = RC_BIT_ALL;
         rc->timeout = 100 * 1000 * 1000; /* 100 ms */
         rc->min_timeout = 1;
@@ -244,7 +209,6 @@ static int __init loop_init(void)
         rc->s_idle = loop_set_idle;
         rc->s_learning_mode = loop_set_learning_mode;
         rc->s_carrier_report = loop_set_carrier_report;
-        rc->s_wakeup_filter = loop_set_wakeup_filter;
 
         loopdev.txmask = RXMASK_REGULAR;
         loopdev.txcarrier = 36000;
diff --git a/drivers/media/rc/rc-main.c b/drivers/media/rc/rc-main.c
index 9d015db65280..0ff388a16168 100644
--- a/drivers/media/rc/rc-main.c
+++ b/drivers/media/rc/rc-main.c
@@ -865,8 +865,6 @@ static ssize_t show_protocols(struct device *device,
         } else {
                 enabled = dev->enabled_wakeup_protocols;
                 allowed = dev->allowed_wakeup_protocols;
-                if (dev->encode_wakeup && !allowed)
-                        allowed = ir_raw_get_encode_protocols();
         }
 
         mutex_unlock(&dev->lock);
@@ -1408,16 +1406,13 @@ int rc_register_device(struct rc_dev *dev)
                  path ? path : "N/A");
         kfree(path);
 
-        if (dev->driver_type == RC_DRIVER_IR_RAW || dev->encode_wakeup) {
+        if (dev->driver_type == RC_DRIVER_IR_RAW) {
                 /* Load raw decoders, if they aren't already */
                 if (!raw_init) {
                         IR_dprintk(1, "Loading raw decoders\n");
                         ir_raw_init();
                         raw_init = true;
                 }
-        }
-
-        if (dev->driver_type == RC_DRIVER_IR_RAW) {
                 /* calls ir_register_device so unlock mutex here*/
                 mutex_unlock(&dev->lock);
                 rc = ir_raw_event_register(dev);
diff --git a/drivers/media/v4l2-core/videobuf2-core.c b/drivers/media/v4l2-core/videobuf2-core.c
index 93b315459098..a14c428f70e9 100644
--- a/drivers/media/v4l2-core/videobuf2-core.c
+++ b/drivers/media/v4l2-core/videobuf2-core.c
@@ -715,6 +715,7 @@ static void __fill_v4l2_buffer(struct vb2_buffer *vb, struct v4l2_buffer *b)
                 break;
         case VB2_BUF_STATE_PREPARING:
         case VB2_BUF_STATE_DEQUEUED:
+        case VB2_BUF_STATE_REQUEUEING:
                 /* nothing */
                 break;
         }
@@ -1182,7 +1183,8 @@ void vb2_buffer_done(struct vb2_buffer *vb, enum vb2_buffer_state state)
 
         if (WARN_ON(state != VB2_BUF_STATE_DONE &&
                     state != VB2_BUF_STATE_ERROR &&
-                    state != VB2_BUF_STATE_QUEUED))
+                    state != VB2_BUF_STATE_QUEUED &&
+                    state != VB2_BUF_STATE_REQUEUEING))
                 state = VB2_BUF_STATE_ERROR;
 
 #ifdef CONFIG_VIDEO_ADV_DEBUG
@@ -1199,22 +1201,30 @@ void vb2_buffer_done(struct vb2_buffer *vb, enum vb2_buffer_state state)
         for (plane = 0; plane < vb->num_planes; ++plane)
                 call_void_memop(vb, finish, vb->planes[plane].mem_priv);
 
-        /* Add the buffer to the done buffers list */
         spin_lock_irqsave(&q->done_lock, flags);
-        vb->state = state;
-        if (state != VB2_BUF_STATE_QUEUED)
+        if (state == VB2_BUF_STATE_QUEUED ||
+            state == VB2_BUF_STATE_REQUEUEING) {
+                vb->state = VB2_BUF_STATE_QUEUED;
+        } else {
+                /* Add the buffer to the done buffers list */
                 list_add_tail(&vb->done_entry, &q->done_list);
+                vb->state = state;
+        }
         atomic_dec(&q->owned_by_drv_count);
         spin_unlock_irqrestore(&q->done_lock, flags);
 
-        if (state == VB2_BUF_STATE_QUEUED) {
+        switch (state) {
+        case VB2_BUF_STATE_QUEUED:
+                return;
+        case VB2_BUF_STATE_REQUEUEING:
                 if (q->start_streaming_called)
                         __enqueue_in_driver(vb);
                 return;
+        default:
+                /* Inform any processes that may be waiting for buffers */
+                wake_up(&q->done_wq);
+                break;
         }
-
-        /* Inform any processes that may be waiting for buffers */
-        wake_up(&q->done_wq);
 }
 EXPORT_SYMBOL_GPL(vb2_buffer_done);
 
@@ -1244,19 +1254,19 @@ EXPORT_SYMBOL_GPL(vb2_discard_done);
 
 static void vb2_warn_zero_bytesused(struct vb2_buffer *vb)
 {
-        static bool __check_once __read_mostly;
+        static bool check_once;
 
-        if (__check_once)
+        if (check_once)
                 return;
 
-        __check_once = true;
-        __WARN();
+        check_once = true;
+        WARN_ON(1);
 
-        pr_warn_once("use of bytesused == 0 is deprecated and will be removed in the future,\n");
+        pr_warn("use of bytesused == 0 is deprecated and will be removed in the future,\n");
         if (vb->vb2_queue->allow_zero_bytesused)
-                pr_warn_once("use VIDIOC_DECODER_CMD(V4L2_DEC_CMD_STOP) instead.\n");
+                pr_warn("use VIDIOC_DECODER_CMD(V4L2_DEC_CMD_STOP) instead.\n");
         else
-                pr_warn_once("use the actual size instead.\n");
+                pr_warn("use the actual size instead.\n");
 }
 
 /**
