author | Maxim Levitsky <maximlevitsky@gmail.com> | 2010-10-16 18:56:30 -0400
committer | Mauro Carvalho Chehab <mchehab@redhat.com> | 2010-10-21 09:04:34 -0400
commit | c29bc4d77d530af27d066d54e9d2c612dd1b9018 (patch)
tree | 106704402a39f6c11325bbdffe125fe502cca3cf
parent | e1b1ddbe8f415343ed8da323964498f4f0e1b693 (diff)
[media] IR: ene_ir: don't upload all settings on each TX packet
Uploading everything on each packet is just unnecessary, and the new arrangement is more logical.
Also a lot of refactoring.
Signed-off-by: Maxim Levitsky <maximlevitsky@gmail.com>
Signed-off-by: Mauro Carvalho Chehab <mchehab@redhat.com>
-rw-r--r-- | drivers/media/IR/ene_ir.c | 474
-rw-r--r-- | drivers/media/IR/ene_ir.h | 6
2 files changed, 251 insertions, 229 deletions
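For orientation, here is a condensed sketch of the call structure this patch introduces, assembled from the hunks below (debug output, argument checks, locking and error paths omitted, so it is not a buildable excerpt). The TX carrier and transmitter-mask registers are now programmed by dedicated setters that run when the corresponding setting changes, plus once at load and resume time, instead of on every transmission.

/* Upload all hardware settings at once. Used at load and resume time */
static void ene_setup_hw_settings(struct ene_device *dev)
{
	if (dev->hw_learning_and_tx_capable) {
		ene_tx_set_carrier(dev);       /* programs ENE_CIRMOD_PRD / ENE_CIRMOD_HPRD */
		ene_tx_set_transmitters(dev);  /* programs the GPIO output-mask bits */
	}
	ene_rx_setup(dev);
}

/* The outside-interface setters now push the new value to hardware directly */
static int ene_set_tx_mask(void *data, u32 tx_mask)
{
	struct ene_device *dev = (struct ene_device *)data;

	dev->transmitter_mask = tx_mask;
	ene_tx_set_transmitters(dev);
	return 0;
}

Probe therefore runs ene_rx_setup_hw_buffer(), ene_setup_default_settings() and then ene_setup_hw_settings(), and resume simply replays ene_setup_hw_settings() before re-enabling RX.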
diff --git a/drivers/media/IR/ene_ir.c b/drivers/media/IR/ene_ir.c
index d546b5ef331b..7637babcd262 100644
--- a/drivers/media/IR/ene_ir.c
+++ b/drivers/media/IR/ene_ir.c
@@ -43,7 +43,7 @@ | |||
43 | #include "ene_ir.h" | 43 | #include "ene_ir.h" |
44 | 44 | ||
45 | static int sample_period; | 45 | static int sample_period; |
46 | static bool learning_mode; | 46 | static bool learning_mode_force; |
47 | static int debug; | 47 | static int debug; |
48 | static bool txsim; | 48 | static bool txsim; |
49 | 49 | ||
@@ -190,6 +190,145 @@ static int ene_hw_detect(struct ene_device *dev) | |||
190 | return 0; | 190 | return 0; |
191 | } | 191 | } |
192 | 192 | ||
193 | /* Read properities of hw sample buffer */ | ||
194 | static void ene_rx_setup_hw_buffer(struct ene_device *dev) | ||
195 | { | ||
196 | u16 tmp; | ||
197 | |||
198 | ene_rx_read_hw_pointer(dev); | ||
199 | dev->r_pointer = dev->w_pointer; | ||
200 | |||
201 | if (!dev->hw_extra_buffer) { | ||
202 | dev->buffer_len = ENE_FW_PACKET_SIZE * 2; | ||
203 | return; | ||
204 | } | ||
205 | |||
206 | tmp = ene_read_reg(dev, ENE_FW_SAMPLE_BUFFER); | ||
207 | tmp |= ene_read_reg(dev, ENE_FW_SAMPLE_BUFFER+1) << 8; | ||
208 | dev->extra_buf1_address = tmp; | ||
209 | |||
210 | dev->extra_buf1_len = ene_read_reg(dev, ENE_FW_SAMPLE_BUFFER + 2); | ||
211 | |||
212 | tmp = ene_read_reg(dev, ENE_FW_SAMPLE_BUFFER + 3); | ||
213 | tmp |= ene_read_reg(dev, ENE_FW_SAMPLE_BUFFER + 4) << 8; | ||
214 | dev->extra_buf2_address = tmp; | ||
215 | |||
216 | dev->extra_buf2_len = ene_read_reg(dev, ENE_FW_SAMPLE_BUFFER + 5); | ||
217 | |||
218 | dev->buffer_len = dev->extra_buf1_len + dev->extra_buf2_len + 8; | ||
219 | |||
220 | ene_notice("Hardware uses 2 extended buffers:"); | ||
221 | ene_notice(" 0x%04x - len : %d", dev->extra_buf1_address, | ||
222 | dev->extra_buf1_len); | ||
223 | ene_notice(" 0x%04x - len : %d", dev->extra_buf2_address, | ||
224 | dev->extra_buf2_len); | ||
225 | |||
226 | ene_notice("Total buffer len = %d", dev->buffer_len); | ||
227 | |||
228 | if (dev->buffer_len > 64 || dev->buffer_len < 16) | ||
229 | goto error; | ||
230 | |||
231 | if (dev->extra_buf1_address > 0xFBFC || | ||
232 | dev->extra_buf1_address < 0xEC00) | ||
233 | goto error; | ||
234 | |||
235 | if (dev->extra_buf2_address > 0xFBFC || | ||
236 | dev->extra_buf2_address < 0xEC00) | ||
237 | goto error; | ||
238 | |||
239 | if (dev->r_pointer > dev->buffer_len) | ||
240 | goto error; | ||
241 | |||
242 | ene_set_reg_mask(dev, ENE_FW1, ENE_FW1_EXTRA_BUF_HND); | ||
243 | return; | ||
244 | error: | ||
245 | ene_warn("Error validating extra buffers, device probably won't work"); | ||
246 | dev->hw_extra_buffer = false; | ||
247 | ene_clear_reg_mask(dev, ENE_FW1, ENE_FW1_EXTRA_BUF_HND); | ||
248 | } | ||
249 | |||
250 | |||
251 | /* Restore the pointers to extra buffers - to make module reload work*/ | ||
252 | static void ene_rx_restore_hw_buffer(struct ene_device *dev) | ||
253 | { | ||
254 | if (!dev->hw_extra_buffer) | ||
255 | return; | ||
256 | |||
257 | ene_write_reg(dev, ENE_FW_SAMPLE_BUFFER + 0, | ||
258 | dev->extra_buf1_address & 0xFF); | ||
259 | ene_write_reg(dev, ENE_FW_SAMPLE_BUFFER + 1, | ||
260 | dev->extra_buf1_address >> 8); | ||
261 | ene_write_reg(dev, ENE_FW_SAMPLE_BUFFER + 2, dev->extra_buf1_len); | ||
262 | |||
263 | ene_write_reg(dev, ENE_FW_SAMPLE_BUFFER + 3, | ||
264 | dev->extra_buf2_address & 0xFF); | ||
265 | ene_write_reg(dev, ENE_FW_SAMPLE_BUFFER + 4, | ||
266 | dev->extra_buf2_address >> 8); | ||
267 | ene_write_reg(dev, ENE_FW_SAMPLE_BUFFER + 5, | ||
268 | dev->extra_buf2_len); | ||
269 | ene_clear_reg_mask(dev, ENE_FW1, ENE_FW1_EXTRA_BUF_HND); | ||
270 | } | ||
271 | |||
272 | /* Read hardware write pointer */ | ||
273 | static void ene_rx_read_hw_pointer(struct ene_device *dev) | ||
274 | { | ||
275 | if (dev->hw_extra_buffer) | ||
276 | dev->w_pointer = ene_read_reg(dev, ENE_FW_RX_POINTER); | ||
277 | else | ||
278 | dev->w_pointer = ene_read_reg(dev, ENE_FW2) | ||
279 | & ENE_FW2_BUF_WPTR ? 0 : ENE_FW_PACKET_SIZE; | ||
280 | |||
281 | dbg_verbose("RB: HW write pointer: %02x, driver read pointer: %02x", | ||
282 | dev->w_pointer, dev->r_pointer); | ||
283 | } | ||
284 | |||
285 | /* Gets address of next sample from HW ring buffer */ | ||
286 | static int ene_rx_get_sample_reg(struct ene_device *dev) | ||
287 | { | ||
288 | int r_pointer; | ||
289 | |||
290 | if (dev->r_pointer == dev->w_pointer) { | ||
291 | dbg_verbose("RB: hit end, try update w_pointer"); | ||
292 | ene_rx_read_hw_pointer(dev); | ||
293 | } | ||
294 | |||
295 | if (dev->r_pointer == dev->w_pointer) { | ||
296 | dbg_verbose("RB: end of data at %d", dev->r_pointer); | ||
297 | return 0; | ||
298 | } | ||
299 | |||
300 | dbg_verbose("RB: reading at offset %d", dev->r_pointer); | ||
301 | r_pointer = dev->r_pointer; | ||
302 | |||
303 | dev->r_pointer++; | ||
304 | if (dev->r_pointer == dev->buffer_len) | ||
305 | dev->r_pointer = 0; | ||
306 | |||
307 | dbg_verbose("RB: next read will be from offset %d", dev->r_pointer); | ||
308 | |||
309 | if (r_pointer < 8) { | ||
310 | dbg_verbose("RB: read at main buffer at %d", r_pointer); | ||
311 | return ENE_FW_SAMPLE_BUFFER + r_pointer; | ||
312 | } | ||
313 | |||
314 | r_pointer -= 8; | ||
315 | |||
316 | if (r_pointer < dev->extra_buf1_len) { | ||
317 | dbg_verbose("RB: read at 1st extra buffer at %d", r_pointer); | ||
318 | return dev->extra_buf1_address + r_pointer; | ||
319 | } | ||
320 | |||
321 | r_pointer -= dev->extra_buf1_len; | ||
322 | |||
323 | if (r_pointer < dev->extra_buf2_len) { | ||
324 | dbg_verbose("RB: read at 2nd extra buffer at %d", r_pointer); | ||
325 | return dev->extra_buf2_address + r_pointer; | ||
326 | } | ||
327 | |||
328 | dbg("attempt to read beyong ring bufer end"); | ||
329 | return 0; | ||
330 | } | ||
331 | |||
193 | /* Sense current received carrier */ | 332 | /* Sense current received carrier */ |
194 | void ene_rx_sense_carrier(struct ene_device *dev) | 333 | void ene_rx_sense_carrier(struct ene_device *dev) |
195 | { | 334 | { |
@@ -223,14 +362,14 @@ void ene_rx_sense_carrier(struct ene_device *dev) | |||
223 | } | 362 | } |
224 | 363 | ||
225 | /* this enables/disables the CIR RX engine */ | 364 | /* this enables/disables the CIR RX engine */ |
226 | static void ene_enable_cir_engine(struct ene_device *dev, bool enable) | 365 | static void ene_rx_enable_cir_engine(struct ene_device *dev, bool enable) |
227 | { | 366 | { |
228 | ene_set_clear_reg_mask(dev, ENE_CIRCFG, | 367 | ene_set_clear_reg_mask(dev, ENE_CIRCFG, |
229 | ENE_CIRCFG_RX_EN | ENE_CIRCFG_RX_IRQ, enable); | 368 | ENE_CIRCFG_RX_EN | ENE_CIRCFG_RX_IRQ, enable); |
230 | } | 369 | } |
231 | 370 | ||
232 | /* this selects input for CIR engine. Ether GPIO 0A or GPIO40*/ | 371 | /* this selects input for CIR engine. Ether GPIO 0A or GPIO40*/ |
233 | static void ene_select_rx_input(struct ene_device *dev, bool gpio_0a) | 372 | static void ene_rx_select_input(struct ene_device *dev, bool gpio_0a) |
234 | { | 373 | { |
235 | ene_set_clear_reg_mask(dev, ENE_CIRCFG2, ENE_CIRCFG2_GPIO0A, gpio_0a); | 374 | ene_set_clear_reg_mask(dev, ENE_CIRCFG2, ENE_CIRCFG2_GPIO0A, gpio_0a); |
236 | } | 375 | } |
@@ -239,7 +378,7 @@ static void ene_select_rx_input(struct ene_device *dev, bool gpio_0a) | |||
239 | * this enables alternative input via fan tachometer sensor and bypasses | 378 | * this enables alternative input via fan tachometer sensor and bypasses |
240 | * the hw CIR engine | 379 | * the hw CIR engine |
241 | */ | 380 | */ |
242 | static void ene_enable_fan_input(struct ene_device *dev, bool enable) | 381 | static void ene_rx_enable_fan_input(struct ene_device *dev, bool enable) |
243 | { | 382 | { |
244 | if (!dev->hw_fan_input) | 383 | if (!dev->hw_fan_input) |
245 | return; | 384 | return; |
@@ -250,16 +389,18 @@ static void ene_enable_fan_input(struct ene_device *dev, bool enable) | |||
250 | ene_write_reg(dev, ENE_FAN_AS_IN1, ENE_FAN_AS_IN1_EN); | 389 | ene_write_reg(dev, ENE_FAN_AS_IN1, ENE_FAN_AS_IN1_EN); |
251 | ene_write_reg(dev, ENE_FAN_AS_IN2, ENE_FAN_AS_IN2_EN); | 390 | ene_write_reg(dev, ENE_FAN_AS_IN2, ENE_FAN_AS_IN2_EN); |
252 | } | 391 | } |
253 | dev->rx_fan_input_inuse = enable; | ||
254 | } | 392 | } |
255 | 393 | ||
256 | /* setup the receiver for RX*/ | 394 | /* setup the receiver for RX*/ |
257 | static void ene_rx_setup(struct ene_device *dev) | 395 | static void ene_rx_setup(struct ene_device *dev) |
258 | { | 396 | { |
259 | bool learning_mode = dev->learning_enabled || | 397 | bool learning_mode = dev->learning_mode_enabled || |
260 | dev->carrier_detect_enabled; | 398 | dev->carrier_detect_enabled; |
261 | int sample_period_adjust = 0; | 399 | int sample_period_adjust = 0; |
262 | 400 | ||
401 | dbg("RX: setup receiver, learning mode = %d", learning_mode); | ||
402 | |||
403 | |||
263 | /* This selects RLC input and clears CFG2 settings */ | 404 | /* This selects RLC input and clears CFG2 settings */ |
264 | ene_write_reg(dev, ENE_CIRCFG2, 0x00); | 405 | ene_write_reg(dev, ENE_CIRCFG2, 0x00); |
265 | 406 | ||
@@ -284,7 +425,7 @@ static void ene_rx_setup(struct ene_device *dev) | |||
284 | and vice versa. | 425 | and vice versa. |
285 | This input will carry non demodulated | 426 | This input will carry non demodulated |
286 | signal, and we will tell the hw to demodulate it itself */ | 427 | signal, and we will tell the hw to demodulate it itself */ |
287 | ene_select_rx_input(dev, !dev->hw_use_gpio_0a); | 428 | ene_rx_select_input(dev, !dev->hw_use_gpio_0a); |
288 | dev->rx_fan_input_inuse = false; | 429 | dev->rx_fan_input_inuse = false; |
289 | 430 | ||
290 | /* Enable carrier demodulation */ | 431 | /* Enable carrier demodulation */ |
@@ -298,7 +439,7 @@ static void ene_rx_setup(struct ene_device *dev) | |||
298 | if (dev->hw_fan_input) | 439 | if (dev->hw_fan_input) |
299 | dev->rx_fan_input_inuse = true; | 440 | dev->rx_fan_input_inuse = true; |
300 | else | 441 | else |
301 | ene_select_rx_input(dev, dev->hw_use_gpio_0a); | 442 | ene_rx_select_input(dev, dev->hw_use_gpio_0a); |
302 | 443 | ||
303 | /* Disable carrier detection & demodulation */ | 444 | /* Disable carrier detection & demodulation */ |
304 | ene_clear_reg_mask(dev, ENE_CIRCFG, ENE_CIRCFG_CARR_DEMOD); | 445 | ene_clear_reg_mask(dev, ENE_CIRCFG, ENE_CIRCFG_CARR_DEMOD); |
@@ -339,7 +480,6 @@ select_timeout: | |||
339 | static void ene_rx_enable(struct ene_device *dev) | 480 | static void ene_rx_enable(struct ene_device *dev) |
340 | { | 481 | { |
341 | u8 reg_value; | 482 | u8 reg_value; |
342 | dbg("RX: setup receiver, learning mode = %d", learning_mode); | ||
343 | 483 | ||
344 | /* Enable system interrupt */ | 484 | /* Enable system interrupt */ |
345 | if (dev->hw_revision < ENE_HW_C) { | 485 | if (dev->hw_revision < ENE_HW_C) { |
@@ -354,8 +494,8 @@ static void ene_rx_enable(struct ene_device *dev) | |||
354 | } | 494 | } |
355 | 495 | ||
356 | /* Enable inputs */ | 496 | /* Enable inputs */ |
357 | ene_enable_fan_input(dev, dev->rx_fan_input_inuse); | 497 | ene_rx_enable_fan_input(dev, dev->rx_fan_input_inuse); |
358 | ene_enable_cir_engine(dev, !dev->rx_fan_input_inuse); | 498 | ene_rx_enable_cir_engine(dev, !dev->rx_fan_input_inuse); |
359 | 499 | ||
360 | /* ack any pending irqs - just in case */ | 500 | /* ack any pending irqs - just in case */ |
361 | ene_irq_status(dev); | 501 | ene_irq_status(dev); |
@@ -372,8 +512,8 @@ static void ene_rx_enable(struct ene_device *dev) | |||
372 | static void ene_rx_disable(struct ene_device *dev) | 512 | static void ene_rx_disable(struct ene_device *dev) |
373 | { | 513 | { |
374 | /* disable inputs */ | 514 | /* disable inputs */ |
375 | ene_enable_cir_engine(dev, false); | 515 | ene_rx_enable_cir_engine(dev, false); |
376 | ene_enable_fan_input(dev, false); | 516 | ene_rx_enable_fan_input(dev, false); |
377 | 517 | ||
378 | /* disable hardware IRQ and firmware flag */ | 518 | /* disable hardware IRQ and firmware flag */ |
379 | ene_clear_reg_mask(dev, ENE_FW1, ENE_FW1_ENABLE | ENE_FW1_IRQ); | 519 | ene_clear_reg_mask(dev, ENE_FW1, ENE_FW1_ENABLE | ENE_FW1_IRQ); |
@@ -382,8 +522,60 @@ static void ene_rx_disable(struct ene_device *dev) | |||
382 | dev->rx_enabled = false; | 522 | dev->rx_enabled = false; |
383 | } | 523 | } |
384 | 524 | ||
525 | /* This resets the receiver. Usefull to stop stream of spaces at end of | ||
526 | * transmission | ||
527 | */ | ||
528 | static void ene_rx_reset(struct ene_device *dev) | ||
529 | { | ||
530 | ene_clear_reg_mask(dev, ENE_CIRCFG, ENE_CIRCFG_RX_EN); | ||
531 | ene_set_reg_mask(dev, ENE_CIRCFG, ENE_CIRCFG_RX_EN); | ||
532 | } | ||
533 | |||
534 | /* Set up the TX carrier frequency and duty cycle */ | ||
535 | static void ene_tx_set_carrier(struct ene_device *dev) | ||
536 | { | ||
537 | u8 tx_puls_width; | ||
538 | unsigned long flags; | ||
539 | |||
540 | spin_lock_irqsave(&dev->hw_lock, flags); | ||
541 | |||
542 | ene_set_clear_reg_mask(dev, ENE_CIRCFG, | ||
543 | ENE_CIRCFG_TX_CARR, dev->tx_period > 0); | ||
544 | |||
545 | if (!dev->tx_period) | ||
546 | goto unlock; | ||
547 | |||
548 | BUG_ON(dev->tx_duty_cycle >= 100 || dev->tx_duty_cycle <= 0); | ||
549 | |||
550 | tx_puls_width = dev->tx_period / (100 / dev->tx_duty_cycle); | ||
551 | |||
552 | if (!tx_puls_width) | ||
553 | tx_puls_width = 1; | ||
554 | |||
555 | dbg("TX: pulse distance = %d * 500 ns", dev->tx_period); | ||
556 | dbg("TX: pulse width = %d * 500 ns", tx_puls_width); | ||
557 | |||
558 | ene_write_reg(dev, ENE_CIRMOD_PRD, dev->tx_period | ENE_CIRMOD_PRD_POL); | ||
559 | ene_write_reg(dev, ENE_CIRMOD_HPRD, tx_puls_width); | ||
560 | unlock: | ||
561 | spin_unlock_irqrestore(&dev->hw_lock, flags); | ||
562 | } | ||
563 | |||
564 | /* Enable/disable transmitters */ | ||
565 | static void ene_tx_set_transmitters(struct ene_device *dev) | ||
566 | { | ||
567 | unsigned long flags; | ||
568 | |||
569 | spin_lock_irqsave(&dev->hw_lock, flags); | ||
570 | ene_set_clear_reg_mask(dev, ENE_GPIOFS8, ENE_GPIOFS8_GPIO41, | ||
571 | !!(dev->transmitter_mask & 0x01)); | ||
572 | ene_set_clear_reg_mask(dev, ENE_GPIOFS1, ENE_GPIOFS1_GPIO0D, | ||
573 | !!(dev->transmitter_mask & 0x02)); | ||
574 | spin_unlock_irqrestore(&dev->hw_lock, flags); | ||
575 | } | ||
576 | |||
385 | /* prepare transmission */ | 577 | /* prepare transmission */ |
386 | static void ene_tx_prepare(struct ene_device *dev) | 578 | static void ene_tx_enable(struct ene_device *dev) |
387 | { | 579 | { |
388 | u8 conf1 = ene_read_reg(dev, ENE_CIRCFG); | 580 | u8 conf1 = ene_read_reg(dev, ENE_CIRCFG); |
389 | u8 fwreg2 = ene_read_reg(dev, ENE_FW2); | 581 | u8 fwreg2 = ene_read_reg(dev, ENE_FW2); |
@@ -400,32 +592,6 @@ static void ene_tx_prepare(struct ene_device *dev) | |||
400 | if (!(fwreg2 & (ENE_FW2_EMMITER1_CONN | ENE_FW2_EMMITER2_CONN))) | 592 | if (!(fwreg2 & (ENE_FW2_EMMITER1_CONN | ENE_FW2_EMMITER2_CONN))) |
401 | ene_warn("TX: transmitter cable isn't connected!"); | 593 | ene_warn("TX: transmitter cable isn't connected!"); |
402 | 594 | ||
403 | /* Set transmitter mask */ | ||
404 | ene_set_clear_reg_mask(dev, ENE_GPIOFS8, ENE_GPIOFS8_GPIO41, | ||
405 | !!(dev->transmitter_mask & 0x01)); | ||
406 | ene_set_clear_reg_mask(dev, ENE_GPIOFS1, ENE_GPIOFS1_GPIO0D, | ||
407 | !!(dev->transmitter_mask & 0x02)); | ||
408 | |||
409 | /* Set the carrier period && duty cycle */ | ||
410 | if (dev->tx_period) { | ||
411 | |||
412 | int tx_puls_width = dev->tx_period / (100 / dev->tx_duty_cycle); | ||
413 | |||
414 | if (!tx_puls_width) | ||
415 | tx_puls_width = 1; | ||
416 | |||
417 | dbg("TX: pulse distance = %d * 500 ns", dev->tx_period); | ||
418 | dbg("TX: pulse width = %d * 500 ns", tx_puls_width); | ||
419 | |||
420 | ene_write_reg(dev, ENE_CIRMOD_PRD, ENE_CIRMOD_PRD_POL | | ||
421 | dev->tx_period); | ||
422 | |||
423 | ene_write_reg(dev, ENE_CIRMOD_HPRD, tx_puls_width); | ||
424 | |||
425 | conf1 |= ENE_CIRCFG_TX_CARR; | ||
426 | } else | ||
427 | conf1 &= ~ENE_CIRCFG_TX_CARR; | ||
428 | |||
429 | /* disable receive on revc */ | 595 | /* disable receive on revc */ |
430 | if (dev->hw_revision == ENE_HW_C) | 596 | if (dev->hw_revision == ENE_HW_C) |
431 | conf1 &= ~ENE_CIRCFG_RX_EN; | 597 | conf1 &= ~ENE_CIRCFG_RX_EN; |
@@ -436,7 +602,7 @@ static void ene_tx_prepare(struct ene_device *dev) | |||
436 | } | 602 | } |
437 | 603 | ||
438 | /* end transmission */ | 604 | /* end transmission */ |
439 | static void ene_tx_complete(struct ene_device *dev) | 605 | static void ene_tx_disable(struct ene_device *dev) |
440 | { | 606 | { |
441 | ene_write_reg(dev, ENE_CIRCFG, dev->saved_conf1); | 607 | ene_write_reg(dev, ENE_CIRCFG, dev->saved_conf1); |
442 | dev->tx_buffer = NULL; | 608 | dev->tx_buffer = NULL; |
@@ -465,7 +631,7 @@ static void ene_tx_sample(struct ene_device *dev) | |||
465 | goto exit; | 631 | goto exit; |
466 | } else { | 632 | } else { |
467 | dbg("TX: last sample sent by hardware"); | 633 | dbg("TX: last sample sent by hardware"); |
468 | ene_tx_complete(dev); | 634 | ene_tx_disable(dev); |
469 | complete(&dev->tx_complete); | 635 | complete(&dev->tx_complete); |
470 | return; | 636 | return; |
471 | } | 637 | } |
@@ -509,85 +675,6 @@ static void ene_tx_irqsim(unsigned long data) | |||
509 | spin_unlock_irqrestore(&dev->hw_lock, flags); | 675 | spin_unlock_irqrestore(&dev->hw_lock, flags); |
510 | } | 676 | } |
511 | 677 | ||
512 | /* Read properities of hw sample buffer */ | ||
513 | static void ene_setup_hw_buffer(struct ene_device *dev) | ||
514 | { | ||
515 | u16 tmp; | ||
516 | |||
517 | ene_read_hw_pointer(dev); | ||
518 | dev->r_pointer = dev->w_pointer; | ||
519 | |||
520 | if (!dev->hw_extra_buffer) { | ||
521 | dev->buffer_len = ENE_FW_PACKET_SIZE * 2; | ||
522 | return; | ||
523 | } | ||
524 | |||
525 | tmp = ene_read_reg(dev, ENE_FW_SAMPLE_BUFFER); | ||
526 | tmp |= ene_read_reg(dev, ENE_FW_SAMPLE_BUFFER+1) << 8; | ||
527 | dev->extra_buf1_address = tmp; | ||
528 | |||
529 | dev->extra_buf1_len = ene_read_reg(dev, ENE_FW_SAMPLE_BUFFER + 2); | ||
530 | |||
531 | tmp = ene_read_reg(dev, ENE_FW_SAMPLE_BUFFER + 3); | ||
532 | tmp |= ene_read_reg(dev, ENE_FW_SAMPLE_BUFFER + 4) << 8; | ||
533 | dev->extra_buf2_address = tmp; | ||
534 | |||
535 | dev->extra_buf2_len = ene_read_reg(dev, ENE_FW_SAMPLE_BUFFER + 5); | ||
536 | |||
537 | dev->buffer_len = dev->extra_buf1_len + dev->extra_buf2_len + 8; | ||
538 | |||
539 | ene_notice("Hardware uses 2 extended buffers:"); | ||
540 | ene_notice(" 0x%04x - len : %d", dev->extra_buf1_address, | ||
541 | dev->extra_buf1_len); | ||
542 | ene_notice(" 0x%04x - len : %d", dev->extra_buf2_address, | ||
543 | dev->extra_buf2_len); | ||
544 | |||
545 | ene_notice("Total buffer len = %d", dev->buffer_len); | ||
546 | |||
547 | if (dev->buffer_len > 64 || dev->buffer_len < 16) | ||
548 | goto error; | ||
549 | |||
550 | if (dev->extra_buf1_address > 0xFBFC || | ||
551 | dev->extra_buf1_address < 0xEC00) | ||
552 | goto error; | ||
553 | |||
554 | if (dev->extra_buf2_address > 0xFBFC || | ||
555 | dev->extra_buf2_address < 0xEC00) | ||
556 | goto error; | ||
557 | |||
558 | if (dev->r_pointer > dev->buffer_len) | ||
559 | goto error; | ||
560 | |||
561 | ene_set_reg_mask(dev, ENE_FW1, ENE_FW1_EXTRA_BUF_HND); | ||
562 | return; | ||
563 | error: | ||
564 | ene_warn("Error validating extra buffers, device probably won't work"); | ||
565 | dev->hw_extra_buffer = false; | ||
566 | ene_clear_reg_mask(dev, ENE_FW1, ENE_FW1_EXTRA_BUF_HND); | ||
567 | } | ||
568 | |||
569 | |||
570 | /* Restore the pointers to extra buffers - to make module reload work*/ | ||
571 | static void ene_restore_extra_buffer(struct ene_device *dev) | ||
572 | { | ||
573 | if (!dev->hw_extra_buffer) | ||
574 | return; | ||
575 | |||
576 | ene_write_reg(dev, ENE_FW_SAMPLE_BUFFER + 0, | ||
577 | dev->extra_buf1_address & 0xFF); | ||
578 | ene_write_reg(dev, ENE_FW_SAMPLE_BUFFER + 1, | ||
579 | dev->extra_buf1_address >> 8); | ||
580 | ene_write_reg(dev, ENE_FW_SAMPLE_BUFFER + 2, dev->extra_buf1_len); | ||
581 | |||
582 | ene_write_reg(dev, ENE_FW_SAMPLE_BUFFER + 3, | ||
583 | dev->extra_buf2_address & 0xFF); | ||
584 | ene_write_reg(dev, ENE_FW_SAMPLE_BUFFER + 4, | ||
585 | dev->extra_buf2_address >> 8); | ||
586 | ene_write_reg(dev, ENE_FW_SAMPLE_BUFFER + 5, | ||
587 | dev->extra_buf2_len); | ||
588 | ene_clear_reg_mask(dev, ENE_FW1, ENE_FW1_EXTRA_BUF_HND); | ||
589 | } | ||
590 | |||
591 | 678 | ||
592 | /* read irq status and ack it */ | 679 | /* read irq status and ack it */ |
593 | static int ene_irq_status(struct ene_device *dev) | 680 | static int ene_irq_status(struct ene_device *dev) |
@@ -632,66 +719,6 @@ static int ene_irq_status(struct ene_device *dev) | |||
632 | return retval; | 719 | return retval; |
633 | } | 720 | } |
634 | 721 | ||
635 | /* Read hardware write pointer */ | ||
636 | static void ene_read_hw_pointer(struct ene_device *dev) | ||
637 | { | ||
638 | if (dev->hw_extra_buffer) | ||
639 | dev->w_pointer = ene_read_reg(dev, ENE_FW_RX_POINTER); | ||
640 | else | ||
641 | dev->w_pointer = ene_read_reg(dev, ENE_FW2) | ||
642 | & ENE_FW2_BUF_WPTR ? 0 : ENE_FW_PACKET_SIZE; | ||
643 | |||
644 | dbg_verbose("RB: HW write pointer: %02x, driver read pointer: %02x", | ||
645 | dev->w_pointer, dev->r_pointer); | ||
646 | } | ||
647 | |||
648 | /* Gets address of next sample from HW ring buffer */ | ||
649 | static int ene_get_sample_reg(struct ene_device *dev) | ||
650 | { | ||
651 | int r_pointer; | ||
652 | |||
653 | if (dev->r_pointer == dev->w_pointer) { | ||
654 | dbg_verbose("RB: hit end, try update w_pointer"); | ||
655 | ene_read_hw_pointer(dev); | ||
656 | } | ||
657 | |||
658 | if (dev->r_pointer == dev->w_pointer) { | ||
659 | dbg_verbose("RB: end of data at %d", dev->r_pointer); | ||
660 | return 0; | ||
661 | } | ||
662 | |||
663 | dbg_verbose("RB: reading at offset %d", dev->r_pointer); | ||
664 | r_pointer = dev->r_pointer; | ||
665 | |||
666 | dev->r_pointer++; | ||
667 | if (dev->r_pointer == dev->buffer_len) | ||
668 | dev->r_pointer = 0; | ||
669 | |||
670 | dbg_verbose("RB: next read will be from offset %d", dev->r_pointer); | ||
671 | |||
672 | if (r_pointer < 8) { | ||
673 | dbg_verbose("RB: read at main buffer at %d", r_pointer); | ||
674 | return ENE_FW_SAMPLE_BUFFER + r_pointer; | ||
675 | } | ||
676 | |||
677 | r_pointer -= 8; | ||
678 | |||
679 | if (r_pointer < dev->extra_buf1_len) { | ||
680 | dbg_verbose("RB: read at 1st extra buffer at %d", r_pointer); | ||
681 | return dev->extra_buf1_address + r_pointer; | ||
682 | } | ||
683 | |||
684 | r_pointer -= dev->extra_buf1_len; | ||
685 | |||
686 | if (r_pointer < dev->extra_buf2_len) { | ||
687 | dbg_verbose("RB: read at 2nd extra buffer at %d", r_pointer); | ||
688 | return dev->extra_buf2_address + r_pointer; | ||
689 | } | ||
690 | |||
691 | dbg("attempt to read beyong ring bufer end"); | ||
692 | return 0; | ||
693 | } | ||
694 | |||
695 | /* interrupt handler */ | 722 | /* interrupt handler */ |
696 | static irqreturn_t ene_isr(int irq, void *data) | 723 | static irqreturn_t ene_isr(int irq, void *data) |
697 | { | 724 | { |
@@ -706,7 +733,7 @@ static irqreturn_t ene_isr(int irq, void *data) | |||
706 | spin_lock_irqsave(&dev->hw_lock, flags); | 733 | spin_lock_irqsave(&dev->hw_lock, flags); |
707 | 734 | ||
708 | dbg_verbose("ISR called"); | 735 | dbg_verbose("ISR called"); |
709 | ene_read_hw_pointer(dev); | 736 | ene_rx_read_hw_pointer(dev); |
710 | irq_status = ene_irq_status(dev); | 737 | irq_status = ene_irq_status(dev); |
711 | 738 | ||
712 | if (!irq_status) | 739 | if (!irq_status) |
@@ -738,7 +765,7 @@ static irqreturn_t ene_isr(int irq, void *data) | |||
738 | 765 | ||
739 | while (1) { | 766 | while (1) { |
740 | 767 | ||
741 | reg = ene_get_sample_reg(dev); | 768 | reg = ene_rx_get_sample_reg(dev); |
742 | 769 | ||
743 | dbg_verbose("next sample to read at: %04x", reg); | 770 | dbg_verbose("next sample to read at: %04x", reg); |
744 | if (!reg) | 771 | if (!reg) |
@@ -788,17 +815,28 @@ unlock: | |||
788 | } | 815 | } |
789 | 816 | ||
790 | /* Initialize default settings */ | 817 | /* Initialize default settings */ |
791 | static void ene_setup_settings(struct ene_device *dev) | 818 | static void ene_setup_default_settings(struct ene_device *dev) |
792 | { | 819 | { |
793 | dev->tx_period = 32; | 820 | dev->tx_period = 32; |
794 | dev->tx_duty_cycle = 50; /*%*/ | 821 | dev->tx_duty_cycle = 50; /*%*/ |
795 | dev->transmitter_mask = 0x03; | 822 | dev->transmitter_mask = 0x03; |
796 | dev->learning_enabled = learning_mode; | 823 | dev->learning_mode_enabled = learning_mode_force; |
797 | 824 | ||
798 | /* Set reasonable default timeout */ | 825 | /* Set reasonable default timeout */ |
799 | dev->props->timeout = MS_TO_NS(150000); | 826 | dev->props->timeout = MS_TO_NS(150000); |
800 | } | 827 | } |
801 | 828 | ||
829 | /* Upload all hardware settings at once. Used at load and resume time */ | ||
830 | static void ene_setup_hw_settings(struct ene_device *dev) | ||
831 | { | ||
832 | if (dev->hw_learning_and_tx_capable) { | ||
833 | ene_tx_set_carrier(dev); | ||
834 | ene_tx_set_transmitters(dev); | ||
835 | } | ||
836 | |||
837 | ene_rx_setup(dev); | ||
838 | } | ||
839 | |||
802 | /* outside interface: called on first open*/ | 840 | /* outside interface: called on first open*/ |
803 | static int ene_open(void *data) | 841 | static int ene_open(void *data) |
804 | { | 842 | { |
@@ -826,7 +864,6 @@ static void ene_close(void *data) | |||
826 | static int ene_set_tx_mask(void *data, u32 tx_mask) | 864 | static int ene_set_tx_mask(void *data, u32 tx_mask) |
827 | { | 865 | { |
828 | struct ene_device *dev = (struct ene_device *)data; | 866 | struct ene_device *dev = (struct ene_device *)data; |
829 | unsigned long flags; | ||
830 | dbg("TX: attempt to set transmitter mask %02x", tx_mask); | 867 | dbg("TX: attempt to set transmitter mask %02x", tx_mask); |
831 | 868 | ||
832 | /* invalid txmask */ | 869 | /* invalid txmask */ |
@@ -836,9 +873,8 @@ static int ene_set_tx_mask(void *data, u32 tx_mask) | |||
836 | return 2; | 873 | return 2; |
837 | } | 874 | } |
838 | 875 | ||
839 | spin_lock_irqsave(&dev->hw_lock, flags); | ||
840 | dev->transmitter_mask = tx_mask; | 876 | dev->transmitter_mask = tx_mask; |
841 | spin_unlock_irqrestore(&dev->hw_lock, flags); | 877 | ene_tx_set_transmitters(dev); |
842 | return 0; | 878 | return 0; |
843 | } | 879 | } |
844 | 880 | ||
@@ -846,7 +882,6 @@ static int ene_set_tx_mask(void *data, u32 tx_mask) | |||
846 | static int ene_set_tx_carrier(void *data, u32 carrier) | 882 | static int ene_set_tx_carrier(void *data, u32 carrier) |
847 | { | 883 | { |
848 | struct ene_device *dev = (struct ene_device *)data; | 884 | struct ene_device *dev = (struct ene_device *)data; |
849 | unsigned long flags; | ||
850 | u32 period = 2000000 / carrier; | 885 | u32 period = 2000000 / carrier; |
851 | 886 | ||
852 | dbg("TX: attempt to set tx carrier to %d kHz", carrier); | 887 | dbg("TX: attempt to set tx carrier to %d kHz", carrier); |
@@ -855,16 +890,12 @@ static int ene_set_tx_carrier(void *data, u32 carrier) | |||
855 | period < ENE_CIRMOD_PRD_MIN)) { | 890 | period < ENE_CIRMOD_PRD_MIN)) { |
856 | 891 | ||
857 | dbg("TX: out of range %d-%d kHz carrier", | 892 | dbg("TX: out of range %d-%d kHz carrier", |
858 | 2000 / ENE_CIRMOD_PRD_MIN, | 893 | 2000 / ENE_CIRMOD_PRD_MIN, 2000 / ENE_CIRMOD_PRD_MAX); |
859 | 2000 / ENE_CIRMOD_PRD_MAX); | ||
860 | |||
861 | return -1; | 894 | return -1; |
862 | } | 895 | } |
863 | 896 | ||
864 | dbg("TX: set carrier to %d kHz", carrier); | ||
865 | spin_lock_irqsave(&dev->hw_lock, flags); | ||
866 | dev->tx_period = period; | 897 | dev->tx_period = period; |
867 | spin_unlock_irqrestore(&dev->hw_lock, flags); | 898 | ene_tx_set_carrier(dev); |
868 | return 0; | 899 | return 0; |
869 | } | 900 | } |
870 | 901 | ||
@@ -872,15 +903,9 @@ static int ene_set_tx_carrier(void *data, u32 carrier) | |||
872 | static int ene_set_tx_duty_cycle(void *data, u32 duty_cycle) | 903 | static int ene_set_tx_duty_cycle(void *data, u32 duty_cycle) |
873 | { | 904 | { |
874 | struct ene_device *dev = (struct ene_device *)data; | 905 | struct ene_device *dev = (struct ene_device *)data; |
875 | unsigned long flags; | ||
876 | |||
877 | dbg("TX: setting duty cycle to %d%%", duty_cycle); | 906 | dbg("TX: setting duty cycle to %d%%", duty_cycle); |
878 | |||
879 | BUG_ON(!duty_cycle || duty_cycle >= 100); | ||
880 | |||
881 | spin_lock_irqsave(&dev->hw_lock, flags); | ||
882 | dev->tx_duty_cycle = duty_cycle; | 907 | dev->tx_duty_cycle = duty_cycle; |
883 | spin_unlock_irqrestore(&dev->hw_lock, flags); | 908 | ene_tx_set_carrier(dev); |
884 | return 0; | 909 | return 0; |
885 | } | 910 | } |
886 | 911 | ||
@@ -889,11 +914,11 @@ static int ene_set_learning_mode(void *data, int enable) | |||
889 | { | 914 | { |
890 | struct ene_device *dev = (struct ene_device *)data; | 915 | struct ene_device *dev = (struct ene_device *)data; |
891 | unsigned long flags; | 916 | unsigned long flags; |
892 | if (enable == dev->learning_enabled) | 917 | if (enable == dev->learning_mode_enabled) |
893 | return 0; | 918 | return 0; |
894 | 919 | ||
895 | spin_lock_irqsave(&dev->hw_lock, flags); | 920 | spin_lock_irqsave(&dev->hw_lock, flags); |
896 | dev->learning_enabled = enable; | 921 | dev->learning_mode_enabled = enable; |
897 | ene_rx_disable(dev); | 922 | ene_rx_disable(dev); |
898 | ene_rx_setup(dev); | 923 | ene_rx_setup(dev); |
899 | ene_rx_enable(dev); | 924 | ene_rx_enable(dev); |
@@ -919,16 +944,12 @@ static int ene_set_carrier_report(void *data, int enable) | |||
919 | } | 944 | } |
920 | 945 | ||
921 | /* outside interface: enable or disable idle mode */ | 946 | /* outside interface: enable or disable idle mode */ |
922 | static void ene_rx_set_idle(void *data, bool idle) | 947 | static void ene_set_idle(void *data, bool idle) |
923 | { | 948 | { |
924 | struct ene_device *dev = (struct ene_device *)data; | 949 | if (idle) { |
925 | 950 | ene_rx_reset((struct ene_device *)data); | |
926 | if (!idle) | 951 | dbg("RX: end of data"); |
927 | return; | 952 | } |
928 | |||
929 | dbg("RX: stopping the receiver"); | ||
930 | ene_clear_reg_mask(dev, ENE_CIRCFG, ENE_CIRCFG_RX_EN); | ||
931 | ene_set_reg_mask(dev, ENE_CIRCFG, ENE_CIRCFG_RX_EN); | ||
932 | } | 953 | } |
933 | 954 | ||
934 | /* outside interface: transmit */ | 955 | /* outside interface: transmit */ |
@@ -949,7 +970,7 @@ static int ene_transmit(void *data, int *buf, u32 n) | |||
949 | 970 | ||
950 | spin_lock_irqsave(&dev->hw_lock, flags); | 971 | spin_lock_irqsave(&dev->hw_lock, flags); |
951 | 972 | ||
952 | ene_tx_prepare(dev); | 973 | ene_tx_enable(dev); |
953 | 974 | ||
954 | /* Transmit first two samples */ | 975 | /* Transmit first two samples */ |
955 | ene_tx_sample(dev); | 976 | ene_tx_sample(dev); |
@@ -960,7 +981,7 @@ static int ene_transmit(void *data, int *buf, u32 n) | |||
960 | if (wait_for_completion_timeout(&dev->tx_complete, 2 * HZ) == 0) { | 981 | if (wait_for_completion_timeout(&dev->tx_complete, 2 * HZ) == 0) { |
961 | dbg("TX: timeout"); | 982 | dbg("TX: timeout"); |
962 | spin_lock_irqsave(&dev->hw_lock, flags); | 983 | spin_lock_irqsave(&dev->hw_lock, flags); |
963 | ene_tx_complete(dev); | 984 | ene_tx_disable(dev); |
964 | spin_unlock_irqrestore(&dev->hw_lock, flags); | 985 | spin_unlock_irqrestore(&dev->hw_lock, flags); |
965 | } else | 986 | } else |
966 | dbg("TX: done"); | 987 | dbg("TX: done"); |
@@ -1031,14 +1052,14 @@ static int ene_probe(struct pnp_dev *pnp_dev, const struct pnp_device_id *id) | |||
1031 | } | 1052 | } |
1032 | 1053 | ||
1033 | if (!dev->hw_learning_and_tx_capable) | 1054 | if (!dev->hw_learning_and_tx_capable) |
1034 | learning_mode = false; | 1055 | learning_mode_force = false; |
1035 | 1056 | ||
1036 | ir_props->driver_type = RC_DRIVER_IR_RAW; | 1057 | ir_props->driver_type = RC_DRIVER_IR_RAW; |
1037 | ir_props->allowed_protos = IR_TYPE_ALL; | 1058 | ir_props->allowed_protos = IR_TYPE_ALL; |
1038 | ir_props->priv = dev; | 1059 | ir_props->priv = dev; |
1039 | ir_props->open = ene_open; | 1060 | ir_props->open = ene_open; |
1040 | ir_props->close = ene_close; | 1061 | ir_props->close = ene_close; |
1041 | ir_props->s_idle = ene_rx_set_idle; | 1062 | ir_props->s_idle = ene_set_idle; |
1042 | 1063 | ||
1043 | dev->props = ir_props; | 1064 | dev->props = ir_props; |
1044 | dev->idev = input_dev; | 1065 | dev->idev = input_dev; |
@@ -1053,9 +1074,9 @@ static int ene_probe(struct pnp_dev *pnp_dev, const struct pnp_device_id *id) | |||
1053 | ir_props->s_carrier_report = ene_set_carrier_report; | 1074 | ir_props->s_carrier_report = ene_set_carrier_report; |
1054 | } | 1075 | } |
1055 | 1076 | ||
1056 | ene_setup_hw_buffer(dev); | 1077 | ene_rx_setup_hw_buffer(dev); |
1057 | ene_setup_settings(dev); | 1078 | ene_setup_default_settings(dev); |
1058 | ene_rx_setup(dev); | 1079 | ene_setup_hw_settings(dev); |
1059 | 1080 | ||
1060 | device_set_wakeup_capable(&pnp_dev->dev, true); | 1081 | device_set_wakeup_capable(&pnp_dev->dev, true); |
1061 | device_set_wakeup_enable(&pnp_dev->dev, true); | 1082 | device_set_wakeup_enable(&pnp_dev->dev, true); |
@@ -1092,7 +1113,7 @@ static void ene_remove(struct pnp_dev *pnp_dev) | |||
1092 | 1113 | ||
1093 | spin_lock_irqsave(&dev->hw_lock, flags); | 1114 | spin_lock_irqsave(&dev->hw_lock, flags); |
1094 | ene_rx_disable(dev); | 1115 | ene_rx_disable(dev); |
1095 | ene_restore_extra_buffer(dev); | 1116 | ene_rx_restore_hw_buffer(dev); |
1096 | spin_unlock_irqrestore(&dev->hw_lock, flags); | 1117 | spin_unlock_irqrestore(&dev->hw_lock, flags); |
1097 | 1118 | ||
1098 | free_irq(dev->irq, dev); | 1119 | free_irq(dev->irq, dev); |
@@ -1123,10 +1144,11 @@ static int ene_suspend(struct pnp_dev *pnp_dev, pm_message_t state) | |||
1123 | static int ene_resume(struct pnp_dev *pnp_dev) | 1144 | static int ene_resume(struct pnp_dev *pnp_dev) |
1124 | { | 1145 | { |
1125 | struct ene_device *dev = pnp_get_drvdata(pnp_dev); | 1146 | struct ene_device *dev = pnp_get_drvdata(pnp_dev); |
1126 | if (dev->rx_enabled) { | 1147 | ene_setup_hw_settings(dev); |
1127 | ene_rx_setup(dev); | 1148 | |
1149 | if (dev->rx_enabled) | ||
1128 | ene_rx_enable(dev); | 1150 | ene_rx_enable(dev); |
1129 | } | 1151 | |
1130 | ene_enable_wake(dev, false); | 1152 | ene_enable_wake(dev, false); |
1131 | return 0; | 1153 | return 0; |
1132 | } | 1154 | } |
@@ -1173,8 +1195,8 @@ static void ene_exit(void) | |||
1173 | module_param(sample_period, int, S_IRUGO); | 1195 | module_param(sample_period, int, S_IRUGO); |
1174 | MODULE_PARM_DESC(sample_period, "Hardware sample period (50 us default)"); | 1196 | MODULE_PARM_DESC(sample_period, "Hardware sample period (50 us default)"); |
1175 | 1197 | ||
1176 | module_param(learning_mode, bool, S_IRUGO); | 1198 | module_param(learning_mode_force, bool, S_IRUGO); |
1177 | MODULE_PARM_DESC(learning_mode, "Enable learning mode by default"); | 1199 | MODULE_PARM_DESC(learning_mode_force, "Enable learning mode by default"); |
1178 | 1200 | ||
1179 | module_param(debug, int, S_IRUGO | S_IWUSR); | 1201 | module_param(debug, int, S_IRUGO | S_IWUSR); |
1180 | MODULE_PARM_DESC(debug, "Debug level"); | 1202 | MODULE_PARM_DESC(debug, "Debug level"); |
diff --git a/drivers/media/IR/ene_ir.h b/drivers/media/IR/ene_ir.h
index 39c707b42412..f5870667a433 100644
--- a/drivers/media/IR/ene_ir.h
+++ b/drivers/media/IR/ene_ir.h
@@ -215,7 +215,7 @@ struct ene_device { | |||
215 | 215 | ||
216 | /* HW features */ | 216 | /* HW features */ |
217 | int hw_revision; /* hardware revision */ | 217 | int hw_revision; /* hardware revision */ |
218 | bool hw_use_gpio_0a; /* gpio40 is demodulated input*/ | 218 | bool hw_use_gpio_0a; /* gpio0a is demodulated input*/ |
219 | bool hw_extra_buffer; /* hardware has 'extra buffer' */ | 219 | bool hw_extra_buffer; /* hardware has 'extra buffer' */ |
220 | bool hw_fan_input; /* fan input is IR data source */ | 220 | bool hw_fan_input; /* fan input is IR data source */ |
221 | bool hw_learning_and_tx_capable; /* learning & tx capable */ | 221 | bool hw_learning_and_tx_capable; /* learning & tx capable */ |
@@ -252,11 +252,11 @@ struct ene_device { | |||
252 | int transmitter_mask; | 252 | int transmitter_mask; |
253 | 253 | ||
254 | /* RX settings */ | 254 | /* RX settings */ |
255 | bool learning_enabled; /* learning input enabled */ | 255 | bool learning_mode_enabled; /* learning input enabled */ |
256 | bool carrier_detect_enabled; /* carrier detect enabled */ | 256 | bool carrier_detect_enabled; /* carrier detect enabled */ |
257 | int rx_period_adjust; | 257 | int rx_period_adjust; |
258 | bool rx_enabled; | 258 | bool rx_enabled; |
259 | }; | 259 | }; |
260 | 260 | ||
261 | static int ene_irq_status(struct ene_device *dev); | 261 | static int ene_irq_status(struct ene_device *dev); |
262 | static void ene_read_hw_pointer(struct ene_device *dev); | 262 | static void ene_rx_read_hw_pointer(struct ene_device *dev); |