Diffstat (limited to 'drivers/media/IR/ir-raw-event.c')
 drivers/media/IR/ir-raw-event.c | 159 ++++++++++++++++++++++++++++++-------
 1 file changed, 133 insertions(+), 26 deletions(-)
diff --git a/drivers/media/IR/ir-raw-event.c b/drivers/media/IR/ir-raw-event.c
index 6f192ef..43094e7 100644
--- a/drivers/media/IR/ir-raw-event.c
+++ b/drivers/media/IR/ir-raw-event.c
@@ -12,9 +12,10 @@
* GNU General Public License for more details.
*/
-#include <linux/workqueue.h>
-#include <linux/spinlock.h>
+#include <linux/kthread.h>
+#include <linux/mutex.h>
#include <linux/sched.h>
+#include <linux/freezer.h>
#include "ir-core-priv.h"
/* Define the max number of pulse/space transitions to buffer */
@@ -24,7 +25,7 @@
static LIST_HEAD(ir_raw_client_list);
/* Used to handle IR raw handler extensions */
-static DEFINE_SPINLOCK(ir_raw_handler_lock);
+static DEFINE_MUTEX(ir_raw_handler_lock);
static LIST_HEAD(ir_raw_handler_list);
static u64 available_protocols;
@@ -33,20 +34,30 @@ static u64 available_protocols;
static struct work_struct wq_load;
#endif
-static void ir_raw_event_work(struct work_struct *work)
+static int ir_raw_event_thread(void *data)
{
struct ir_raw_event ev;
struct ir_raw_handler *handler;
- struct ir_raw_event_ctrl *raw =
- container_of(work, struct ir_raw_event_ctrl, rx_work);
-
- while (kfifo_out(&raw->kfifo, &ev, sizeof(ev)) == sizeof(ev)) {
- spin_lock(&ir_raw_handler_lock);
- list_for_each_entry(handler, &ir_raw_handler_list, list)
- handler->decode(raw->input_dev, ev);
- spin_unlock(&ir_raw_handler_lock);
- raw->prev_ev = ev;
+ struct ir_raw_event_ctrl *raw = (struct ir_raw_event_ctrl *)data;
+
+	/* kthreads are not freezable by default; opt in so the
+	 * try_to_freeze() below actually participates in suspend */
+	set_freezable();
+
+	while (!kthread_should_stop()) {
+		try_to_freeze();
+
+		mutex_lock(&ir_raw_handler_lock);
+
+		while (kfifo_out(&raw->kfifo, &ev, sizeof(ev)) == sizeof(ev)) {
+			list_for_each_entry(handler, &ir_raw_handler_list, list)
+				handler->decode(raw->input_dev, ev);
+			raw->prev_ev = ev;
+		}
+
+		mutex_unlock(&ir_raw_handler_lock);
+
+		/* set the task state before the final emptiness check so
+		 * a wakeup that races with the fifo drain is not lost */
+		set_current_state(TASK_INTERRUPTIBLE);
+		if (kfifo_len(&raw->kfifo) || kthread_should_stop())
+			set_current_state(TASK_RUNNING);
+		else
+			schedule();
}
+
+ return 0;
}
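For context, the wake/sleep handshake above is the standard kthread consumer pattern: producers call wake_up_process() after filling the fifo, and the consumer changes its task state before the last emptiness check so a concurrent wakeup cannot be lost. A minimal self-contained sketch of the idiom (hypothetical demo_* names, not part of this patch):

	#include <linux/kthread.h>
	#include <linux/kfifo.h>
	#include <linux/sched.h>

	/* Hypothetical consumer thread: drain a fifo, then sleep until
	 * a producer calls wake_up_process() on us. */
	static int demo_thread(void *data)
	{
		struct kfifo *fifo = data;
		int item;

		while (!kthread_should_stop()) {
			while (kfifo_out(fifo, &item, sizeof(item)) == sizeof(item))
				;	/* process item here */

			/* state change before the final check: a producer
			 * that races with the drain still wakes us */
			set_current_state(TASK_INTERRUPTIBLE);
			if (kfifo_len(fifo) || kthread_should_stop())
				set_current_state(TASK_RUNNING);
			else
				schedule();
		}
		return 0;
	}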
/**
@@ -66,6 +77,9 @@ int ir_raw_event_store(struct input_dev *input_dev, struct ir_raw_event *ev)
if (!ir->raw)
return -EINVAL;
+	IR_dprintk(2, "sample: (%05dus %s)\n",
+		   TO_US(ev->duration), TO_STR(ev->pulse));
+
if (kfifo_in(&ir->raw->kfifo, ev, sizeof(*ev)) != sizeof(*ev))
return -ENOMEM;
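To illustrate the store/handle split, a driver typically queues samples from its interrupt handler and defers decoding to the thread; a hedged sketch (the demo_* names and the nanosecond duration unit are assumptions):

	#include <linux/interrupt.h>

	/* Hypothetical ISR: push one edge into the raw fifo, then kick
	 * the decoding thread via ir_raw_event_handle(). */
	static irqreturn_t demo_ir_irq(int irq, void *dev_id)
	{
		struct input_dev *input = dev_id;
		struct ir_raw_event ev = {
			.pulse    = true,	/* mark (pulse) vs. space */
			.duration = 562500,	/* assumed ns; ~562us NEC unit */
		};

		ir_raw_event_store(input, &ev);
		ir_raw_event_handle(input);	/* wakes the decode kthread */
		return IRQ_HANDLED;
	}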
@@ -126,6 +140,90 @@ int ir_raw_event_store_edge(struct input_dev *input_dev, enum raw_event_type typ
EXPORT_SYMBOL_GPL(ir_raw_event_store_edge);
/**
+ * ir_raw_event_store_with_filter() - pass next pulse/space to decoders, with merging and timeout handling
+ * @input_dev: the struct input_dev device descriptor
+ * @ev: the struct ir_raw_event descriptor of the pulse/space
+ *
+ * This routine (which may be called from an interrupt context) works
+ * in a similar manner to ir_raw_event_store_edge.
+ * It is intended for devices with limited internal buffers: it
+ * automerges consecutive samples of the same type and handles timeouts.
+ */
+int ir_raw_event_store_with_filter(struct input_dev *input_dev,
+ struct ir_raw_event *ev)
+{
+ struct ir_input_dev *ir = input_get_drvdata(input_dev);
+ struct ir_raw_event_ctrl *raw = ir->raw;
+
+ if (!raw || !ir->props)
+ return -EINVAL;
+
+ /* Ignore spaces in idle mode */
+ if (ir->idle && !ev->pulse)
+ return 0;
+ else if (ir->idle)
+ ir_raw_event_set_idle(input_dev, 0);
+
+ if (!raw->this_ev.duration) {
+ raw->this_ev = *ev;
+ } else if (ev->pulse == raw->this_ev.pulse) {
+ raw->this_ev.duration += ev->duration;
+ } else {
+ ir_raw_event_store(input_dev, &raw->this_ev);
+ raw->this_ev = *ev;
+ }
+
+	/* Enter idle mode if necessary */
+ if (!ev->pulse && ir->props->timeout &&
+ raw->this_ev.duration >= ir->props->timeout)
+ ir_raw_event_set_idle(input_dev, 1);
+ return 0;
+}
+EXPORT_SYMBOL_GPL(ir_raw_event_store_with_filter);
+
+void ir_raw_event_set_idle(struct input_dev *input_dev, int idle)
+{
+ struct ir_input_dev *ir = input_get_drvdata(input_dev);
+ struct ir_raw_event_ctrl *raw = ir->raw;
+ ktime_t now;
+ u64 delta;
+
+ if (!ir->props)
+ return;
+
+ if (!ir->raw)
+ goto out;
+
+ if (idle) {
+ IR_dprintk(2, "enter idle mode\n");
+ raw->last_event = ktime_get();
+ } else {
+ IR_dprintk(2, "exit idle mode\n");
+
+ now = ktime_get();
+ delta = ktime_to_ns(ktime_sub(now, ir->raw->last_event));
+
+ WARN_ON(raw->this_ev.pulse);
+
+ raw->this_ev.duration =
+ min(raw->this_ev.duration + delta,
+ (u64)IR_MAX_DURATION);
+
+ ir_raw_event_store(input_dev, &raw->this_ev);
+
+ if (raw->this_ev.duration == IR_MAX_DURATION)
+ ir_raw_event_reset(input_dev);
+
+ raw->this_ev.duration = 0;
+ }
+out:
+ if (ir->props->s_idle)
+ ir->props->s_idle(ir->props->priv, idle);
+ ir->idle = idle;
+}
+EXPORT_SYMBOL_GPL(ir_raw_event_set_idle);
+
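A usage sketch of the filtered path: hardware with a small internal buffer can forward every sample and let the core merge same-type runs and enter idle once a space exceeds props->timeout (the demo_* names and ns units are hypothetical):

	/* Hypothetical: forward a batch of raw samples through the
	 * merging/timeout path, then schedule decoding. */
	static void demo_push_samples(struct input_dev *input,
				      const unsigned int *ns,
				      const bool *pulse, int n)
	{
		struct ir_raw_event ev = {};
		int i;

		for (i = 0; i < n; i++) {
			ev.pulse = pulse[i];
			ev.duration = ns[i];	/* assumed ns units */
			ir_raw_event_store_with_filter(input, &ev);
		}
		ir_raw_event_handle(input);
	}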
+/**
* ir_raw_event_handle() - schedules the decoding of stored ir data
* @input_dev: the struct input_dev device descriptor
*
@@ -138,7 +236,7 @@ void ir_raw_event_handle(struct input_dev *input_dev)
if (!ir->raw)
return;
- schedule_work(&ir->raw->rx_work);
+ wake_up_process(ir->raw->thread);
}
EXPORT_SYMBOL_GPL(ir_raw_event_handle);
@@ -147,9 +245,9 @@ u64
ir_raw_get_allowed_protocols()
{
u64 protocols;
- spin_lock(&ir_raw_handler_lock);
+ mutex_lock(&ir_raw_handler_lock);
protocols = available_protocols;
- spin_unlock(&ir_raw_handler_lock);
+ mutex_unlock(&ir_raw_handler_lock);
return protocols;
}
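Callers can test the aggregate mask for a given protocol bit; a one-line sketch (IR_TYPE_NEC is the usual flag name, assumed here):

	/* Hypothetical check before enabling NEC-specific handling */
	if (ir_raw_get_allowed_protocols() & IR_TYPE_NEC)
		pr_debug("a NEC decoder is loaded\n");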
@@ -167,7 +265,7 @@ int ir_raw_event_register(struct input_dev *input_dev)
return -ENOMEM;
ir->raw->input_dev = input_dev;
- INIT_WORK(&ir->raw->rx_work, ir_raw_event_work);
+
ir->raw->enabled_protocols = ~0;
rc = kfifo_alloc(&ir->raw->kfifo, sizeof(s64) * MAX_IR_EVENT_SIZE,
GFP_KERNEL);
@@ -177,12 +275,21 @@ int ir_raw_event_register(struct input_dev *input_dev)
return rc;
}
- spin_lock(&ir_raw_handler_lock);
+ ir->raw->thread = kthread_run(ir_raw_event_thread, ir->raw,
+ "rc%u", (unsigned int)ir->devno);
+
+	if (IS_ERR(ir->raw->thread)) {
+		/* grab the error code before ir->raw is freed */
+		rc = PTR_ERR(ir->raw->thread);
+		kfifo_free(&ir->raw->kfifo);
+		kfree(ir->raw);
+		ir->raw = NULL;
+		return rc;
+	}
+
+ mutex_lock(&ir_raw_handler_lock);
list_add_tail(&ir->raw->list, &ir_raw_client_list);
list_for_each_entry(handler, &ir_raw_handler_list, list)
if (handler->raw_register)
handler->raw_register(ir->raw->input_dev);
- spin_unlock(&ir_raw_handler_lock);
+ mutex_unlock(&ir_raw_handler_lock);
return 0;
}
@@ -195,14 +302,14 @@ void ir_raw_event_unregister(struct input_dev *input_dev)
if (!ir->raw)
return;
- cancel_work_sync(&ir->raw->rx_work);
+ kthread_stop(ir->raw->thread);
- spin_lock(&ir_raw_handler_lock);
+ mutex_lock(&ir_raw_handler_lock);
list_del(&ir->raw->list);
list_for_each_entry(handler, &ir_raw_handler_list, list)
if (handler->raw_unregister)
handler->raw_unregister(ir->raw->input_dev);
- spin_unlock(&ir_raw_handler_lock);
+ mutex_unlock(&ir_raw_handler_lock);
kfifo_free(&ir->raw->kfifo);
kfree(ir->raw);
@@ -217,13 +324,13 @@ int ir_raw_handler_register(struct ir_raw_handler *ir_raw_handler)
{
struct ir_raw_event_ctrl *raw;
- spin_lock(&ir_raw_handler_lock);
+ mutex_lock(&ir_raw_handler_lock);
list_add_tail(&ir_raw_handler->list, &ir_raw_handler_list);
if (ir_raw_handler->raw_register)
list_for_each_entry(raw, &ir_raw_client_list, list)
ir_raw_handler->raw_register(raw->input_dev);
available_protocols |= ir_raw_handler->protocols;
- spin_unlock(&ir_raw_handler_lock);
+ mutex_unlock(&ir_raw_handler_lock);
return 0;
}
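For reference, a decoder module plugs into this list through a struct ir_raw_handler; the sketch below mirrors the fields used in this file, though IR_TYPE_OTHER and the demo_* names are assumptions:

	/* Hypothetical protocol decoder registering with the raw core. */
	static int demo_decode(struct input_dev *input_dev, struct ir_raw_event ev)
	{
		/* inspect ev.pulse / ev.duration; report a keycode once a
		 * complete scancode has been accumulated */
		return 0;
	}

	static struct ir_raw_handler demo_handler = {
		.decode    = demo_decode,
		.protocols = IR_TYPE_OTHER,
	};

	static int __init demo_decoder_init(void)
	{
		return ir_raw_handler_register(&demo_handler);
	}

	static void __exit demo_decoder_exit(void)
	{
		ir_raw_handler_unregister(&demo_handler);
	}

	module_init(demo_decoder_init);
	module_exit(demo_decoder_exit);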
@@ -233,13 +340,13 @@ void ir_raw_handler_unregister(struct ir_raw_handler *ir_raw_handler)
{
struct ir_raw_event_ctrl *raw;
- spin_lock(&ir_raw_handler_lock);
+ mutex_lock(&ir_raw_handler_lock);
list_del(&ir_raw_handler->list);
if (ir_raw_handler->raw_unregister)
list_for_each_entry(raw, &ir_raw_client_list, list)
ir_raw_handler->raw_unregister(raw->input_dev);
available_protocols &= ~ir_raw_handler->protocols;
- spin_unlock(&ir_raw_handler_lock);
+ mutex_unlock(&ir_raw_handler_lock);
}
EXPORT_SYMBOL(ir_raw_handler_unregister);