aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
-rw-r--r--arch/arm/mach-omap2/Makefile1
-rw-r--r--arch/arm/mach-omap2/omap_hsi.c420
-rw-r--r--arch/arm/mach-omap2/pm44xx.c11
-rw-r--r--arch/arm/plat-omap/include/plat/omap_hsi.h494
-rw-r--r--drivers/Kconfig2
-rw-r--r--drivers/Makefile1
-rw-r--r--drivers/omap_hsi/Kconfig70
-rw-r--r--drivers/omap_hsi/Makefile21
-rw-r--r--drivers/omap_hsi/hsi-char.c556
-rw-r--r--drivers/omap_hsi/hsi-char.h35
-rw-r--r--drivers/omap_hsi/hsi-if.c672
-rw-r--r--drivers/omap_hsi/hsi-if.h69
-rw-r--r--drivers/omap_hsi/hsi-protocol-if.h187
-rw-r--r--drivers/omap_hsi/hsi_driver.c1147
-rw-r--r--drivers/omap_hsi/hsi_driver.h398
-rw-r--r--drivers/omap_hsi/hsi_driver_bus.c203
-rw-r--r--drivers/omap_hsi/hsi_driver_debugfs.c500
-rw-r--r--drivers/omap_hsi/hsi_driver_dma.c643
-rw-r--r--drivers/omap_hsi/hsi_driver_fifo.c325
-rw-r--r--drivers/omap_hsi/hsi_driver_gpio.c75
-rw-r--r--drivers/omap_hsi/hsi_driver_if.c965
-rw-r--r--drivers/omap_hsi/hsi_driver_int.c717
-rw-r--r--drivers/omap_hsi/hsi_protocol.c308
-rw-r--r--drivers/omap_hsi/hsi_protocol_cmd.c429
-rw-r--r--drivers/omap_hsi/hsi_protocol_if.c896
-rw-r--r--include/linux/hsi_char.h71
-rw-r--r--include/linux/hsi_driver_if.h181
27 files changed, 9397 insertions, 0 deletions
diff --git a/arch/arm/mach-omap2/Makefile b/arch/arm/mach-omap2/Makefile
index 7ef7745..9ceea9b 100644
--- a/arch/arm/mach-omap2/Makefile
+++ b/arch/arm/mach-omap2/Makefile
@@ -286,3 +286,4 @@ obj-y += $(disp-m) $(disp-y)
obj-y += common-board-devices.o
obj-$(CONFIG_OMAP_REMOTE_PROC) += remoteproc.o
+obj-$(CONFIG_OMAP_HSI_DEVICE) += omap_hsi.o
diff --git a/arch/arm/mach-omap2/omap_hsi.c b/arch/arm/mach-omap2/omap_hsi.c
new file mode 100644
index 0000000..f3c677a
--- /dev/null
+++ b/arch/arm/mach-omap2/omap_hsi.c
@@ -0,0 +1,420 @@
+/*
+ * arch/arm/mach-omap2/hsi.c
+ *
+ * HSI device definition
+ *
+ * Copyright (C) 2011 Texas Instruments, Inc.
+ * Original Author: Sebastien JAN <s-jan@ti.com>
+ *
+ * This package is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
+ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
+ */
+#include <linux/device.h>
+#include <linux/platform_device.h>
+#include <linux/err.h>
+#include <linux/clk.h>
+#include <linux/io.h>
+#include <linux/gpio.h>
+#include <linux/irq.h>
+#include <linux/jiffies.h>
+#include <linux/notifier.h>
+#include <linux/hsi_driver_if.h>
+
+#include <asm/clkdev.h>
+
+#include <plat/omap_hsi.h>
+#include <plat/omap_hwmod.h>
+#include <plat/omap_device.h>
+
+#include <../drivers/omap_hsi/hsi_driver.h>
+#include "clock.h"
+#include "mux.h"
+#include "control.h"
+
+static int omap_hsi_wakeup_enable(int hsi_port);
+static int omap_hsi_wakeup_disable(int hsi_port);
+#define OMAP_HSI_PLATFORM_DEVICE_DRIVER_NAME "omap_hsi"
+#define OMAP_HSI_PLATFORM_DEVICE_NAME "omap_hsi.0"
+#define OMAP_HSI_HWMOD_NAME "hsi"
+#define OMAP_HSI_HWMOD_CLASSNAME "hsi"
+#define OMAP_HSI_PADCONF_CAWAKE_PIN "usbb1_ulpitll_clk.hsi1_cawake"
+#define OMAP_HSI_PADCONF_CAWAKE_MODE OMAP_MUX_MODE1
+
+
+#define OMAP_MUX_MODE_MASK 0x7
+
+
+/* Hack till correct hwmod-mux api gets used */
+#define CA_WAKE_MUX_REG (0x4a1000C2)
+#define OMAP44XX_PADCONF_WAKEUPENABLE0 (1 << 14)
+#define OMAP44XX_PADCONF_WAKEUPEVENT0 (1 << 15)
+
+static int omap_mux_read_signal(const char *muxname)
+{
+ u16 val = 0;
+ val = omap_readw(CA_WAKE_MUX_REG);
+ return val;
+}
+
+static int omap_mux_enable_wakeup(const char *muxname)
+{
+ u16 val = 0;
+ val = omap_readw(CA_WAKE_MUX_REG);
+ val |= OMAP44XX_PADCONF_WAKEUPENABLE0;
+ omap_writew(val, CA_WAKE_MUX_REG);
+ return 0;
+}
+
+static int omap_mux_disable_wakeup(const char *muxname)
+{
+ u16 val = 0;
+ val = omap_readw(CA_WAKE_MUX_REG);
+ val &= ~OMAP44XX_PADCONF_WAKEUPENABLE0;
+ omap_writew(val, CA_WAKE_MUX_REG);
+ return 0;
+}
+
+/*
+ * NOTE: We abuse a little bit the struct port_ctx to use it also for
+ * initialization.
+ */
+
+
/*
 * Initial configuration for HSI port 1: FRAME mode with SYNCHRONIZED flow
 * on both transmitter (hst) and receiver (hsr), using the driver default
 * frame size, clock divisor, channel count and RX timeout counters.
 */
static struct port_ctx hsi_port_ctx[] = {
	[0] = {
	       .hst.mode = HSI_MODE_FRAME,
	       .hst.flow = HSI_FLOW_SYNCHRONIZED,
	       .hst.frame_size = HSI_FRAMESIZE_DEFAULT,
	       .hst.divisor = HSI_DIVISOR_DEFAULT,
	       .hst.channels = HSI_CHANNELS_DEFAULT,
	       .hst.arb_mode = HSI_ARBMODE_ROUNDROBIN,
	       .hsr.mode = HSI_MODE_FRAME,
	       .hsr.flow = HSI_FLOW_SYNCHRONIZED,
	       .hsr.frame_size = HSI_FRAMESIZE_DEFAULT,
	       .hsr.channels = HSI_CHANNELS_DEFAULT,
	       .hsr.divisor = HSI_DIVISOR_DEFAULT,
	       /* FT/TB/FB counters OR'ed into the single COUNTERS register */
	       .hsr.counters = HSI_COUNTERS_FT_DEFAULT |
	       HSI_COUNTERS_TB_DEFAULT |
	       HSI_COUNTERS_FB_DEFAULT,
	       },
};
+
/*
 * Controller-level context.  sysconfig/gdd_gcr/dll start at 0; NOTE(review):
 * presumably captured and restored by the HSI driver across low-power
 * transitions — confirm against drivers/omap_hsi/hsi_driver.c.
 */
static struct ctrl_ctx hsi_ctx = {
	.sysconfig = 0,
	.gdd_gcr = 0,
	.dll = 0,
	.pctx = hsi_port_ctx,
};
+
/*
 * Platform data handed to the omap_hsi driver: one port (hsi_port_ctx),
 * the full HSI DMA channel count, the 96 MHz default functional clock,
 * plus callbacks into the omap_device PM API and the local wakeup helpers.
 */
static struct hsi_platform_data omap_hsi_platform_data = {
	.num_ports = ARRAY_SIZE(hsi_port_ctx),
	.hsi_gdd_chan_count = HSI_HSI_DMA_CHANNEL_MAX,
	.default_hsi_fclk = HSI_DEFAULT_FCLK,
	.ctx = &hsi_ctx,
	.device_enable = omap_device_enable,
	.device_idle = omap_device_idle,
	.device_shutdown = omap_device_shutdown,
	.wakeup_enable = omap_hsi_wakeup_enable,
	.wakeup_disable = omap_hsi_wakeup_disable,
	.wakeup_is_from_hsi = omap_hsi_is_io_wakeup_from_hsi,
	.board_suspend = omap_hsi_prepare_suspend,
};
+
+
+static struct platform_device *hsi_get_hsi_platform_device(void)
+{
+ struct device *dev;
+ struct platform_device *pdev;
+
+ /* HSI_TODO: handle platform device id (or port) (0/1) */
+ dev = bus_find_device_by_name(&platform_bus_type, NULL,
+ OMAP_HSI_PLATFORM_DEVICE_NAME);
+ if (!dev) {
+ pr_debug("Could not find platform device %s\n",
+ OMAP_HSI_PLATFORM_DEVICE_NAME);
+ return 0;
+ }
+
+ if (!dev->driver) {
+ /* Could not find driver for platform device. */
+ return 0;
+ }
+
+ pdev = to_platform_device(dev);
+
+ return pdev;
+}
+
+static struct hsi_dev *hsi_get_hsi_controller_data(struct platform_device *pd)
+{
+ struct hsi_dev *hsi_ctrl;
+
+ if (!pd)
+ return 0;
+
+ hsi_ctrl = (struct hsi_dev *) platform_get_drvdata(pd);
+ if (!hsi_ctrl) {
+ pr_err("Could not find HSI controller data\n");
+ return 0;
+ }
+
+ return hsi_ctrl;
+}
+
+/**
+* omap_hsi_is_io_pad_hsi - Indicates if IO Pad has been muxed for HSI CAWAKE
+*
+* Return value :* 0 if CAWAKE Padconf has not been found or CAWAKE not muxed for
+* CAWAKE
+* * else 1
+*/
+static int omap_hsi_is_io_pad_hsi(void)
+{
+ u16 val;
+
+ /* Check for IO pad */
+ val = omap_mux_read_signal(OMAP_HSI_PADCONF_CAWAKE_PIN);
+ if (val == -ENODEV)
+ return 0;
+
+ /* Continue only if CAWAKE is muxed */
+ if ((val & OMAP_MUX_MODE_MASK) != OMAP_HSI_PADCONF_CAWAKE_MODE)
+ return 0;
+
+ return 1;
+}
+
+/**
+* omap_hsi_is_io_wakeup_from_hsi - Indicates an IO wakeup from HSI CAWAKE
+*
+* Return value :* 0 if CAWAKE Padconf has not been found or no IOWAKEUP event
+* occured for CAWAKE
+* * else 1
+* TODO : return value should indicate the HSI port which has awaken
+*/
+int omap_hsi_is_io_wakeup_from_hsi(void)
+{
+ u16 val;
+
+ /* Check for IO pad wakeup */
+ val = omap_mux_read_signal(OMAP_HSI_PADCONF_CAWAKE_PIN);
+ if (val == -ENODEV)
+ return 0;
+
+ /* Continue only if CAWAKE is muxed */
+ if ((val & OMAP_MUX_MODE_MASK) != OMAP_HSI_PADCONF_CAWAKE_MODE)
+ return 0;
+
+ if (val & OMAP44XX_PADCONF_WAKEUPEVENT0)
+ return 1;
+
+ return 0;
+}
+
+/**
+* omap_hsi_wakeup_enable - Enable HSI wakeup feature from RET/OFF mode
+*
+* @hsi_port - reference to the HSI port onto which enable wakeup feature.
+*
+* Return value :* 0 if CAWAKE has been configured to wakeup platform
+* * -ENODEV if CAWAKE is not muxed on padconf
+*/
+static int omap_hsi_wakeup_enable(int hsi_port)
+{
+ int ret = -ENODEV;
+
+ if (omap_hsi_is_io_pad_hsi())
+ ret = omap_mux_enable_wakeup(OMAP_HSI_PADCONF_CAWAKE_PIN);
+ else
+ pr_debug("Trying to enable HSI IO wakeup on non HSI board\n");
+
+
+ /* TODO: handle hsi_port param and use it to find the correct Pad */
+ return ret;
+}
+
+/**
+* omap_hsi_wakeup_disable - Disable HSI wakeup feature from RET/OFF mode
+*
+* @hsi_port - reference to the HSI port onto which disable wakeup feature.
+*
+* Return value :* 0 if CAWAKE has been configured to not wakeup platform
+* * -ENODEV if CAWAKE is not muxed on padconf
+*/
+static int omap_hsi_wakeup_disable(int hsi_port)
+{
+ int ret = -ENODEV;
+
+ if (omap_hsi_is_io_pad_hsi())
+ ret = omap_mux_disable_wakeup(OMAP_HSI_PADCONF_CAWAKE_PIN);
+ else
+ pr_debug("Trying to disable HSI IO wakeup on non HSI board\n");
+
+
+ /* TODO: handle hsi_port param and use it to find the correct Pad */
+
+ return ret;
+}
+
+/**
+* omap_hsi_prepare_suspend - Prepare HSI for suspend mode
+*
+* Return value :* 0 if CAWAKE padconf has been configured properly
+* * -ENODEV if CAWAKE is not muxed on padconf.
+*
+*/
+int omap_hsi_prepare_suspend(int hsi_port, bool dev_may_wakeup)
+{
+ int ret;
+
+ if (dev_may_wakeup)
+ ret = omap_hsi_wakeup_enable(hsi_port);
+ else
+ ret = omap_hsi_wakeup_disable(hsi_port);
+
+ return ret;
+}
+
+/**
+* omap_hsi_wakeup - Prepare HSI for wakeup from suspend mode (RET/OFF)
+*
+* Return value : 1 if IO wakeup source is HSI
+* 0 if IO wakeup source is not HSI.
+*/
+int omap_hsi_wakeup(int hsi_port)
+{
+ static struct platform_device *pdev;
+ static struct hsi_dev *hsi_ctrl;
+
+ if (!pdev) {
+ pdev = hsi_get_hsi_platform_device();
+ if (!pdev)
+ return -ENODEV;
+}
+
+ if (!device_may_wakeup(&pdev->dev)) {
+ dev_info(&pdev->dev, "Modem not allowed to wakeup platform");
+ return -EPERM;
+ }
+
+ if (!hsi_ctrl) {
+ hsi_ctrl = hsi_get_hsi_controller_data(pdev);
+ if (!hsi_ctrl)
+ return -ENODEV;
+ }
+
+ dev_dbg(hsi_ctrl->dev, "Modem wakeup detected from HSI CAWAKE Pad");
+
+ /* CAWAKE falling or rising edge detected */
+ hsi_ctrl->hsi_port->cawake_off_event = true;
+ tasklet_hi_schedule(&hsi_ctrl->hsi_port->hsi_tasklet);
+
+ /* Disable interrupt until Bottom Half has cleared */
+ /* the IRQ status register */
+ disable_irq_nosync(hsi_ctrl->hsi_port->irq);
+
+ return 0;
+}
+
/* HSI_TODO : This requires some fine tuning & completion of
 * activate/deactivate latency values
 */
static struct omap_device_pm_latency omap_hsi_latency[] = {
	[0] = {
	       .deactivate_func = omap_device_idle_hwmods,
	       .activate_func = omap_device_enable_hwmods,
	       /* Let the omap_device core measure/adjust latencies itself */
	       .flags = OMAP_DEVICE_LATENCY_AUTO_ADJUST,
	       },
};
+
+/* HSI device registration */
+static int __init omap_hsi_register(struct omap_hwmod *oh, void *user)
+{
+ struct omap_device *od;
+ struct hsi_platform_data *pdata = &omap_hsi_platform_data;
+
+ if (!oh) {
+ pr_err("Could not look up %s omap_hwmod\n",
+ OMAP_HSI_HWMOD_NAME);
+ return -EEXIST;
+ }
+
+ od = omap_device_build(OMAP_HSI_PLATFORM_DEVICE_DRIVER_NAME, 0, oh,
+ pdata, sizeof(*pdata), omap_hsi_latency,
+ ARRAY_SIZE(omap_hsi_latency), false);
+ WARN(IS_ERR(od), "Can't build omap_device for %s:%s.\n",
+ OMAP_HSI_PLATFORM_DEVICE_DRIVER_NAME, oh->name);
+
+ pr_info("HSI: device registered as omap_hwmod: %s\n", oh->name);
+ return 0;
+}
+
+static void __init omap_4430hsi_pad_conf(void)
+{
+ /*
+ * HSI pad conf: hsi1_ca/ac_wake/flag/data/ready
+ * Also configure gpio_92/95/157/187 used by modem
+ */
+ /* hsi1_cawake */
+ omap_mux_init_signal("usbb1_ulpitll_clk.hsi1_cawake", \
+ OMAP_PIN_INPUT_PULLDOWN | \
+ OMAP_PIN_OFF_NONE | \
+ OMAP_PIN_OFF_WAKEUPENABLE);
+ /* hsi1_caflag */
+ omap_mux_init_signal("usbb1_ulpitll_dir.hsi1_caflag", \
+ OMAP_PIN_INPUT | \
+ OMAP_PIN_OFF_NONE);
+ /* hsi1_cadata */
+ omap_mux_init_signal("usbb1_ulpitll_stp.hsi1_cadata", \
+ OMAP_PIN_INPUT | \
+ OMAP_PIN_OFF_NONE);
+ /* hsi1_acready */
+ omap_mux_init_signal("usbb1_ulpitll_nxt.hsi1_acready", \
+ OMAP_PIN_OUTPUT | \
+ OMAP_PIN_OFF_OUTPUT_LOW);
+ /* hsi1_acwake */
+ omap_mux_init_signal("usbb1_ulpitll_dat0.hsi1_acwake", \
+ OMAP_PIN_OUTPUT | \
+ OMAP_PIN_OFF_NONE);
+ /* hsi1_acdata */
+ omap_mux_init_signal("usbb1_ulpitll_dat1.hsi1_acdata", \
+ OMAP_PIN_OUTPUT | \
+ OMAP_PIN_OFF_NONE);
+ /* hsi1_acflag */
+ omap_mux_init_signal("usbb1_ulpitll_dat2.hsi1_acflag", \
+ OMAP_PIN_OUTPUT | \
+ OMAP_PIN_OFF_NONE);
+ /* hsi1_caready */
+ omap_mux_init_signal("usbb1_ulpitll_dat3.hsi1_caready", \
+ OMAP_PIN_INPUT | \
+ OMAP_PIN_OFF_NONE);
+ /* gpio_92 */
+ omap_mux_init_signal("usbb1_ulpitll_dat4.gpio_92", \
+ OMAP_PULL_ENA);
+ /* gpio_95 */
+ omap_mux_init_signal("usbb1_ulpitll_dat7.gpio_95", \
+ OMAP_PIN_INPUT_PULLDOWN | \
+ OMAP_PIN_OFF_NONE);
+ /* gpio_157 */
+ omap_mux_init_signal("usbb2_ulpitll_clk.gpio_157", \
+ OMAP_PIN_OUTPUT | \
+ OMAP_PIN_OFF_NONE);
+ /* gpio_187 */
+ omap_mux_init_signal("sys_boot3.gpio_187", \
+ OMAP_PIN_OUTPUT | \
+ OMAP_PIN_OFF_NONE);
+}
+
/* HSI devices registration */
int __init omap_hsi_init(void)
{
	/* Mux all HSI and modem-control pads before registering the device */
	omap_4430hsi_pad_conf();
	/* Keep this for genericity, although there is only one hwmod for HSI */
	return omap_hwmod_for_each_by_class(OMAP_HSI_HWMOD_CLASSNAME,
					    omap_hsi_register, NULL);
}
diff --git a/arch/arm/mach-omap2/pm44xx.c b/arch/arm/mach-omap2/pm44xx.c
index cb4457f..12400d0 100644
--- a/arch/arm/mach-omap2/pm44xx.c
+++ b/arch/arm/mach-omap2/pm44xx.c
@@ -356,6 +356,17 @@ static irqreturn_t prcm_interrupt_handler (int irq, void *dev_id)
/* Check if a IO_ST interrupt */
if (irqstatus_mpu & OMAP4430_IO_ST_MASK) {
+
+ /* Check if HSI caused the IO wakeup */
+ #define CA_WAKE_MUX_REG (0x4a1000C2)
+ #define CM_L3INIT_HSI_CLKCTRL (0x4a009338)
+ #define HSI_SYSCONFIG (0x4a058010)
+ if (omap_readw(CA_WAKE_MUX_REG) & (1<<15)) {
+ /* Enable HSI module */
+ omap_writel(omap_readl(CM_L3INIT_HSI_CLKCTRL) | 0x1, CM_L3INIT_HSI_CLKCTRL);
+ /* Put HSI in: No-standby and No-idle */
+ omap_writel( (1<<3) | (1<<12), HSI_SYSCONFIG);
+ }
omap4_trigger_ioctrl();
}
diff --git a/arch/arm/plat-omap/include/plat/omap_hsi.h b/arch/arm/plat-omap/include/plat/omap_hsi.h
new file mode 100644
index 0000000..b5a5334
--- /dev/null
+++ b/arch/arm/plat-omap/include/plat/omap_hsi.h
@@ -0,0 +1,494 @@
+/*
+ * /mach/omap_hsi.h
+ *
+ * Hardware definitions for HSI and SSI.
+ *
+ * Copyright (C) 2007-2008 Nokia Corporation. All rights reserved.
+ * Copyright (C) 2009 Texas Instruments, Inc.
+ *
+ * Author: Carlos Chinea <carlos.chinea@nokia.com>
+ * Author: Sebastien JAN <s-jan@ti.com>
+ *
+ * This package is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
+ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
+ */
+
+/* NOTE: This file defines the registers address offsets for both the
+ * SSI and HSI devices. Most of the registers share the same offset between
+ * these devices.
+ * When common or HSI only, the constants are name HSI*. Else the SSI specific
+ * constants are name HSI_SSI*
+ */
+
+#ifndef __OMAP_HSI_H__
+#define __OMAP_HSI_H__
+
+/* Set the HSI Functional Clock to 96MHz.
+ * This is to ensure HSI will function even at OPP50. */
+#define HSI_DEFAULT_FCLK 96000000 /* 96 MHz */
+
+
+#define HSI_PORT_OFFSET 0x1000
+
+/*
+ * GDD base addr : 0x48059000 (SSI)
+ * GDD base addr : 0x4A059000 (HSI)
+ */
+#define HSI_GDD_OFFSET 0x1000
+#define HSI_GDD_BASE HSI_GDD_OFFSET /* 0x9000 */
+
+/*
+ * HST base addr:
+ * port 1: 0x4805a000 (SSI) - 0x4A05a000 (HSI)
+ * port 2: 0x4805b000 (SSI) - 0x4a05b000 (HSI)
+ */
+#define HSI_HST_OFFSET 0x2000
+#define HSI_HST_BASE(port) (HSI_HST_OFFSET + (((port) - 1) *\
+ (HSI_PORT_OFFSET)))
+ /*
+ * HSR base addr:
+ * port 1: 0x4805a800 (SSI) - 0x4A05a800 (HSI)
+ * port 2: 0x4805b800 (SSI) - 0x4A05b800 (HSI)
+ */
+#define HSI_HSR_OFFSET 0x2800
+#define HSI_HSR_BASE(port) (HSI_HSR_OFFSET + (((port) - 1) *\
+ (HSI_PORT_OFFSET)))
+/*
+ * HSI SYS registers
+ */
+#define HSI_SYS_REVISION_REG 0x0000
+#define HSI_SSI_REV_MASK 0x000000ff
+#define HSI_SSI_REV_MAJOR 0xf0
+#define HSI_SSI_REV_MINOR 0x0f
+
+#define HSI_SYS_SYSCONFIG_REG 0x0010
+#define HSI_AUTOIDLE (1 << 0)
+#define HSI_SOFTRESET (1 << 1)
+#define HSI_FREE_EMU (1 << 2) /* Only for HSI */
+#define HSI_SIDLEMODE_FORCE 0
+#define HSI_SIDLEMODE_NO (1 << 3)
+#define HSI_SIDLEMODE_SMART (1 << 4)
+#define HSI_SIDLEMODE_SMART_WAKEUP (3 << 3)
+#define HSI_SIDLEMODE_MASK 0x00000018
+#define HSI_MIDLEMODE_FORCE 0
+#define HSI_MIDLEMODE_NO (1 << 12)
+#define HSI_MIDLEMODE_SMART (1 << 13)
+#define HSI_MIDLEMODE_SMART_WAKEUP (3 << 12)
+#define HSI_MIDLEMODE_MASK 0x00003000
+
+#define HSI_SYS_SYSSTATUS_REG 0x0014
+#define HSI_RESETDONE 1
+
+#define HSI_SYS_MPU_STATUS_BASE 0x0808
+#define HSI_SYS_MPU_STATUS_PORT_OFFSET 0x10
+#define HSI_SYS_MPU_STATUS_IRQ_OFFSET 8
+
+#define HSI_SYS_MPU_STATUS_REG(port, irq) \
+ (HSI_SYS_MPU_STATUS_BASE + \
+ ((((port) - 1) * HSI_SYS_MPU_STATUS_PORT_OFFSET) +\
+ ((irq) * HSI_SYS_MPU_STATUS_IRQ_OFFSET)))
+#define HSI_SYS_MPU_ENABLE_BASE 0x080c
+#define HSI_SYS_MPU_ENABLE_PORT_OFFSET 0x10
+#define HSI_SYS_MPU_ENABLE_IRQ_OFFSET 8
+
+#define HSI_SYS_MPU_ENABLE_REG(port, irq) \
+ (HSI_SYS_MPU_ENABLE_BASE + \
+ ((((port) - 1) * HSI_SYS_MPU_ENABLE_PORT_OFFSET) +\
+ ((irq) * HSI_SYS_MPU_ENABLE_IRQ_OFFSET)))
+#define HSI_HST_DATAACCEPT(channel) (((channel) < 8) ? \
+ (1 << (channel)) : \
+ (1 << ((channel) - 8)))
+#define HSI_HSR_DATAAVAILABLE(channel) ((channel) < 8 ? \
+ (1 << ((channel) + 8)) : \
+ (1 << ((channel) - 8 + 8)))
+#define HSI_HSR_DATAOVERRUN(channel) ((channel) < 8 ? \
+ (1 << ((channel) + 16)) : \
+ (1 << ((channel) - 8 + 16)))
+
+#define HSI_ERROROCCURED (1 << 24)
+#define HSI_BREAKDETECTED (1 << 25)
+#define HSI_CAWAKEDETECTED (1 << 26)
+
+#define HSI_SYS_GDD_MPU_IRQ_STATUS_REG 0x0800
+#define HSI_SYS_GDD_MPU_IRQ_ENABLE_REG 0x0804
+#define HSI_GDD_LCH(channel) (1 << (channel))
+
+
+#define HSI_SYS_WAKE_OFFSET 0x10
+#define HSI_SYS_WAKE_BASE 0x0c00
+#define HSI_SYS_WAKE_REG(port) (HSI_SYS_WAKE_BASE +\
+ (((port) - 1) * HSI_SYS_WAKE_OFFSET))
+
+#define HSI_SYS_CLEAR_WAKE_BASE 0x0c04
+#define HSI_SYS_CLEAR_WAKE_REG(port) (HSI_SYS_CLEAR_WAKE_BASE +\
+ (((port) - 1) * HSI_SYS_WAKE_OFFSET))
+
+#define HSI_SYS_SET_WAKE_BASE 0x0c08
+#define HSI_SYS_SET_WAKE_REG(port) (HSI_SYS_SET_WAKE_BASE +\
+ (((port) - 1) * HSI_SYS_WAKE_OFFSET))
+
+#define HSI_SSI_WAKE_MASK 0xff /* for SSI */
+#define HSI_WAKE_MASK 0xffff /* for HSI */
+#define HSI_SET_WAKE_4_WIRES (0 << 16)
+#define HSI_SET_WAKE_READY_LVL_0 (0 << 17)
+#define HSI_SET_WAKE(channel) (1 << (channel) |\
+ HSI_SET_WAKE_4_WIRES |\
+ HSI_SET_WAKE_READY_LVL_0)
+#define HSI_CLEAR_WAKE(channel) (1 << (channel))
+#define HSI_WAKE(channel) (1 << (channel))
+
+#define HSI_SYS_HWINFO_REG 0x0004 /* only for HSI */
+
+/* Additional registers definitions (for channels 8 .. 15) for HSI */
+#define HSI_SYS_MPU_U_STATUS_BASE 0x0408
+#define HSI_SYS_MPU_U_STATUS_REG(port, irq) \
+ (HSI_SYS_MPU_U_STATUS_BASE + \
+ ((((port) - 1) * HSI_SYS_MPU_STATUS_PORT_OFFSET) +\
+ ((irq) * HSI_SYS_MPU_STATUS_IRQ_OFFSET)))
+
+#define HSI_SYS_MPU_U_ENABLE_BASE 0x040c
+#define HSI_SYS_MPU_U_ENABLE_REG(port, irq) \
+ (HSI_SYS_MPU_U_ENABLE_BASE + \
+ ((((port) - 1) * HSI_SYS_MPU_ENABLE_PORT_OFFSET) +\
+ ((irq) * HSI_SYS_MPU_ENABLE_IRQ_OFFSET)))
+
+/*
+ * HSI HST registers
+ */
+#define HSI_HST_ID_REG(port) (HSI_HST_BASE(port) + 0x0000)
+
+#define HSI_HST_MODE_REG(port) (HSI_HST_BASE(port) + 0x0004)
+#define HSI_MODE_VAL_MASK 3
+#define HSI_MODE_SLEEP 0
+#define HSI_MODE_STREAM 1
+#define HSI_MODE_FRAME 2
+#define HSI_SSI_MODE_MULTIPOINTS 3 /* SSI only */
+#define HSI_FLOW_OFFSET 2 /* HSI only */
+#define HSI_FLOW_VAL_MASK 3 /* HSI only */
+#define HSI_FLOW_SYNCHRONIZED 0 /* HSI only */
+#define HSI_FLOW_PIPELINED 1 /* HSI only */
+#define HSI_FLOW_REAL_TIME 2 /* HSI only */
+#define HSI_HST_MODE_WAKE_CTRL_AUTO (1 << 4) /* HSI only */
+#define HSI_HST_MODE_WAKE_CTRL_SW (0 << 4) /* HSI only */
+
+#define HSI_HST_FRAMESIZE_REG(port) (HSI_HST_BASE(port) + 0x0008)
+#define HSI_FRAMESIZE_DEFAULT 31
+#define HSI_FRAMESIZE_MAX 0x1f
+
+#define HSI_HST_TXSTATE_REG(port) (HSI_HST_BASE(port) + 0x000c)
+#define HSI_HST_TXSTATE_VAL_MASK 0x07
+#define HSI_HST_TXSTATE_IDLE 0
+
+#define HSI_HST_BUFSTATE_REG(port) (HSI_HST_BASE(port) + 0x0010)
+#define HSI_HST_BUFSTATE_FIFO_REG(fifo) (((fifo) < 8) ? \
+ HSI_HST_BUFSTATE_REG(1) : \
+ HSI_HST_BUFSTATE_REG(2))
+#define HSI_BUFSTATE_CHANNEL(channel) ((channel) < 8 ? \
+ (1 << (channel)) : \
+ (1 << ((channel) - 8)))
+
+#define HSI_HST_DIVISOR_REG(port) (HSI_HST_BASE(port) + 0x0018)
+#define HSI_DIVISOR_DEFAULT 1
+#define HSI_SSI_MAX_TX_DIVISOR 0x7f /* for SSI */
+#define HSI_MAX_TX_DIVISOR 0xff /* for HSI */
+
+#define HSI_HST_BREAK_REG(port) (HSI_HST_BASE(port) + 0x0020)
+#define HSI_HST_CHANNELS_REG(port) (HSI_HST_BASE(port) + 0x0024)
+#define HSI_CHANNELS_DEFAULT 4
+#define HSI_SSI_CHANNELS_MAX 8 /* for SSI */
+#define HSI_CHANNELS_MAX 16 /* for HSI */
+
+#define HSI_HST_ARBMODE_REG(port) (HSI_HST_BASE(port) + 0x0028)
+#define HSI_ARBMODE_ROUNDROBIN 0
+#define HSI_ARBMODE_PRIORITY 1
+
+#define HSI_HST_BUFFER_BASE(port) (HSI_HST_BASE(port) + 0x0080)
+#define HSI_HST_BUFFER_CH_REG(port, channel) (HSI_HST_BUFFER_BASE(port) +\
+ ((channel) * 4))
+#define HSI_HST_BUFFER_FIFO_REG(fifo) (((fifo) < 8) ? \
+ (HSI_HST_BUFFER_CH_REG(1, (fifo))) : \
+ (HSI_HST_BUFFER_CH_REG(2, (fifo) - 8)))
+
+#define HSI_HST_SWAPBUF_BASE(port) (HSI_HST_BASE(port) + 0x00c0)
+#define HSI_HST_SWAPBUF_CH_REG(port, channel) (HSI_HST_SWAPBUF_BASE(port) +\
+ ((channel) * 4))
+
+
+/* Additional registers for HSI */
+#define HSI_HST_FIFO_COUNT 16
+#define HSI_HST_FIFO_SIZE 8
+#define HSI_HST_MAPPING_FIFO_REG(fifo) (HSI_HST_BASE(1) + 0x0100 +\
+ ((fifo) * 4))
+#define HSI_MAPPING_ENABLE 1
+#define HSI_MAPPING_CH_NUMBER_OFFSET 1
+#define HSI_MAPPING_PORT_NUMBER_OFFSET 7
+#define HSI_HST_MAPPING_THRESH_OFFSET 10
+#define HSI_HST_MAPPING_THRESH_VALUE (0x0 << HSI_HST_MAPPING_THRESH_OFFSET)
+
+/*
+ * HSI HSR registers
+ */
+#define HSI_HSR_ID_REG(port) (HSI_HSR_BASE(port) + 0x0000)
+
+#define HSI_HSR_MODE_REG(port) (HSI_HSR_BASE(port) + 0x0004)
+
+#define HSI_HSR_MODE_MODE_VAL_MASK (3 << 0) /* HSI only */
+#define HSI_HSR_MODE_FLOW_VAL_MASK (3 << 2) /* HSI only */
+#define HSI_HSR_MODE_WAKE_STATUS (1 << 4) /* HSI only */
+#define HSI_HSR_MODE_MODE_VAL_SLEEP 0xFFFFFFFC /* HSI only */
+
+#define HSI_HSR_FRAMESIZE_REG(port) (HSI_HSR_BASE(port) + 0x0008)
+
+#define HSI_HSR_RXSTATE_REG(port) (HSI_HSR_BASE(port) + 0x000c)
+
+#define HSI_HSR_BUFSTATE_REG(port) (HSI_HSR_BASE(port) + 0x0010)
+#define HSI_HSR_BUFSTATE_FIFO_REG(fifo) (((fifo) < 8) ? \
+ HSI_HSR_BUFSTATE_REG(1) : \
+ HSI_HSR_BUFSTATE_REG(2))
+
+#define HSI_HSR_BREAK_REG(port) (HSI_HSR_BASE(port) + 0x001c)
+
+#define HSI_HSR_ERROR_REG(port) (HSI_HSR_BASE(port) + 0x0020)
+#define HSI_HSR_ERROR_SIG 1
+#define HSI_HSR_ERROR_FTE (1 << 1) /* HSI only */
+#define HSI_HSR_ERROR_TBE (1 << 4) /* HSI only */
+#define HSI_HSR_ERROR_RME (1 << 7) /* HSI only */
+#define HSI_HSR_ERROR_TME (1 << 11) /* HSI only */
+
+#define HSI_HSR_ERRORACK_REG(port) (HSI_HSR_BASE(port) + 0x0024)
+
+#define HSI_HSR_CHANNELS_REG(port) (HSI_HSR_BASE(port) + 0x0028)
+
+#define HSI_HSR_OVERRUN_REG(port) (HSI_HSR_BASE(port) + 0x002c)
+
+#define HSI_HSR_OVERRUNACK_REG(port) (HSI_HSR_BASE(port) + 0x0030)
+
+#define HSI_HSR_COUNTERS_REG(port) (HSI_HSR_BASE(port) + 0x0034)
+#define SSI_TIMEOUT_REG(port) (HSI_HSR_COUNTERS_REG(port))
+#define HSI_TIMEOUT_DEFAULT 0 /* SSI only */
+#define HSI_SSI_RX_TIMEOUT_OFFSET 0 /* SSI only */
+#define HSI_SSI_RX_TIMEOUT_MASK 0x1ff /* SSI only */
#define HSI_COUNTERS_FT_MASK	0x000fffff	/* HSI only */
#define HSI_COUNTERS_TB_MASK	0x00f00000	/* HSI only */
#define HSI_COUNTERS_FB_MASK	0xff000000	/* HSI only */
#define HSI_COUNTERS_FT_OFFSET	0	/* HSI only */
#define HSI_COUNTERS_TB_OFFSET	20	/* HSI only */
#define HSI_COUNTERS_FB_OFFSET	24	/* HSI only */
/* Default FT value: 2 x max_bits_per_frame + 20% margin */
#define HSI_COUNTERS_FT_DEFAULT	(90 << HSI_COUNTERS_FT_OFFSET)
#define HSI_COUNTERS_TB_DEFAULT	(6 << HSI_COUNTERS_TB_OFFSET)
#define HSI_COUNTERS_FB_DEFAULT	(8 << HSI_COUNTERS_FB_OFFSET)
/*
 * Combine the three HSR counter fields into one register value.
 * The original macro juxtaposed the three masked terms with no operator
 * between them, which does not compile; OR them together.  Arguments are
 * parenthesized against operator-precedence surprises.
 */
#define HSI_HSR_COMBINE_COUNTERS(FB, TB, FT)			  \
	(((((FB) << HSI_COUNTERS_FB_OFFSET) & HSI_COUNTERS_FB_MASK)) | \
	 ((((TB) << HSI_COUNTERS_TB_OFFSET) & HSI_COUNTERS_TB_MASK)) | \
	 ((((FT) << HSI_COUNTERS_FT_OFFSET) & HSI_COUNTERS_FT_MASK)))
#define SSI_SSR_COMBINE_COUNTERS(FT)				  \
	(((FT) << HSI_SSI_RX_TIMEOUT_OFFSET) & HSI_SSI_RX_TIMEOUT_MASK)
+
+#define HSI_HSR_BUFFER_BASE(port) (HSI_HSR_BASE(port) + 0x0080)
+#define HSI_HSR_BUFFER_CH_REG(port, channel) (HSI_HSR_BUFFER_BASE(port) +\
+ ((channel) * 4))
+#define HSI_HSR_BUFFER_FIFO_REG(fifo) (((fifo) < 8) ? \
+ (HSI_HSR_BUFFER_CH_REG(1, (fifo))) : \
+ (HSI_HSR_BUFFER_CH_REG(2, (fifo) - 8)))
+
+#define HSI_HSR_SWAPBUF_BASE(port) (HSI_HSR_BASE(port) + 0x00c0)
+#define HSI_HSR_SWAPBUF_CH_REG(port, channel) (HSI_HSR_SWAPBUF_BASE(port) +\
+ ((channel) * 4))
+
+/* Additional registers for HSI */
+#define HSI_HSR_FIFO_COUNT 16
+#define HSI_HSR_FIFO_SIZE 8
+#define HSI_HSR_MAPPING_FIFO_REG(fifo) (HSI_HSR_BASE(1) + 0x0100 +\
+ ((fifo) * 4))
+#define HSI_HSR_MAPPING_WORDS_MASK (0xf << 10)
+
+#define HSI_HSR_DLL_REG (HSI_HSR_BASE(1) + 0x0144)
+#define HSI_HSR_DLL_COCHRE 1
+#define HSI_HSR_DLL_COCHGR (1 << 4)
+#define HSI_HSR_DLL_INCO_MASK 0x0003ff00
+#define HSI_HSR_DLL_INCO_OFFSET 8
+
+#define HSI_HSR_DIVISOR_REG(port) (HSI_HSR_BASE(port) + 0x014C)
+#define HSI_HSR_DIVISOR_MASK 0xff
+#define HSI_MAX_RX_DIVISOR 0xff
+
+/*
+ * HSI GDD registers
+ */
+#define HSI_SSI_DMA_CHANNEL_MAX 8
+#define HSI_HSI_DMA_CHANNEL_MAX 16
+
+#define HSI_SSI_GDD_HW_ID_REG (HSI_GDD_BASE + 0x0000)
+
+#define HSI_SSI_GDD_PPORT_ID_REG (HSI_GDD_BASE + 0x0010)
+
+#define HSI_SSI_GDD_MPORT_ID_REG (HSI_GDD_BASE + 0x0014)
+
+#define HSI_SSI_GDD_PPORT_SR_REG (HSI_GDD_BASE + 0x0020)
+#define HSI_PPORT_ACTIVE_LCH_NUMBER_MASK 0xff
+
+#define HSI_GDD_MPORT_SR_REG (HSI_GDD_BASE + 0x0024)
+#define HSI_SSI_MPORT_ACTIVE_LCH_NUMBER_MASK 0xff
+
+#define HSI_SSI_GDD_TEST_REG (HSI_GDD_BASE + 0x0040)
+#define HSI_SSI_TEST 1
+
+#define HSI_GDD_GCR_REG (HSI_GDD_BASE + 0x0100)
+#define HSI_CLK_AUTOGATING_ON (1 << 3)
+#define HSI_SWITCH_OFF (1 << 0)
+
+#define HSI_GDD_GRST_REG (HSI_GDD_BASE + 0x0200)
+#define HSI_GDD_GRST_SWRESET 1
+
+#define HSI_GDD_CSDP_BASE (HSI_GDD_BASE + 0x0800)
+#define HSI_GDD_CSDP_OFFSET 0x40
+#define HSI_GDD_CSDP_REG(channel) (HSI_GDD_CSDP_BASE +\
+ ((channel) * HSI_GDD_CSDP_OFFSET))
+
+#define HSI_DST_BURST_EN_MASK 0xc000
+#define HSI_DST_SINGLE_ACCESS0 0
+#define HSI_DST_SINGLE_ACCESS (1 << 14)
+#define HSI_DST_BURST_4X32_BIT (2 << 14)
+#define HSI_DST_BURST_8x32_BIT (3 << 14)
+
+#define HSI_DST_MASK 0x1e00
+#define HSI_DST_MEMORY_PORT (8 << 9)
+#define HSI_DST_PERIPHERAL_PORT (9 << 9)
+
+#define HSI_SRC_BURST_EN_MASK 0x0180
+#define HSI_SRC_SINGLE_ACCESS0 0
+#define HSI_SRC_SINGLE_ACCESS (1 << 7)
+#define HSI_SRC_BURST_4x32_BIT (2 << 7)
+#define HSI_SRC_BURST_8x32_BIT (3 << 7)
+
+#define HSI_SRC_MASK 0x003c
+#define HSI_SRC_MEMORY_PORT (8 << 2)
+#define HSI_SRC_PERIPHERAL_PORT (9 << 2)
+
+#define HSI_DATA_TYPE_MASK 3
+#define HSI_DATA_TYPE_S32 2
+
+#define HSI_GDD_CCR_BASE (HSI_GDD_BASE + 0x0802)
+#define HSI_GDD_CCR_OFFSET 0x40
+#define HSI_GDD_CCR_REG(channel) (HSI_GDD_CCR_BASE +\
+ ((channel) * HSI_GDD_CCR_OFFSET))
+#define HSI_DST_AMODE_MASK (3 << 14)
+#define HSI_DST_AMODE_CONST 0
+#define HSI_DST_AMODE_POSTINC (1 << 14)
+
+#define HSI_SRC_AMODE_MASK (3 << 12)
+#define HSI_SRC_AMODE_CONST 0
+#define HSI_SRC_AMODE_POSTINC (1 << 12)
+
+#define HSI_CCR_ENABLE (1 << 7)
+
+#define HSI_CCR_SYNC_MASK 0x001f /* only for SSI */
+
+#define HSI_GDD_CCIR_BASE (HSI_GDD_BASE + 0x0804)
+#define HSI_GDD_CCIR_OFFSET 0x40
+#define HSI_GDD_CCIR_REG(channel) (HSI_GDD_CCIR_BASE +\
+ ((channel) * HSI_GDD_CCIR_OFFSET))
+
+#define HSI_BLOCK_IE (1 << 5)
+#define HSI_HALF_IE (1 << 2)
+#define HSI_TOUT_IE (1 << 0)
+
+#define HSI_GDD_CSR_BASE (HSI_GDD_BASE + 0x0806)
+#define HSI_GDD_CSR_OFFSET 0x40
+#define HSI_GDD_CSR_REG(channel) (HSI_GDD_CSR_BASE +\
+ ((channel) * HSI_GDD_CSR_OFFSET))
+
+#define HSI_CSR_SYNC (1 << 6)
+#define HSI_CSR_BLOCK (1 << 5) /* Full block is transferred */
+#define HSI_CSR_HALF (1 << 2) /* Half block is transferred */
+#define HSI_CSR_TOUT (1 << 0) /* Time-out overflow occurs */
+
+#define HSI_GDD_CSSA_BASE (HSI_GDD_BASE + 0x0808)
+#define HSI_GDD_CSSA_OFFSET 0x40
+#define HSI_GDD_CSSA_REG(channel) (HSI_GDD_CSSA_BASE +\
+ ((channel) * HSI_GDD_CSSA_OFFSET))
+
+
+#define HSI_GDD_CDSA_BASE (HSI_GDD_BASE + 0x080c)
+#define HSI_GDD_CDSA_OFFSET 0x40
+#define HSI_GDD_CDSA_REG(channel) (HSI_GDD_CDSA_BASE +\
+ ((channel) * HSI_GDD_CDSA_OFFSET))
+
+#define HSI_GDD_CEN_BASE (HSI_GDD_BASE + 0x0810)
+#define HSI_GDD_CEN_OFFSET 0x40
+#define HSI_GDD_CEN_REG(channel) (HSI_GDD_CEN_BASE +\
+ ((channel) * HSI_GDD_CEN_OFFSET))
+
+
+#define HSI_GDD_CSAC_BASE (HSI_GDD_BASE + 0x0818)
+#define HSI_GDD_CSAC_OFFSET 0x40
+#define HSI_GDD_CSAC_REG(channel) (HSI_GDD_CSAC_BASE +\
+ ((channel) * HSI_GDD_CSAC_OFFSET))
+
+#define HSI_GDD_CDAC_BASE (HSI_GDD_BASE + 0x081a)
+#define HSI_GDD_CDAC_OFFSET 0x40
+#define HSI_GDD_CDAC_REG(channel) (HSI_GDD_CDAC_BASE +\
+ ((channel) * HSI_GDD_CDAC_OFFSET))
+
+#define HSI_SSI_GDD_CLNK_CTRL_BASE (HSI_GDD_BASE + 0x0828)
+#define HSI_SSI_GDD_CLNK_CTRL_OFFSET 0x40
+#define HSI_SSI_GDD_CLNK_CTRL_REG(channel) (HSI_SSI_GDD_CLNK_CTRL_BASE +\
+ (channel * HSI_SSI_GDD_CLNK_CTRL_OFFSET))
+
+#define HSI_SSI_ENABLE_LNK (1 << 15)
+#define HSI_SSI_STOP_LNK (1 << 14)
+#define HSI_SSI_NEXT_CH_ID_MASK 0xf
+
+/*
+ * HSI Helpers
+ */
+#define HSI_SYS_MPU_ENABLE_CH_REG(port, irq, channel) \
+ (((channel) < HSI_SSI_CHANNELS_MAX) ? \
+ HSI_SYS_MPU_ENABLE_REG(port, irq) : \
+ HSI_SYS_MPU_U_ENABLE_REG(port, irq))
+
+#define HSI_SYS_MPU_STATUS_CH_REG(port, irq, channel) \
+ ((channel < HSI_SSI_CHANNELS_MAX) ? \
+ HSI_SYS_MPU_STATUS_REG(port, irq) : \
+ HSI_SYS_MPU_U_STATUS_REG(port, irq))
/**
 * struct omap_ssi_board_config - SSI board configuration
 * @num_ports: Number of ports in use
 * @cawake_gpio: Array of cawake gpio lines
 */
struct omap_ssi_board_config {
	unsigned int num_ports;
	int cawake_gpio[2];	/* one entry per SSI port */
};
extern int omap_ssi_config(struct omap_ssi_board_config *ssi_config);
+
+/**
+ * struct omap_hsi_config - HSI board configuration
+ * @num_ports: Number of ports in use
+ */
+struct omap_hsi_board_config {
+ unsigned int num_ports;
+};
+extern int omap_hsi_config(struct omap_hsi_board_config *hsi_config);
+
#ifdef CONFIG_OMAP_HSI
extern int omap_hsi_prepare_suspend(int hsi_port, bool dev_may_wakeup);
extern int omap_hsi_prepare_idle(void);
extern int omap_hsi_wakeup(int hsi_port);
extern int omap_hsi_is_io_wakeup_from_hsi(void);
#else
/*
 * Stubs for !CONFIG_OMAP_HSI builds.
 * Must be "static inline": plain "inline" in a header creates an external
 * definition in every including translation unit (multiple-definition link
 * errors).  omap_hsi_wakeup() also takes int hsi_port here, matching the
 * real prototype above — the original "(void)" stub broke callers.
 */
static inline int omap_hsi_prepare_suspend(int hsi_port,
				bool dev_may_wakeup) { return -ENOSYS; }
static inline int omap_hsi_prepare_idle(void) { return -ENOSYS; }
static inline int omap_hsi_wakeup(int hsi_port) { return -ENOSYS; }
static inline int omap_hsi_is_io_wakeup_from_hsi(void) { return -ENOSYS; }

#endif
+
+#endif /* __OMAP_HSI_H__ */
diff --git a/drivers/Kconfig b/drivers/Kconfig
index d930c6a..1cce7f2 100644
--- a/drivers/Kconfig
+++ b/drivers/Kconfig
@@ -133,4 +133,6 @@ source "drivers/remoteproc/Kconfig"
source "drivers/virtio/Kconfig"
source "drivers/rpmsg/Kconfig"
+
+source "drivers/omap_hsi/Kconfig"
endmenu
diff --git a/drivers/Makefile b/drivers/Makefile
index 49c39b3..2f047a4 100644
--- a/drivers/Makefile
+++ b/drivers/Makefile
@@ -128,3 +128,4 @@ obj-$(CONFIG_REMOTE_PROC) += remoteproc/
obj-$(CONFIG_DMM_OMAP) += media/
obj-$(CONFIG_TILER_OMAP) += media/
+obj-$(CONFIG_OMAP_HSI) += omap_hsi/
diff --git a/drivers/omap_hsi/Kconfig b/drivers/omap_hsi/Kconfig
new file mode 100644
index 0000000..1f2862f
--- /dev/null
+++ b/drivers/omap_hsi/Kconfig
@@ -0,0 +1,70 @@
+#
+# OMAP HSI driver configuration
+#
+
+config OMAP_HSI
+	bool "OMAP HSI hardware driver"
+	depends on (ARCH_OMAP34XX || ARCH_OMAP4)
+	default n
+	---help---
+	  If you say Y here, you will enable the OMAP HSI hardware driver.
+
+	  Note: This module is a unified driver specific to OMAP. Efforts are
+	  underway to create a vendor independent implementation.
+
+	  The MIPI HSI is a High Speed Synchronous Serial Interface and is
+	  defined for communication between two Integrated Circuits (the
+	  typical scenario is an application IC and cellular modem IC
+	  communication). Data transaction model is peer-to-peer.
+
+	  Not all features required for a production device are implemented in
+	  this driver. See the documentation for more information.
+
+	  This physical layer provides logical channeling and several modes of
+	  operation.
+
+	  The OMAP HSI driver supports either:
+	  - the OMAP MIPI HSI device
+	  - the OMAP SSI device
+
+choice
+	prompt "Selected device support file"
+	depends on OMAP_HSI
+	default OMAP_HSI_DEVICE
+	---help---
+	  Adds the device support for one of the devices handled by the HSI
+	  driver.
+
+	  The OMAP HSI driver supports either:
+	  - the OMAP MIPI HSI device
+	  - the OMAP SSI device
+
+config OMAP_HSI_DEVICE
+	bool "HSI (OMAP MIPI HSI)"
+	depends on ARCH_OMAP4
+
+config OMAP_SSI_DEVICE
+	bool "SSI (OMAP SSI)"
+	depends on ARCH_OMAP34XX
+
+endchoice
+
+#
+# OMAP HSI char device kernel configuration
+#
+
+config OMAP_HSI_CHAR
+	tristate "OMAP HSI character driver"
+	depends on OMAP_HSI
+	---help---
+	  If you say Y here, you will enable the OMAP HSI character driver.
+
+	  This driver provides a simple character device interface for
+	  serial communication over the HSI bus.
+
+config OMAP_HSI_PROTOCOL
+	tristate "HSI Protocol driver for Infineon Modem"
+	depends on OMAP_HSI
+	---help---
+	  If you say Y here, you will enable the HSI Protocol driver.
+	  This driver supports HSI protocol for Infineon Modem.
diff --git a/drivers/omap_hsi/Makefile b/drivers/omap_hsi/Makefile
new file mode 100644
index 0000000..0f072fb
--- /dev/null
+++ b/drivers/omap_hsi/Makefile
@@ -0,0 +1,21 @@
+#
+# Makefile for HSI drivers
+#
+EXTRA_CFLAGS :=
+
+# Core low-level driver: register access, DMA, interrupts, fifo, gpio.
+omap_hsi-objs := hsi_driver.o hsi_driver_dma.o hsi_driver_int.o \
+			hsi_driver_if.o hsi_driver_bus.o hsi_driver_gpio.o \
+			hsi_driver_fifo.o
+
+# debugfs support is optional; only built when DEBUG_FS is enabled.
+ifeq ($(CONFIG_DEBUG_FS), y)
+	omap_hsi-objs += hsi_driver_debugfs.o
+endif
+
+# Character-device front end on top of the core driver.
+hsi_char-objs := hsi-char.o hsi-if.o
+
+# Infineon modem protocol driver.
+hsi-protocol-objs := hsi_protocol.o hsi_protocol_if.o \
+			hsi_protocol_cmd.o
+
+obj-$(CONFIG_OMAP_HSI) += omap_hsi.o
+obj-$(CONFIG_OMAP_HSI_CHAR) += hsi_char.o
+obj-$(CONFIG_OMAP_HSI_PROTOCOL) += hsi-protocol.o
diff --git a/drivers/omap_hsi/hsi-char.c b/drivers/omap_hsi/hsi-char.c
new file mode 100644
index 0000000..871de30
--- /dev/null
+++ b/drivers/omap_hsi/hsi-char.c
@@ -0,0 +1,556 @@
+/*
+ * hsi-char.c
+ *
+ * HSI character device driver, implements the character device
+ * interface.
+ *
+ * Copyright (C) 2009 Nokia Corporation. All rights reserved.
+ * Copyright (C) 2009 Texas Instruments, Inc.
+ *
+ * Author: Andras Domokos <andras.domokos@nokia.com>
+ * Author: Sebastien JAN <s-jan@ti.com>
+ *
+ * This package is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
+ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
+ */
+
+#include <linux/errno.h>
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/init.h>
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/miscdevice.h>
+#include <linux/file.h>
+#include <linux/mm.h>
+#include <linux/fs.h>
+#include <linux/cdev.h>
+#include <linux/poll.h>
+#include <linux/sched.h>
+#include <asm/mach-types.h>
+#include <linux/ioctl.h>
+#include <linux/uaccess.h>
+#include <linux/slab.h>
+#include <linux/fs.h>
+#include <linux/hsi_driver_if.h>
+#include <linux/hsi_char.h>
+
+#include <plat/omap_hsi.h>
+
+#include "hsi-char.h"
+
+#define DRIVER_VERSION "0.2.1"
+#define HSI_CHAR_DEVICE_NAME "hsi_char"
+
+/* HSI port to probe (1-based).  Mode 1 (--x------) is not a valid sysfs
+ * permission for a module parameter; expose the value read-only instead. */
+static unsigned int port = 1;
+module_param(port, uint, 0444);
+MODULE_PARM_DESC(port, "HSI port to be probed");
+
+/* 1-based channel ids to expose as char devices; filled from the
+ * "channels_map" module parameter, num_channels entries are valid. */
+static unsigned int num_channels;
+static unsigned int channels_map[HSI_MAX_CHAR_DEVS] = { 0 };
+module_param_array(channels_map, uint, &num_channels, 0);
+MODULE_PARM_DESC(channels_map, "HSI channels to be probed");
+
+/* First device number of the allocated char device region. */
+dev_t hsi_char_dev;
+
+/* One completed-transfer record queued on a channel's rx/tx list. */
+struct char_queue {
+	struct list_head list;
+	u32 *data;		/* transfer buffer (owned by the queue entry) */
+	unsigned int count;	/* transfer length in bytes */
+};
+
+/* Per-channel state for the character device interface. */
+struct hsi_char {
+	unsigned int opened;	/* open count; single-open enforced */
+	int poll_event;		/* current poll()/select() event mask */
+	struct list_head rx_queue;	/* completed reads, struct char_queue */
+	struct list_head tx_queue;	/* completed writes, struct char_queue */
+	spinlock_t lock;	/* Serialize access to driver data and API */
+	struct fasync_struct *async_queue;
+	wait_queue_head_t rx_wait;
+	wait_queue_head_t tx_wait;
+	wait_queue_head_t poll_wait;
+};
+
+static struct hsi_char hsi_char_data[HSI_MAX_CHAR_DEVS];
+
+/*
+ * if_hsi_notify - deliver an HSI interface event to the char layer
+ * @ch: channel index into hsi_char_data
+ * @ev: event descriptor (type in ev->event, buffer and byte count)
+ *
+ * Called from the HSI interface callbacks.  IN/OUT completions are queued
+ * on the channel rx/tx lists and wake the matching reader/writer; EXCEP
+ * and AVAIL only update the poll mask and wake pollers.  Events for
+ * channels that are not open are silently dropped.  Runs with GFP_ATOMIC
+ * allocation since it may be called from interrupt context.
+ */
+void if_hsi_notify(int ch, struct hsi_event *ev)
+{
+	struct char_queue *entry;
+
+	pr_debug("%s, ev = {0x%x, 0x%p, %u}\n", __func__, ev->event, ev->data,
+		 ev->count);
+
+	spin_lock(&hsi_char_data[ch].lock);
+
+	if (!hsi_char_data[ch].opened) {
+		pr_debug("%s, device not opened\n!", __func__);
+		spin_unlock(&hsi_char_data[ch].lock);
+		return;
+	}
+
+	switch (HSI_EV_TYPE(ev->event)) {
+	case HSI_EV_IN:
+		entry = kmalloc(sizeof(*entry), GFP_ATOMIC);
+		if (!entry) {
+			pr_err("HSI-CHAR: entry allocation failed.\n");
+			spin_unlock(&hsi_char_data[ch].lock);
+			return;
+		}
+		entry->data = ev->data;
+		entry->count = ev->count;
+		list_add_tail(&entry->list, &hsi_char_data[ch].rx_queue);
+		spin_unlock(&hsi_char_data[ch].lock);
+		pr_debug("%s, HSI_EV_IN\n", __func__);
+		wake_up_interruptible(&hsi_char_data[ch].rx_wait);
+		break;
+	case HSI_EV_OUT:
+		entry = kmalloc(sizeof(*entry), GFP_ATOMIC);
+		if (!entry) {
+			pr_err("HSI-CHAR: entry allocation failed.\n");
+			spin_unlock(&hsi_char_data[ch].lock);
+			return;
+		}
+		entry->data = ev->data;
+		entry->count = ev->count;
+		/* Write completed: channel is writable again. */
+		hsi_char_data[ch].poll_event |= (POLLOUT | POLLWRNORM);
+		list_add_tail(&entry->list, &hsi_char_data[ch].tx_queue);
+		spin_unlock(&hsi_char_data[ch].lock);
+		pr_debug("%s, HSI_EV_OUT\n", __func__);
+		wake_up_interruptible(&hsi_char_data[ch].tx_wait);
+		break;
+	case HSI_EV_EXCEP:
+		hsi_char_data[ch].poll_event |= POLLPRI;
+		spin_unlock(&hsi_char_data[ch].lock);
+		pr_debug("%s, HSI_EV_EXCEP\n", __func__);
+		wake_up_interruptible(&hsi_char_data[ch].poll_wait);
+		break;
+	case HSI_EV_AVAIL:
+		hsi_char_data[ch].poll_event |= (POLLIN | POLLRDNORM);
+		spin_unlock(&hsi_char_data[ch].lock);
+		pr_debug("%s, HSI_EV_AVAIL\n", __func__);
+		wake_up_interruptible(&hsi_char_data[ch].poll_wait);
+		break;
+	default:
+		spin_unlock(&hsi_char_data[ch].lock);
+		break;
+	}
+}
+
+/* fasync file op: (un)register for SIGIO on the channel's async queue. */
+static int hsi_char_fasync(int fd, struct file *file, int on)
+{
+	int ch = (int)file->private_data;
+
+	if (fasync_helper(fd, file, on, &hsi_char_data[ch].async_queue) < 0)
+		return -EIO;
+	return 0;
+}
+
+/* poll file op: report the event mask maintained by if_hsi_notify(). */
+static unsigned int hsi_char_poll(struct file *file, poll_table * wait)
+{
+	int ch = (int)file->private_data;
+	unsigned int mask;
+
+	poll_wait(file, &hsi_char_data[ch].poll_wait, wait);
+	poll_wait(file, &hsi_char_data[ch].tx_wait, wait);
+
+	spin_lock_bh(&hsi_char_data[ch].lock);
+	mask = hsi_char_data[ch].poll_event;
+	spin_unlock_bh(&hsi_char_data[ch].lock);
+
+	pr_debug("%s, ret = 0x%x\n", __func__, mask);
+	return mask;
+}
+
+/*
+ * read file op: post an HSI read, then block until the completion is
+ * queued by if_hsi_notify() (or O_NONBLOCK / a signal interrupts).
+ * Only whole 32-bit words are supported.
+ */
+static ssize_t hsi_char_read(struct file *file, char __user *buf,
+			     size_t count, loff_t *ppos)
+{
+	int ch = (int)file->private_data;
+	DECLARE_WAITQUEUE(wait, current);
+	u32 *data;
+	unsigned int data_len;
+	struct char_queue *entry;
+	ssize_t ret;
+
+	/* only 32bit data is supported for now */
+	if ((count < 4) || (count & 3))
+		return -EINVAL;
+
+	data = kmalloc(count, GFP_ATOMIC);
+	if (!data)	/* was passed to if_hsi_read() unchecked before */
+		return -ENOMEM;
+
+	/* The buffer is handed to the driver and comes back through the
+	 * channel rx_queue when the transfer completes. */
+	ret = if_hsi_read(ch, data, count);
+	if (ret < 0) {
+		kfree(data);
+		goto out2;
+	}
+
+	spin_lock_bh(&hsi_char_data[ch].lock);
+	add_wait_queue(&hsi_char_data[ch].rx_wait, &wait);
+	spin_unlock_bh(&hsi_char_data[ch].lock);
+
+	for (;;) {
+		data = NULL;
+		data_len = 0;
+
+		set_current_state(TASK_INTERRUPTIBLE);
+
+		spin_lock_bh(&hsi_char_data[ch].lock);
+		if (!list_empty(&hsi_char_data[ch].rx_queue)) {
+			entry = list_entry(hsi_char_data[ch].rx_queue.next,
+					   struct char_queue, list);
+			data = entry->data;
+			data_len = entry->count;
+			list_del(&entry->list);
+			kfree(entry);
+		}
+		spin_unlock_bh(&hsi_char_data[ch].lock);
+
+		pr_debug("%s, data = 0x%p, data_len = %d\n",
+			 __func__, data, data_len);
+
+		if (data_len) {
+			pr_debug("%s, RX finished\n", __func__);
+			spin_lock_bh(&hsi_char_data[ch].lock);
+			hsi_char_data[ch].poll_event &= ~(POLLIN | POLLRDNORM);
+			spin_unlock_bh(&hsi_char_data[ch].lock);
+			if_hsi_poll(ch);
+			break;
+		} else if (file->f_flags & O_NONBLOCK) {
+			pr_debug("%s, O_NONBLOCK\n", __func__);
+			ret = -EAGAIN;
+			goto out;
+		} else if (signal_pending(current)) {
+			pr_debug("%s, ERESTARTSYS\n", __func__);
+			/* NOTE(review): keeps historical -EAGAIN (not
+			 * -ERESTARTSYS); the cancelled rx buffer is not
+			 * freed here -- needs a driver-level fix. */
+			ret = -EAGAIN;
+			if_hsi_cancel_read(ch);
+			break;
+		}
+
+		schedule();
+	}
+
+	if (data_len) {
+		/* copy_to_user() returns the number of bytes NOT copied;
+		 * report a failure as -EFAULT instead of that residue. */
+		if (copy_to_user((void __user *)buf, data, data_len))
+			ret = -EFAULT;
+		else
+			ret = data_len;
+	}
+
+	kfree(data);
+
+out:
+	__set_current_state(TASK_RUNNING);
+	remove_wait_queue(&hsi_char_data[ch].rx_wait, &wait);
+
+out2:
+	return ret;
+}
+
+/*
+ * write file op: copy the user buffer, post an HSI write, then block
+ * until the completion is queued by if_hsi_notify() (or O_NONBLOCK /
+ * a signal interrupts).  Only whole 32-bit words are supported.
+ */
+static ssize_t hsi_char_write(struct file *file, const char __user *buf,
+			      size_t count, loff_t *ppos)
+{
+	int ch = (int)file->private_data;
+	DECLARE_WAITQUEUE(wait, current);
+	u32 *data;
+	unsigned int data_len = 0;
+	struct char_queue *entry;
+	ssize_t ret;
+
+	/* only 32bit data is supported for now */
+	if ((count < 4) || (count & 3))
+		return -EINVAL;
+
+	data = kmalloc(count, GFP_ATOMIC);
+	if (!data) {
+		WARN_ON(1);
+		return -ENOMEM;
+	}
+	if (copy_from_user(data, (void __user *)buf, count)) {
+		ret = -EFAULT;
+		kfree(data);
+		goto out2;
+	}
+
+	/* The buffer is handed to the driver and comes back through the
+	 * channel tx_queue when the transfer completes. */
+	ret = if_hsi_write(ch, data, count);
+	if (ret < 0) {
+		kfree(data);
+		goto out2;
+	}
+	spin_lock_bh(&hsi_char_data[ch].lock);
+	hsi_char_data[ch].poll_event &= ~(POLLOUT | POLLWRNORM);
+	add_wait_queue(&hsi_char_data[ch].tx_wait, &wait);
+	spin_unlock_bh(&hsi_char_data[ch].lock);
+
+	for (;;) {
+		data = NULL;
+		data_len = 0;
+
+		set_current_state(TASK_INTERRUPTIBLE);
+
+		spin_lock_bh(&hsi_char_data[ch].lock);
+		if (!list_empty(&hsi_char_data[ch].tx_queue)) {
+			entry = list_entry(hsi_char_data[ch].tx_queue.next,
+					   struct char_queue, list);
+			data = entry->data;
+			data_len = entry->count;
+			list_del(&entry->list);
+			kfree(entry);
+		}
+		spin_unlock_bh(&hsi_char_data[ch].lock);
+
+		if (data_len) {
+			pr_debug("%s, TX finished\n", __func__);
+			ret = data_len;
+			break;
+		} else if (file->f_flags & O_NONBLOCK) {
+			pr_debug("%s, O_NONBLOCK\n", __func__);
+			ret = -EAGAIN;
+			goto out;
+		} else if (signal_pending(current)) {
+			pr_debug("%s, ERESTARTSYS\n", __func__);
+			ret = -ERESTARTSYS;
+			goto out;
+		}
+
+		schedule();
+	}
+
+	kfree(data);
+
+out:
+	__set_current_state(TASK_RUNNING);
+	remove_wait_queue(&hsi_char_data[ch].tx_wait, &wait);
+
+out2:
+	return ret;
+}
+
+/*
+ * unlocked_ioctl file op: dispatch CS_* commands to the HSI interface
+ * layer.  Config get/set commands marshal hsi_rx_config/hsi_tx_config
+ * to and from user space; -EFAULT on a failed copy, -ENOIOCTLCMD for
+ * unknown commands.
+ */
+static long hsi_char_ioctl(struct file *file,
+			   unsigned int cmd, unsigned long arg)
+{
+	int ch = (int)file->private_data;
+	unsigned int state;
+	size_t occ;
+	struct hsi_rx_config rx_cfg;
+	struct hsi_tx_config tx_cfg;
+	int ret = 0;
+
+	pr_debug("%s, ch = %d, cmd = 0x%08x\n", __func__, ch, cmd);
+
+	switch (cmd) {
+	case CS_SEND_BREAK:
+		if_hsi_send_break(ch);
+		break;
+	case CS_FLUSH_RX:
+		if_hsi_flush_rx(ch);
+		break;
+	case CS_FLUSH_TX:
+		if_hsi_flush_tx(ch);
+		break;
+	case CS_SET_ACWAKELINE:
+		if (copy_from_user(&state, (void __user *)arg, sizeof(state)))
+			ret = -EFAULT;
+		else
+			if_hsi_set_acwakeline(ch, state);
+		break;
+	case CS_GET_ACWAKELINE:
+		if_hsi_get_acwakeline(ch, &state);
+		if (copy_to_user((void __user *)arg, &state, sizeof(state)))
+			ret = -EFAULT;
+		break;
+	case CS_GET_CAWAKELINE:
+		if_hsi_get_cawakeline(ch, &state);
+		if (copy_to_user((void __user *)arg, &state, sizeof(state)))
+			ret = -EFAULT;
+		break;
+	case CS_SET_RX:
+		if (copy_from_user(&rx_cfg, (void __user *)arg, sizeof(rx_cfg)))
+			ret = -EFAULT;
+		else
+			ret = if_hsi_set_rx(ch, &rx_cfg);
+		break;
+	case CS_GET_RX:
+		if_hsi_get_rx(ch, &rx_cfg);
+		if (copy_to_user((void __user *)arg, &rx_cfg, sizeof(rx_cfg)))
+			ret = -EFAULT;
+		break;
+	case CS_SET_TX:
+		if (copy_from_user(&tx_cfg, (void __user *)arg, sizeof(tx_cfg)))
+			ret = -EFAULT;
+		else
+			ret = if_hsi_set_tx(ch, &tx_cfg);
+		break;
+	case CS_GET_TX:
+		if_hsi_get_tx(ch, &tx_cfg);
+		if (copy_to_user((void __user *)arg, &tx_cfg, sizeof(tx_cfg)))
+			ret = -EFAULT;
+		break;
+	case CS_SW_RESET:
+		if_hsi_sw_reset(ch);
+		break;
+	case CS_GET_FIFO_OCCUPANCY:
+		if_hsi_get_fifo_occupancy(ch, &occ);
+		if (copy_to_user((void __user *)arg, &occ, sizeof(occ)))
+			ret = -EFAULT;
+		break;
+	default:
+		ret = -ENOIOCTLCMD;
+		break;
+	}
+
+	return ret;
+}
+
+/*
+ * open file op: validate that the minor's channel was configured via the
+ * channels_map module parameter, enforce single-open, and start the
+ * channel through the interface layer.
+ */
+static int hsi_char_open(struct inode *inode, struct file *file)
+{
+	int ret = 0, ch = iminor(inode);
+	int i;
+
+	/* channels_map entries are 1-based channel ids. */
+	for (i = 0; i < HSI_MAX_CHAR_DEVS; i++)
+		if ((channels_map[i] - 1) == ch)
+			break;
+
+	if (i == HSI_MAX_CHAR_DEVS) {
+		pr_err("HSI char open: Channel %d not found\n", ch);
+		return -ENODEV;
+	}
+
+	pr_debug("HSI char open: opening channel %d\n", ch);
+
+	spin_lock_bh(&hsi_char_data[ch].lock);
+
+	if (hsi_char_data[ch].opened) {
+		spin_unlock_bh(&hsi_char_data[ch].lock);
+		pr_err("HSI char open: Channel %d already opened\n", ch);
+		return -EBUSY;
+	}
+
+	file->private_data = (void *)ch;
+	hsi_char_data[ch].opened++;
+	hsi_char_data[ch].poll_event = (POLLOUT | POLLWRNORM);
+	spin_unlock_bh(&hsi_char_data[ch].lock);
+
+	ret = if_hsi_start(ch);
+	if (ret < 0) {
+		/* Roll back the open count, otherwise a failed start left
+		 * the channel permanently -EBUSY for later opens. */
+		spin_lock_bh(&hsi_char_data[ch].lock);
+		hsi_char_data[ch].opened--;
+		spin_unlock_bh(&hsi_char_data[ch].lock);
+	}
+
+	return ret;
+}
+
+/*
+ * release file op: stop the channel, drop the open count and discard any
+ * completion records still queued on the rx/tx lists.  Note: only the
+ * struct char_queue entries are freed here; their data buffers were
+ * allocated by read/write and are presumed already owned/freed there --
+ * TODO confirm no buffer leak on close with pending completions.
+ */
+static int hsi_char_release(struct inode *inode, struct file *file)
+{
+	int ch = (int)file->private_data;
+	struct char_queue *entry;
+	struct list_head *cursor, *next;
+
+	pr_debug("%s, ch = %d\n", __func__, ch);
+
+	if_hsi_stop(ch);
+	spin_lock_bh(&hsi_char_data[ch].lock);
+	hsi_char_data[ch].opened--;
+
+	if (!list_empty(&hsi_char_data[ch].rx_queue)) {
+		list_for_each_safe(cursor, next, &hsi_char_data[ch].rx_queue) {
+			entry = list_entry(cursor, struct char_queue, list);
+			list_del(&entry->list);
+			kfree(entry);
+		}
+	}
+
+	if (!list_empty(&hsi_char_data[ch].tx_queue)) {
+		list_for_each_safe(cursor, next, &hsi_char_data[ch].tx_queue) {
+			entry = list_entry(cursor, struct char_queue, list);
+			list_del(&entry->list);
+			kfree(entry);
+		}
+	}
+
+	spin_unlock_bh(&hsi_char_data[ch].lock);
+
+	return 0;
+}
+
+/* File operations for the hsi_char character devices (one per channel). */
+static const struct file_operations hsi_char_fops = {
+	.owner = THIS_MODULE,
+	.read = hsi_char_read,
+	.write = hsi_char_write,
+	.poll = hsi_char_poll,
+	.unlocked_ioctl = hsi_char_ioctl,
+	.open = hsi_char_open,
+	.release = hsi_char_release,
+	.fasync = hsi_char_fasync,
+};
+
+/* Single cdev covering all HSI_MAX_CHAR_DEVS minors. */
+static struct cdev hsi_char_cdev;
+
+/*
+ * Module init: set up per-channel state, register with the HSI interface
+ * layer, then create the char device region and cdev.  All error paths
+ * now unwind what was already registered (the original leaked the chrdev
+ * region and the interface registration on failure).
+ */
+static int __init hsi_char_init(void)
+{
+	int ret, i;
+
+	pr_info("HSI character device version " DRIVER_VERSION "\n");
+	pr_info("HSI char driver: %d channels mapped\n", num_channels);
+
+	for (i = 0; i < HSI_MAX_CHAR_DEVS; i++) {
+		init_waitqueue_head(&hsi_char_data[i].rx_wait);
+		init_waitqueue_head(&hsi_char_data[i].tx_wait);
+		init_waitqueue_head(&hsi_char_data[i].poll_wait);
+		spin_lock_init(&hsi_char_data[i].lock);
+		hsi_char_data[i].opened = 0;
+		INIT_LIST_HEAD(&hsi_char_data[i].rx_queue);
+		INIT_LIST_HEAD(&hsi_char_data[i].tx_queue);
+	}
+
+	ret = if_hsi_init(port, channels_map, num_channels);
+	if (ret)
+		return ret;
+
+	ret = alloc_chrdev_region(&hsi_char_dev, 0, HSI_MAX_CHAR_DEVS,
+				  HSI_CHAR_DEVICE_NAME);
+	if (ret < 0) {
+		pr_err("HSI character driver: Failed to register\n");
+		goto err_if_exit;
+	}
+
+	cdev_init(&hsi_char_cdev, &hsi_char_fops);
+	ret = cdev_add(&hsi_char_cdev, hsi_char_dev, HSI_MAX_CHAR_DEVS);
+	if (ret < 0) {
+		pr_err("HSI character device: Failed to add char device\n");
+		goto err_unregister;
+	}
+
+	return 0;
+
+err_unregister:
+	unregister_chrdev_region(hsi_char_dev, HSI_MAX_CHAR_DEVS);
+err_if_exit:
+	if_hsi_exit();
+	return ret;
+}
+
+/* Module exit: tear down in reverse order of hsi_char_init(). */
+static void __exit hsi_char_exit(void)
+{
+	cdev_del(&hsi_char_cdev);
+	unregister_chrdev_region(hsi_char_dev, HSI_MAX_CHAR_DEVS);
+	if_hsi_exit();
+}
+
+MODULE_AUTHOR("Andras Domokos <andras.domokos@nokia.com>");
+/* Fixed misspelled author name ("Sebatien" -> "Sebastien"). */
+MODULE_AUTHOR("Sebastien Jan <s-jan@ti.com> / Texas Instruments");
+MODULE_DESCRIPTION("HSI character device");
+MODULE_LICENSE("GPL");
+MODULE_VERSION(DRIVER_VERSION);
+
+module_init(hsi_char_init);
+module_exit(hsi_char_exit);
diff --git a/drivers/omap_hsi/hsi-char.h b/drivers/omap_hsi/hsi-char.h
new file mode 100644
index 0000000..c4b1c4c
--- /dev/null
+++ b/drivers/omap_hsi/hsi-char.h
@@ -0,0 +1,35 @@
+/*
+ * hsi-char.h
+ *
+ * HSI character driver private declaration header file.
+ *
+ * Copyright (C) 2009 Nokia Corporation. All rights reserved.
+ * Copyright (C) 2009 Texas Instruments, Inc.
+ *
+ * Author: Andras Domokos <andras.domokos@nokia.com>
+ * Author: Sebastien JAN <s-jan@ti.com>
+ *
+ * This package is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
+ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
+ */
+
+#ifndef _HSI_CHAR_H
+#define _HSI_CHAR_H
+
+#include "hsi-if.h"
+
+/* how many char devices would be created at most */
+#define HSI_MAX_CHAR_DEVS	16
+
+/* Max HSI channel id allowed to be handled as char device. */
+/* Current range [1, 16] */
+#define HSI_MAX_CHAR_DEV_ID	16
+
+/* Callback from the interface layer (hsi-if.c) into the char layer:
+ * delivers read/write completions and port exceptions for channel @ch. */
+void if_hsi_notify(int ch, struct hsi_event *ev);
+
+#endif /* _HSI_CHAR_H */
diff --git a/drivers/omap_hsi/hsi-if.c b/drivers/omap_hsi/hsi-if.c
new file mode 100644
index 0000000..5228b6a
--- /dev/null
+++ b/drivers/omap_hsi/hsi-if.c
@@ -0,0 +1,672 @@
+ /*
+ * hsi-if.c
+ *
+ * Part of the HSI character driver, implements the HSI interface.
+ *
+ * Copyright (C) 2009 Nokia Corporation. All rights reserved.
+ * Copyright (C) 2009 Texas Instruments, Inc.
+ *
+ * Author: Andras Domokos <andras.domokos@nokia.com>
+ * Author: Sebastien JAN <s-jan@ti.com>
+ *
+ * This package is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
+ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
+ */
+
+#include <linux/errno.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/platform_device.h>
+#include <asm/mach-types.h>
+#include <linux/ioctl.h>
+#include <linux/delay.h>
+#include <linux/ktime.h>
+#include <linux/bitmap.h>
+
+#include <linux/hsi_driver_if.h>
+#include <linux/hsi_char.h>
+
+#include "hsi-char.h"
+#include "hsi-if.h"
+
+/* Per-channel state bits. */
+#define HSI_CHANNEL_STATE_UNAVAIL	(1 << 0)
+#define HSI_CHANNEL_STATE_READING	(1 << 1)
+#define HSI_CHANNEL_STATE_WRITING	(1 << 2)
+
+#define PORT1	0
+#define PORT2	1
+
+/* Field-by-field copy between the public hsi_rx_config and the driver's
+ * hsr_ctx (and tx equivalents) -- the structs share field names but are
+ * distinct types. */
+#define RXCONV(dst, src) \
+	do { \
+		(dst)->mode = (src)->mode; \
+		(dst)->flow = (src)->flow; \
+		(dst)->frame_size = (src)->frame_size; \
+		(dst)->channels = (src)->channels; \
+		(dst)->divisor = (src)->divisor; \
+		(dst)->counters = (src)->counters; \
+	} while (0)
+
+#define TXCONV(dst, src) \
+	do { \
+		(dst)->mode = (src)->mode; \
+		(dst)->flow = (src)->flow; \
+		(dst)->frame_size = (src)->frame_size; \
+		(dst)->channels = (src)->channels; \
+		(dst)->divisor = (src)->divisor; \
+		(dst)->arb_mode = (src)->arb_mode; \
+	} while (0)
+
+/* State for one HSI channel as seen by the char interface layer. */
+struct if_hsi_channel {
+	struct hsi_device *dev;		/* NULL until probe binds it */
+	unsigned int channel_id;
+	u32 *tx_data;
+	unsigned int tx_count;	/* Number of bytes to be written */
+	u32 *rx_data;
+	unsigned int rx_count;	/* Number of bytes to be read */
+	unsigned int opened;
+	unsigned int state;	/* HSI_CHANNEL_STATE_* bits */
+	spinlock_t lock;	/* Serializes access to channel data */
+};
+
+struct if_hsi_iface {
+	struct if_hsi_channel channels[HSI_MAX_CHAR_DEVS];
+	int bootstrap;
+	unsigned long init_chan_map;	/* channels still awaiting probe */
+	spinlock_t lock;	/* Serializes access to HSI functional interface */
+};
+
+static void if_hsi_port_event(struct hsi_device *dev, unsigned int event,
+			      void *arg);
+static int __devinit if_hsi_probe(struct hsi_device *dev);
+static int __devexit if_hsi_remove(struct hsi_device *dev);
+
+static struct hsi_device_driver if_hsi_char_driver = {
+	.ctrl_mask = ANY_HSI_CONTROLLER,
+	.probe = if_hsi_probe,
+	.remove = __devexit_p(if_hsi_remove),
+	.driver = {
+		   .name = "hsi_char"},
+};
+
+static struct if_hsi_iface hsi_iface;
+
+/*
+ * Post an asynchronous read of @count bytes into @data on channel @ch.
+ * Rejects overlapping reads with -EBUSY.  hsi_read() takes a word count,
+ * hence count / 4.  NOTE(review): uses spin_lock() while the char layer
+ * uses spin_lock_bh() on the same data -- confirm calling context.
+ */
+static int if_hsi_read_on(int ch, u32 *data, unsigned int count)
+{
+	struct if_hsi_channel *channel;
+	int ret;
+
+	channel = &hsi_iface.channels[ch];
+	dev_dbg(&channel->dev->device, "%s, ch = %d\n", __func__, ch);
+
+	spin_lock(&channel->lock);
+	if (channel->state & HSI_CHANNEL_STATE_READING) {
+		pr_err("Read still pending on channel %d\n", ch);
+		spin_unlock(&channel->lock);
+		return -EBUSY;
+	}
+	channel->state |= HSI_CHANNEL_STATE_READING;
+	channel->rx_data = data;
+	channel->rx_count = count;
+	spin_unlock(&channel->lock);
+
+	ret = hsi_read(channel->dev, data, count / 4);
+	dev_dbg(&channel->dev->device, "%s, ch = %d, ret = %d\n", __func__, ch,
+		ret);
+
+	return ret;
+}
+
+/* HSI char driver read done callback */
+static void if_hsi_read_done(struct hsi_device *dev, unsigned int size)
+{
+ struct if_hsi_channel *channel;
+ struct hsi_event ev;
+
+ channel = &hsi_iface.channels[dev->n_ch];
+ dev_dbg(&channel->dev->device, "%s, ch = %d\n", __func__, dev->n_ch);
+ spin_lock(&channel->lock);
+ channel->state &= ~HSI_CHANNEL_STATE_READING;
+ ev.event = HSI_EV_IN;
+ ev.data = channel->rx_data;
+ ev.count = 4 * size; /* Convert size to number of u8, not u32 */
+ spin_unlock(&channel->lock);
+ if_hsi_notify(dev->n_ch, &ev);
+}
+
+/* Public entry point: post a read of @count bytes on channel @ch. */
+int if_hsi_read(int ch, u32 *data, unsigned int count)
+{
+	struct if_hsi_channel *chnl = &hsi_iface.channels[ch];
+
+	dev_dbg(&chnl->dev->device, "%s, ch = %d\n", __func__, ch);
+	return if_hsi_read_on(ch, data, count);
+}
+
+/* Re-arm the driver's data-available polling for channel @ch. */
+int if_hsi_poll(int ch)
+{
+	struct if_hsi_channel *chnl = &hsi_iface.channels[ch];
+
+	dev_dbg(&chnl->dev->device, "%s, ch = %d\n", __func__, ch);
+	return hsi_poll(chnl->dev);
+}
+
+/*
+ * Post an asynchronous write of @count bytes from @address on channel
+ * @ch.  Rejects overlapping writes with -EBUSY.  hsi_write() takes a
+ * word count, hence count / 4.
+ */
+static int if_hsi_write_on(int ch, u32 *address, unsigned int count)
+{
+	struct if_hsi_channel *channel;
+	int ret;
+
+	channel = &hsi_iface.channels[ch];
+
+	spin_lock(&channel->lock);
+	if (channel->state & HSI_CHANNEL_STATE_WRITING) {
+		pr_err("Write still pending on channel %d\n", ch);
+		spin_unlock(&channel->lock);
+		return -EBUSY;
+	}
+
+	channel->tx_data = address;
+	channel->tx_count = count;
+	channel->state |= HSI_CHANNEL_STATE_WRITING;
+	spin_unlock(&channel->lock);
+	dev_dbg(&channel->dev->device, "%s, ch = %d\n", __func__, ch);
+	ret = hsi_write(channel->dev, address, count / 4);
+	return ret;
+}
+
+/* HSI char driver write done callback */
+/* HSI char driver write done callback: clear the WRITING state and hand
+ * the completed buffer up to the char layer as an HSI_EV_OUT event. */
+static void if_hsi_write_done(struct hsi_device *dev, unsigned int size)
+{
+	struct if_hsi_channel *channel;
+	struct hsi_event ev;
+
+	channel = &hsi_iface.channels[dev->n_ch];
+	dev_dbg(&channel->dev->device, "%s, ch = %d\n", __func__, dev->n_ch);
+
+	spin_lock(&channel->lock);
+	channel->state &= ~HSI_CHANNEL_STATE_WRITING;
+	ev.event = HSI_EV_OUT;
+	ev.data = channel->tx_data;
+	ev.count = 4 * size;	/* Convert size to number of u8, not u32 */
+	spin_unlock(&channel->lock);
+	if_hsi_notify(dev->n_ch, &ev);
+}
+
+/* Public entry point: post a write of @count bytes on channel @ch. */
+int if_hsi_write(int ch, u32 *data, unsigned int count)
+{
+	struct if_hsi_channel *chnl = &hsi_iface.channels[ch];
+
+	dev_dbg(&chnl->dev->device, "%s, ch = %d\n", __func__, ch);
+	return if_hsi_write_on(ch, data, count);
+}
+
+/* Transmit a HW break condition on channel @ch's port. */
+void if_hsi_send_break(int ch)
+{
+	struct if_hsi_channel *chnl = &hsi_iface.channels[ch];
+
+	dev_dbg(&chnl->dev->device, "%s, ch = %d\n", __func__, ch);
+	hsi_ioctl(chnl->dev, HSI_IOCTL_SEND_BREAK, NULL);
+}
+
+/* Flush the receive path of channel @ch's port. */
+void if_hsi_flush_rx(int ch)
+{
+	struct if_hsi_channel *chnl = &hsi_iface.channels[ch];
+
+	hsi_ioctl(chnl->dev, HSI_IOCTL_FLUSH_RX, NULL);
+}
+
+/* Intentionally empty -- retained for interface compatibility only. */
+void if_hsi_flush_ch(int ch)
+{
+	/* FIXME - Check the purpose of this function */
+	struct if_hsi_channel *channel;
+	channel = &hsi_iface.channels[ch];
+}
+
+/* Flush the transmit path of channel @ch's port. */
+void if_hsi_flush_tx(int ch)
+{
+	struct if_hsi_channel *chnl = &hsi_iface.channels[ch];
+
+	hsi_ioctl(chnl->dev, HSI_IOCTL_FLUSH_TX, NULL);
+}
+
+/* Read the current ACWAKE line state into *@state. */
+void if_hsi_get_acwakeline(int ch, unsigned int *state)
+{
+	struct if_hsi_channel *chnl = &hsi_iface.channels[ch];
+
+	hsi_ioctl(chnl->dev, HSI_IOCTL_GET_ACWAKE, state);
+}
+
+/* Assert (@state non-zero) or de-assert (@state zero) the ACWAKE line. */
+void if_hsi_set_acwakeline(int ch, unsigned int state)
+{
+	struct if_hsi_channel *chnl = &hsi_iface.channels[ch];
+	unsigned int cmd = state ? HSI_IOCTL_ACWAKE_UP : HSI_IOCTL_ACWAKE_DOWN;
+
+	hsi_ioctl(chnl->dev, cmd, NULL);
+}
+
+/* Read the current CAWAKE line state into *@state. */
+void if_hsi_get_cawakeline(int ch, unsigned int *state)
+{
+	struct if_hsi_channel *chnl = &hsi_iface.channels[ch];
+
+	hsi_ioctl(chnl->dev, HSI_IOCTL_GET_CAWAKE, state);
+}
+
+/* Apply a user-supplied RX configuration via the driver's hsr_ctx. */
+int if_hsi_set_rx(int ch, struct hsi_rx_config *cfg)
+{
+	struct if_hsi_channel *chnl = &hsi_iface.channels[ch];
+	struct hsr_ctx ctx;
+
+	dev_dbg(&chnl->dev->device, "%s, ch = %d\n", __func__, ch);
+	RXCONV(&ctx, cfg);
+	return hsi_ioctl(chnl->dev, HSI_IOCTL_SET_RX, &ctx);
+}
+
+/* Fetch the current RX configuration into the user-visible struct. */
+void if_hsi_get_rx(int ch, struct hsi_rx_config *cfg)
+{
+	struct if_hsi_channel *chnl = &hsi_iface.channels[ch];
+	struct hsr_ctx ctx;
+
+	dev_dbg(&chnl->dev->device, "%s, ch = %d\n", __func__, ch);
+	hsi_ioctl(chnl->dev, HSI_IOCTL_GET_RX, &ctx);
+	RXCONV(cfg, &ctx);
+}
+
+/* Apply a user-supplied TX configuration via the driver's hst_ctx. */
+int if_hsi_set_tx(int ch, struct hsi_tx_config *cfg)
+{
+	struct if_hsi_channel *chnl = &hsi_iface.channels[ch];
+	struct hst_ctx ctx;
+
+	dev_dbg(&chnl->dev->device, "%s, ch = %d\n", __func__, ch);
+	TXCONV(&ctx, cfg);
+	return hsi_ioctl(chnl->dev, HSI_IOCTL_SET_TX, &ctx);
+}
+
+/* Fetch the current TX configuration into the user-visible struct. */
+void if_hsi_get_tx(int ch, struct hsi_tx_config *cfg)
+{
+	struct if_hsi_channel *chnl = &hsi_iface.channels[ch];
+	struct hst_ctx ctx;
+
+	dev_dbg(&chnl->dev->device, "%s, ch = %d\n", __func__, ch);
+	hsi_ioctl(chnl->dev, HSI_IOCTL_GET_TX, &ctx);
+	TXCONV(cfg, &ctx);
+}
+
+/*
+ * Software-reset the whole controller through channel @ch, then mark
+ * every channel unavailable and clear the driver's port channel masks.
+ * After this, channels must be re-probed before use.
+ */
+void if_hsi_sw_reset(int ch)
+{
+	struct if_hsi_channel *channel;
+	int i;
+	channel = &hsi_iface.channels[ch];
+	hsi_ioctl(channel->dev, HSI_IOCTL_SW_RESET, NULL);
+
+	spin_lock_bh(&hsi_iface.lock);
+	/* Reset HSI channel states */
+	for (i = 0; i < HSI_MAX_PORTS; i++)
+		if_hsi_char_driver.ch_mask[i] = 0;
+
+	for (i = 0; i < HSI_MAX_CHAR_DEVS; i++) {
+		channel = &hsi_iface.channels[i];
+		channel->opened = 0;
+		channel->state = HSI_CHANNEL_STATE_UNAVAIL;
+	}
+	spin_unlock_bh(&hsi_iface.lock);
+}
+
+/* Query the RX fifo occupancy (in bytes) for channel @ch into *@occ. */
+void if_hsi_get_fifo_occupancy(int ch, size_t *occ)
+{
+	struct if_hsi_channel *chnl = &hsi_iface.channels[ch];
+
+	hsi_ioctl(chnl->dev, HSI_IOCTL_GET_FIFO_OCCUPANCY, occ);
+}
+
+/*
+ * Cancel a pending read on channel @ch and clear the READING state.
+ * NOTE(review): the state bit is tested before the lock is taken, so a
+ * completion racing here could be cancelled/cleared inconsistently --
+ * confirm against the driver's callback context.
+ */
+void if_hsi_cancel_read(int ch)
+{
+	struct if_hsi_channel *channel;
+	channel = &hsi_iface.channels[ch];
+	dev_dbg(&channel->dev->device, "%s, ch = %d\n", __func__, ch);
+	if (channel->state & HSI_CHANNEL_STATE_READING)
+		hsi_read_cancel(channel->dev);
+	spin_lock(&channel->lock);
+	channel->state &= ~HSI_CHANNEL_STATE_READING;
+	spin_unlock(&channel->lock);
+}
+
+/*
+ * Cancel a pending write on channel @ch and clear the WRITING state.
+ * NOTE(review): same unlocked state test as if_hsi_cancel_read() --
+ * confirm the race window is acceptable.
+ */
+void if_hsi_cancel_write(int ch)
+{
+	struct if_hsi_channel *channel;
+	channel = &hsi_iface.channels[ch];
+	dev_dbg(&channel->dev->device, "%s, ch = %d\n", __func__, ch);
+	if (channel->state & HSI_CHANNEL_STATE_WRITING)
+		hsi_write_cancel(channel->dev);
+	spin_lock(&channel->lock);
+	channel->state &= ~HSI_CHANNEL_STATE_WRITING;
+	spin_unlock(&channel->lock);
+}
+
+/*
+ * Open @channel through the core driver.  Fails if the channel is
+ * unavailable, already opened, or unbound.  The channel lock is dropped
+ * around hsi_open() (which may sleep) and re-taken to publish the
+ * opened flag; every exit path goes through "leave" holding the lock.
+ */
+static int if_hsi_openchannel(struct if_hsi_channel *channel)
+{
+	int ret = 0;
+
+	dev_dbg(&channel->dev->device, "%s, ch = %d\n", __func__,
+		channel->channel_id);
+	spin_lock(&channel->lock);
+
+	if (channel->state == HSI_CHANNEL_STATE_UNAVAIL) {
+		pr_err("Channel %d is not available\n", channel->channel_id);
+		ret = -ENODEV;
+		goto leave;
+	}
+
+	if (channel->opened) {
+		pr_err("Channel %d is busy\n", channel->channel_id);
+		ret = -EBUSY;
+		goto leave;
+	}
+
+	if (!channel->dev) {
+		pr_err("Channel %d is not ready??\n", channel->channel_id);
+		ret = -ENODEV;
+		goto leave;
+	}
+	spin_unlock(&channel->lock);
+
+	ret = hsi_open(channel->dev);
+
+	spin_lock(&channel->lock);
+	if (ret < 0) {
+		pr_err("Could not open channel %d\n", channel->channel_id);
+		goto leave;
+	}
+
+	channel->opened = 1;
+
+leave:
+	spin_unlock(&channel->lock);
+	return ret;
+}
+
+/*
+ * Close @channel: cancel any in-flight read/write, then hsi_close().
+ * The lock is dropped around the cancel and close calls (they may
+ * sleep) and re-taken to clear the opened flag.
+ */
+static int if_hsi_closechannel(struct if_hsi_channel *channel)
+{
+	int ret = 0;
+
+	dev_dbg(&channel->dev->device, "%s, ch = %d\n", __func__,
+		channel->channel_id);
+	spin_lock(&channel->lock);
+
+	if (!channel->opened)
+		goto leave;
+
+	if (!channel->dev) {
+		pr_err("Channel %d is not ready??\n", channel->channel_id);
+		ret = -ENODEV;
+		goto leave;
+	}
+
+	/* Stop any pending read/write */
+	if (channel->state & HSI_CHANNEL_STATE_READING) {
+		channel->state &= ~HSI_CHANNEL_STATE_READING;
+		spin_unlock(&channel->lock);
+		hsi_read_cancel(channel->dev);
+		spin_lock(&channel->lock);
+	}
+
+	if (channel->state & HSI_CHANNEL_STATE_WRITING) {
+		channel->state &= ~HSI_CHANNEL_STATE_WRITING;
+		spin_unlock(&channel->lock);
+		hsi_write_cancel(channel->dev);
+	} else
+		spin_unlock(&channel->lock);
+
+	hsi_close(channel->dev);
+
+	spin_lock(&channel->lock);
+	channel->opened = 0;
+leave:
+	spin_unlock(&channel->lock);
+	return ret;
+}
+
+/*
+ * Start channel @ch for the char layer: reset its state bits, open it
+ * through the core driver, and arm data-available polling.
+ */
+int if_hsi_start(int ch)
+{
+	struct if_hsi_channel *channel;
+	int ret = 0;
+
+	channel = &hsi_iface.channels[ch];
+	dev_dbg(&channel->dev->device, "%s, ch = %d\n", __func__, ch);
+
+	spin_lock_bh(&channel->lock);
+	channel->state = 0;
+	spin_unlock_bh(&channel->lock);
+
+	ret = if_hsi_openchannel(channel);
+	if (ret < 0) {
+		pr_err("Could not open channel %d\n", ch);
+		goto error;
+	}
+
+	if_hsi_poll(ch);
+error:
+	return ret;
+}
+
+/* Stop channel @ch: cancel in-flight transfers and close it. */
+void if_hsi_stop(int ch)
+{
+	struct if_hsi_channel *chnl = &hsi_iface.channels[ch];
+
+	dev_dbg(&chnl->dev->device, "%s, ch = %d\n", __func__, ch);
+	if_hsi_closechannel(chnl);
+}
+
+/*
+ * Bus probe: bind an hsi_device to its slot in hsi_iface.channels when
+ * its port/channel matches the configured ch_mask.  Clearing the bit in
+ * init_chan_map (via XOR) records that the channel has been probed.
+ * NOTE(review): only the first port with a non-zero ch_mask is
+ * considered -- matches if_hsi_init(), which configures a single port.
+ */
+static int __devinit if_hsi_probe(struct hsi_device *dev)
+{
+	struct if_hsi_channel *channel;
+	unsigned long *address;
+	int ret = -ENXIO, port;
+
+	dev_dbg(&dev->device, "%s, port = %d, ch = %d\n", __func__, dev->n_p,
+		dev->n_ch);
+
+	for (port = 0; port < HSI_MAX_PORTS; port++) {
+		if (if_hsi_char_driver.ch_mask[port])
+			break;
+	}
+
+	if (port == HSI_MAX_PORTS)
+		return -ENXIO;
+
+	if (dev->n_ch >= HSI_MAX_CHAR_DEV_ID) {
+		pr_err("HSI char driver cannot handle channel %d\n", dev->n_ch);
+		return -ENXIO;
+	}
+
+	address = &if_hsi_char_driver.ch_mask[port];
+
+	spin_lock_bh(&hsi_iface.lock);
+	if (test_bit(dev->n_ch, address) && (dev->n_p == port)) {
+		hsi_set_read_cb(dev, if_hsi_read_done);
+		hsi_set_write_cb(dev, if_hsi_write_done);
+		hsi_set_port_event_cb(dev, if_hsi_port_event);
+		channel = &hsi_iface.channels[dev->n_ch];
+		channel->dev = dev;
+		channel->state = 0;
+		ret = 0;
+		hsi_iface.init_chan_map ^= (1 << dev->n_ch);
+	}
+	spin_unlock_bh(&hsi_iface.lock);
+
+	return ret;
+}
+
+/*
+ * Bus remove: mirror of if_hsi_probe() -- unhook the callbacks and mark
+ * the channel unavailable so the char layer refuses further opens.
+ */
+static int __devexit if_hsi_remove(struct hsi_device *dev)
+{
+	struct if_hsi_channel *channel;
+	unsigned long *address;
+	int ret = -ENXIO, port;
+
+	dev_dbg(&dev->device, "%s, port = %d, ch = %d\n", __func__, dev->n_p,
+		dev->n_ch);
+
+	for (port = 0; port < HSI_MAX_PORTS; port++) {
+		if (if_hsi_char_driver.ch_mask[port])
+			break;
+	}
+
+	if (port == HSI_MAX_PORTS)
+		return -ENXIO;
+
+	address = &if_hsi_char_driver.ch_mask[port];
+
+	spin_lock_bh(&hsi_iface.lock);
+	if (test_bit(dev->n_ch, address) && (dev->n_p == port)) {
+		hsi_set_read_cb(dev, NULL);
+		hsi_set_write_cb(dev, NULL);
+		hsi_set_port_event_cb(dev, NULL);
+		channel = &hsi_iface.channels[dev->n_ch];
+		channel->dev = NULL;
+		channel->state = HSI_CHANNEL_STATE_UNAVAIL;
+		ret = 0;
+	}
+	spin_unlock_bh(&hsi_iface.lock);
+
+	return ret;
+}
+
+/*
+ * Port event callback from the core driver: translate port-level events
+ * into char-layer notifications.  HW break fans out to every open
+ * channel; data-available targets the channel passed in @arg; CAWAKE
+ * transitions and errors are only logged here.
+ */
+static void if_hsi_port_event(struct hsi_device *dev, unsigned int event,
+			      void *arg)
+{
+	struct hsi_event ev;
+	int i;
+
+	ev.event = HSI_EV_EXCEP;
+	ev.data = (u32 *) 0;
+	ev.count = 0;
+
+	switch (event) {
+	case HSI_EVENT_BREAK_DETECTED:
+		pr_debug("%s, HWBREAK detected\n", __func__);
+		ev.data = (u32 *) HSI_HWBREAK;
+		for (i = 0; i < HSI_MAX_CHAR_DEVS; i++) {
+			if (hsi_iface.channels[i].opened)
+				if_hsi_notify(i, &ev);
+		}
+		break;
+	case HSI_EVENT_HSR_DATAAVAILABLE:
+		i = (int)arg;
+		pr_debug("%s, HSI_EVENT_HSR_DATAAVAILABLE channel = %d\n",
+			 __func__, i);
+		ev.event = HSI_EV_AVAIL;
+		if (hsi_iface.channels[i].opened)
+			if_hsi_notify(i, &ev);
+		break;
+	case HSI_EVENT_CAWAKE_UP:
+		pr_debug("%s, CAWAKE up\n", __func__);
+		break;
+	case HSI_EVENT_CAWAKE_DOWN:
+		pr_debug("%s, CAWAKE down\n", __func__);
+		break;
+	case HSI_EVENT_ERROR:
+		/* log message spelling fixed: "occured" -> "occurred" */
+		pr_debug("%s, HSI ERROR occurred\n", __func__);
+		break;
+	default:
+		pr_warning("%s, Unknown event(%d)\n", __func__, event);
+		break;
+	}
+}
+
+/*
+ * if_hsi_init - configure and register the HSI char interface layer
+ * @port: 1-based HSI port number to use
+ * @channels_map: array of 1-based channel ids to expose
+ * @num_channels: number of valid entries in @channels_map
+ *
+ * Builds the per-port channel bitmask, registers the bus driver (whose
+ * probe callback clears bits in init_chan_map as channels bind), and
+ * reports -ENXIO if any requested channel failed to bind.
+ */
+int __init if_hsi_init(unsigned int port, unsigned int *channels_map,
+		       unsigned int num_channels)
+{
+	struct if_hsi_channel *channel;
+	int i, ret = 0;
+
+	pr_debug("%s, port = %d\n", __func__, port);
+
+	port -= 1;	/* convert to 0-based index */
+	if (port >= HSI_MAX_PORTS)
+		return -EINVAL;
+
+	hsi_iface.bootstrap = 1;
+	spin_lock_init(&hsi_iface.lock);
+
+	for (i = 0; i < HSI_MAX_PORTS; i++)
+		if_hsi_char_driver.ch_mask[i] = 0;
+
+	for (i = 0; i < HSI_MAX_CHAR_DEVS; i++) {
+		channel = &hsi_iface.channels[i];
+		channel->dev = NULL;
+		channel->opened = 0;
+		channel->state = HSI_CHANNEL_STATE_UNAVAIL;
+		channel->channel_id = i;
+		spin_lock_init(&channel->lock);
+	}
+
+	for (i = 0; (i < num_channels) && channels_map[i]; i++) {
+		pr_debug("%s, port = %d, channels_map[i] = %d\n", __func__,
+			 port, channels_map[i]);
+		if ((channels_map[i] - 1) < HSI_MAX_CHAR_DEV_ID)
+			if_hsi_char_driver.ch_mask[port] |=
+			    (1 << ((channels_map[i] - 1)));
+		else {
+			pr_err("Channel %d cannot be handled by the HSI "
+			       "driver.\n", channels_map[i]);
+			return -EINVAL;
+		}
+
+	}
+	hsi_iface.init_chan_map = if_hsi_char_driver.ch_mask[port];
+
+	ret = hsi_register_driver(&if_hsi_char_driver);
+	if (ret)
+		/* added missing newline to the log message */
+		pr_err("Error while registering HSI driver %d\n", ret);
+
+	if (hsi_iface.init_chan_map) {
+		ret = -ENXIO;
+		pr_err("HSI: Some channels could not be registered (out of "
+		       "range or already registered?)\n");
+	}
+	return ret;
+}
+
+/*
+ * if_hsi_exit - tear down the HSI char interface layer
+ *
+ * Lowers ACWAKE and closes every open channel, then unregisters the bus
+ * driver.  Returns -ENXIO if no port was ever configured.
+ */
+int __devexit if_hsi_exit(void)
+{
+	struct if_hsi_channel *channel;
+	int i, port;
+
+	pr_debug("%s\n", __func__);
+
+	for (port = 0; port < HSI_MAX_PORTS; port++) {
+		if (if_hsi_char_driver.ch_mask[port])
+			break;
+	}
+
+	if (port == HSI_MAX_PORTS)
+		return -ENXIO;
+
+	for (i = 0; i < HSI_MAX_CHAR_DEVS; i++) {
+		channel = &hsi_iface.channels[i];
+		if (channel->opened) {
+			/*
+			 * if_hsi_set_acwakeline() takes a boolean state:
+			 * the original passed HSI_IOCTL_ACWAKE_DOWN
+			 * (non-zero), which asserted ACWAKE instead of
+			 * dropping it.  Pass 0 to lower the line.
+			 */
+			if_hsi_set_acwakeline(i, 0);
+			if_hsi_closechannel(channel);
+		}
+	}
+	hsi_unregister_driver(&if_hsi_char_driver);
+	return 0;
+}
diff --git a/drivers/omap_hsi/hsi-if.h b/drivers/omap_hsi/hsi-if.h
new file mode 100644
index 0000000..96afdd4
--- /dev/null
+++ b/drivers/omap_hsi/hsi-if.h
@@ -0,0 +1,69 @@
+/*
+ * hsi-if.h
+ *
+ * Part of the HSI character driver, private headers.
+ *
+ * Copyright (C) 2009 Nokia Corporation. All rights reserved.
+ * Copyright (C) 2009 Texas Instruments, Inc.
+ *
+ * Author: Andras Domokos <andras.domokos@nokia.com>
+ * Author: Sebastien JAN <s-jan@ti.com>
+ *
+ * This package is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
+ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
+ */
+
+#ifndef _HSI_IF_H
+#define _HSI_IF_H
+
+/* Event word layout: low 16 bits carry the event payload, bits 16..19 the
+ * event type (direction/exception/availability). */
+#define HSI_EV_MASK (0xffff << 0)
+#define HSI_EV_TYPE_MASK (0x0f << 16)
+#define HSI_EV_IN (0x01 << 16)
+#define HSI_EV_OUT (0x02 << 16)
+#define HSI_EV_EXCEP (0x03 << 16)
+#define HSI_EV_AVAIL (0x04 << 16)
+#define HSI_EV_TYPE(event) ((event) & HSI_EV_TYPE_MASK)
+
+/* Exception event payloads (used with HSI_EV_EXCEP) */
+#define HSI_HWBREAK 1
+#define HSI_ERROR 2
+
+/* One queued interface event: type/payload word plus an optional data
+ * buffer of `count' 32-bit words. Buffer ownership is not defined here --
+ * see the implementation in hsi-if.c. */
+struct hsi_event {
+	unsigned int event;
+	u32 *data;
+	unsigned int count;
+};
+
+/* Lifecycle: map `num_channels' channels (1-based ids in channels_map)
+ * on `port' at init; exit closes channels and unregisters the driver. */
+int if_hsi_init(unsigned int port, unsigned int *channels_map,
+		unsigned int num_channels);
+int if_hsi_exit(void);
+
+int if_hsi_start(int ch);
+void if_hsi_stop(int ch);
+
+/* Per-channel control operations; `ch' is a channel index. */
+void if_hsi_send_break(int ch);
+void if_hsi_flush_rx(int ch);
+void if_hsi_flush_tx(int ch);
+void if_hsi_bootstrap(int ch);
+void if_hsi_set_acwakeline(int ch, unsigned int state);
+void if_hsi_get_acwakeline(int ch, unsigned int *state);
+void if_hsi_get_cawakeline(int ch, unsigned int *state);
+int if_hsi_set_rx(int ch, struct hsi_rx_config *cfg);
+void if_hsi_get_rx(int ch, struct hsi_rx_config *cfg);
+int if_hsi_set_tx(int ch, struct hsi_tx_config *cfg);
+void if_hsi_get_tx(int ch, struct hsi_tx_config *cfg);
+void if_hsi_sw_reset(int ch);
+void if_hsi_get_fifo_occupancy(int ch, size_t *occ);
+
+/* Data path: counts are in 32-bit words. */
+int if_hsi_read(int ch, u32 *data, unsigned int count);
+int if_hsi_poll(int ch);
+int if_hsi_write(int ch, u32 *data, unsigned int count);
+
+void if_hsi_cancel_read(int ch);
+void if_hsi_cancel_write(int ch);
+
+#endif /* _HSI_IF_H */
diff --git a/drivers/omap_hsi/hsi-protocol-if.h b/drivers/omap_hsi/hsi-protocol-if.h
new file mode 100644
index 0000000..f56ef36
--- /dev/null
+++ b/drivers/omap_hsi/hsi-protocol-if.h
@@ -0,0 +1,187 @@
+/*
+ * hsi-protocol-if.h
+ *
+ * Part of the HSI character driver, private headers.
+ *
+ * Copyright (C) 2009 Nokia Corporation. All rights reserved.
+ * Copyright (C) 2009 Texas Instruments, Inc.
+ *
+ * Author: Andras Domokos <andras.domokos@nokia.com>
+ * Author: Sebastien JAN <s-jan@ti.com>
+ *
+ * This package is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
+ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
+ */
+
+/* Guard renamed from _HSI_IF_H: that macro is already used by hsi-if.h,
+ * so reusing it here would make whichever header is included second a
+ * no-op. Each header must have a unique guard. */
+#ifndef _HSI_PROTOCOL_IF_H
+#define _HSI_PROTOCOL_IF_H
+
+/* Event word layout: low 16 bits payload, bits 16..19 event type. */
+#define HSI_EV_MASK (0xffff << 0)
+#define HSI_EV_TYPE_MASK (0x0f << 16)
+#define HSI_EV_IN (0x01 << 16)
+#define HSI_EV_OUT (0x02 << 16)
+#define HSI_EV_EXCEP (0x03 << 16)
+#define HSI_EV_AVAIL (0x04 << 16)
+#define HSI_EV_TYPE(event) ((event) & HSI_EV_TYPE_MASK)
+
+#define HSI_HWBREAK 1
+#define HSI_ERROR 2
+
+#define HSI_MAX_CHANNELS 16
+#define CHANNEL_MASK 0xFF
+#define HSI_LL_INVALID_CHANNEL 0xFF
+
+struct hsi_event {
+	unsigned int event;
+	u32 *data;
+	unsigned int count;
+};
+
+/* Per-channel protocol state: current transfer pointers/counters plus the
+ * link-layer TX/RX state machine states and NAK counters. */
+struct if_hsi_channel {
+	struct hsi_device *dev;
+	unsigned int channel_id;
+	u32 *tx_data;
+	unsigned int tx_count;
+	u32 *rx_data;
+	unsigned int rx_count;
+	unsigned int opened;
+	unsigned int state;
+	u32 *tx_buf;
+	u32 *rx_buf;
+	unsigned int tx_state;
+	unsigned int rx_state;
+	unsigned int tx_nak_count;
+	unsigned int rx_nak_count;
+	spinlock_t lock; /* Serializes access to channel data */
+};
+
+struct if_hsi_iface {
+	struct if_hsi_channel channels[HSI_MAX_CHANNELS];
+#if 0
+	int bootstrap;
+#endif
+	unsigned long init_chan_map;
+	spinlock_t lock; /* Serializes access to HSI functional interface */
+};
+
+/* Command trace ring (50 entries) with per-command timestamps, used for
+ * protocol debugging. */
+struct if_hsi_cmd {
+	u32 tx_cmd[50];
+	u32 rx_cmd[50];
+	struct timespec tx_cmd_time[50];
+	struct timespec rx_cmd_time[50];
+};
+
+/* Link-layer protocol command opcodes. */
+enum {
+	HSI_LL_MSG_BREAK = 0x00,
+	HSI_LL_MSG_ECHO = 0x01,
+	HSI_LL_MSG_INFO_REQ = 0x02,
+	HSI_LL_MSG_INFO = 0x03,
+	HSI_LL_MSG_CONFIGURE = 0x04,
+	HSI_LL_MSG_ALLOCATE_CH = 0x05,
+	HSI_LL_MSG_RELEASE_CH = 0x06,
+	HSI_LL_MSG_OPEN_CONN = 0x07,
+	HSI_LL_MSG_CONN_READY = 0x08,
+	HSI_LL_MSG_CONN_CLOSED = 0x09,
+	HSI_LL_MSG_CANCEL_CONN = 0x0A,
+	HSI_LL_MSG_ACK = 0x0B,
+	HSI_LL_MSG_NAK = 0x0C,
+	HSI_LL_MSG_CONF_RATE = 0x0D,
+	HSI_LL_MSG_OPEN_CONN_OCTET = 0x0E,
+	HSI_LL_MSG_INVALID = 0xFF,
+};
+
+/* TX side link-layer state machine states. */
+enum {
+	HSI_LL_TX_STATE_UNDEF,
+	HSI_LL_TX_STATE_CLOSED,
+	HSI_LL_TX_STATE_IDLE,
+	HSI_LL_TX_STATE_POWER_DOWN,
+	HSI_LL_TX_STATE_ERROR,
+	HSI_LL_TX_STATE_SEND_OPEN_CONN,
+	HSI_LL_TX_STATE_WAIT_FOR_ACK,
+	HSI_LL_TX_STATE_NACK,
+	HSI_LL_TX_STATE_WAIT_FOR_CONN_READY,
+	HSI_LL_TX_STATE_SEND_CONF_RATE,
+	HSI_LL_TX_STATE_WAIT_FOR_CONF_ACK,
+	HSI_LL_TX_STATE_TX,
+	HSI_LL_TX_STATE_WAIT_FOR_CONN_CLOSED,
+	HSI_LL_TX_STATE_TO_OPEN_CONN,
+	HSI_LL_TX_STATE_TO_ACK,
+	HSI_LL_TX_STATE_TO_READY,
+	HSI_LL_TX_STATE_TO_CONF,
+	HSI_LL_TX_STATE_TO_CONF_ACK,
+	HSI_LL_TX_STATE_TO_TX,
+	HSI_LL_TX_STATE_TO_CLOSE,
+	HSI_LL_TX_STATE_SEND_BREAK,
+};
+
+/* RX side link-layer state machine states. */
+enum {
+	HSI_LL_RX_STATE_UNDEF,
+	HSI_LL_RX_STATE_CLOSED,
+	HSI_LL_RX_STATE_IDLE,
+	HSI_LL_RX_STATE_POWER_DOWN,
+	HSI_LL_RX_STATE_ERROR,
+	HSI_LL_RX_STATE_BLOCKED,
+	HSI_LL_RX_STATE_SEND_ACK,
+	HSI_LL_RX_STATE_SEND_NACK,
+	HSI_LL_RX_STATE_SEND_CONN_READY,
+	HSI_LL_RX_STATE_RX,
+	HSI_LL_RX_STATE_SEND_CONN_CLOSED,
+	HSI_LL_RX_STATE_SEND_CONN_CANCEL,
+	HSI_LL_RX_STATE_WAIT_FOR_CANCEL_CONN_ACK,
+	HSI_LL_RX_STATE_SEND_CONF_ACK,
+	HSI_LL_RX_STATE_SEND_CONF_NACK,
+	HSI_LL_RX_STATE_TO_RX,
+	HSI_LL_RX_STATE_TO_ACK,
+	HSI_LL_RX_STATE_TO_NACK,
+	HSI_LL_RX_STATE_TO_CONN_READY,
+	HSI_LL_RX_STATE_TO_CONN_CLOSED,
+	HSI_LL_RX_STATE_TO_CONN_CANCEL,
+	HSI_LL_RX_STATE_TO_CONN_CANCEL_ACK,
+	HSI_LL_RX_STATE_TO_CONF_ACK,
+	HSI_LL_RX_STATE_SEND_BREAK,
+};
+
+
+int if_hsi_init(void);
+int if_hsi_exit(void);
+
+int if_hsi_start(int ch);
+void if_hsi_stop(int ch);
+
+void if_hsi_send_break(int ch);
+void if_hsi_flush_rx(int ch);
+void if_hsi_flush_tx(int ch);
+void if_hsi_bootstrap(int ch);
+void if_hsi_set_wakeline(int ch, unsigned int state);
+void if_hsi_get_wakeline(int ch, unsigned int *state);
+
+#if 0
+int if_hsi_set_rx(int ch, struct hsi_rx_config *cfg);
+void if_hsi_get_rx(int ch, struct hsi_rx_config *cfg);
+int if_hsi_set_tx(int ch, struct hsi_tx_config *cfg);
+void if_hsi_get_tx(int ch, struct hsi_tx_config *cfg);
+#endif
+
+int if_hsi_read(int ch, u32 *data, unsigned int count);
+int if_hsi_poll(int ch);
+int if_hsi_write(int ch, u32 *data, unsigned int count);
+
+void if_hsi_cancel_read(int ch);
+void if_hsi_cancel_write(int ch);
+
+void if_notify(int ch, struct hsi_event *ev);
+int hsi_proto_read(int ch, u32 *buffer, int count);
+int hsi_proto_write(int ch, u32 *buffer, int length);
+int hsi_decode_cmd(u32 *data, u32 *cmd, u32 *ch, u32 *param);
+int protocol_create_cmd(int cmd_type, unsigned int channel, void *arg);
+int hsi_protocol_send_command(u32 cmd, u32 channel, u32 param);
+void rx_stm(u32 cmd, u32 ch, u32 param);
+#if 0
+int hsi_start_protocol(void);
+#endif
+#endif /* _HSI_PROTOCOL_IF_H */
diff --git a/drivers/omap_hsi/hsi_driver.c b/drivers/omap_hsi/hsi_driver.c
new file mode 100644
index 0000000..159888e
--- /dev/null
+++ b/drivers/omap_hsi/hsi_driver.c
@@ -0,0 +1,1147 @@
+/*
+ * hsi_driver.c
+ *
+ * Implements HSI module interface, initialization, and PM related functions.
+ *
+ * Copyright (C) 2007-2008 Nokia Corporation. All rights reserved.
+ * Copyright (C) 2009 Texas Instruments, Inc.
+ *
+ * Author: Carlos Chinea <carlos.chinea@nokia.com>
+ * Author: Sebastien JAN <s-jan@ti.com>
+ *
+ * This package is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
+ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/device.h>
+#include <linux/platform_device.h>
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/ioport.h>
+#include <linux/gpio.h>
+#include <linux/list.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/pm_runtime.h>
+
+#include <mach/omap4-common.h>
+#include <plat/omap_device.h>
+
+#include "hsi_driver.h"
+
+#if 0
+static struct pm_qos_request_list *pm_qos_handle;
+#endif
+
+#define HSI_MODULENAME "omap_hsi"
+#define HSI_DRIVER_VERSION "0.4.1"
+#define HSI_RESETDONE_MAX_RETRIES 5 /* Max 5*L4 Read cycles waiting for */
+ /* reset to complete */
+#define HSI_RESETDONE_NORMAL_RETRIES 1 /* Reset should complete in 1 R/W */
+
+/* Save the HSI controller and per-port register context into the platform
+ * data, so hsi_restore_ctx() can rebuild it after a context loss.
+ * NOTE(review): all accesses read HSI registers, so the module clocks
+ * must be enabled by the caller -- confirm against call sites. */
+void hsi_save_ctx(struct hsi_dev *hsi_ctrl)
+{
+	struct hsi_platform_data *pdata = hsi_ctrl->dev->platform_data;
+	struct platform_device *pdev = to_platform_device(hsi_ctrl->dev);
+	void __iomem *base = hsi_ctrl->base;
+	struct port_ctx *p;
+	int port;
+
+	pdata->ctx->sysconfig = hsi_inl(base, HSI_SYS_SYSCONFIG_REG);
+	pdata->ctx->gdd_gcr = hsi_inl(base, HSI_GDD_GCR_REG);
+	/* DLL register only exists on HSI hardware (not on SSI) */
+	if (hsi_driver_device_is_hsi(pdev))
+		pdata->ctx->dll = hsi_inl(base, HSI_HSR_DLL_REG);
+
+	/* Register map numbers ports from 1; context array from 0 */
+	for (port = 1; port <= pdata->num_ports; port++) {
+		p = &pdata->ctx->pctx[port - 1];
+		/* HSI TOP */
+		p->sys_mpu_enable[0] = hsi_inl(base,
+					HSI_SYS_MPU_ENABLE_REG(port, 0));
+		p->sys_mpu_enable[1] = hsi_inl(base,
+					HSI_SYS_MPU_U_ENABLE_REG(port, 0));
+
+		/* HST */
+		p->hst.mode = hsi_inl(base, HSI_HST_MODE_REG(port));
+		/* FRAMESIZE register is SSI-only */
+		if (!hsi_driver_device_is_hsi(pdev))
+			p->hst.frame_size = hsi_inl(base,
+						HSI_HST_FRAMESIZE_REG(port));
+		p->hst.divisor = hsi_inl(base, HSI_HST_DIVISOR_REG(port));
+		p->hst.channels = hsi_inl(base, HSI_HST_CHANNELS_REG(port));
+		p->hst.arb_mode = hsi_inl(base, HSI_HST_ARBMODE_REG(port));
+
+		/* HSR */
+		p->hsr.mode = hsi_inl(base, HSI_HSR_MODE_REG(port));
+		if (!hsi_driver_device_is_hsi(pdev))
+			p->hsr.frame_size = hsi_inl(base,
+						HSI_HSR_FRAMESIZE_REG(port));
+		p->hsr.divisor = hsi_inl(base, HSI_HSR_DIVISOR_REG(port));
+		p->hsr.channels = hsi_inl(base, HSI_HSR_CHANNELS_REG(port));
+		p->hsr.counters = hsi_inl(base, HSI_HSR_COUNTERS_REG(port));
+	}
+}
+
+/* Restore the register context captured by hsi_save_ctx(). The write
+ * order matters: HSR mode is deliberately written last (see below). */
+void hsi_restore_ctx(struct hsi_dev *hsi_ctrl)
+{
+	struct hsi_platform_data *pdata = hsi_ctrl->dev->platform_data;
+	struct platform_device *pdev = to_platform_device(hsi_ctrl->dev);
+	void __iomem *base = hsi_ctrl->base;
+	struct port_ctx *p;
+	int port;
+
+	hsi_outl(pdata->ctx->sysconfig, base, HSI_SYS_SYSCONFIG_REG);
+	hsi_outl(pdata->ctx->gdd_gcr, base, HSI_GDD_GCR_REG);
+	/* DLL register only exists on HSI hardware (not on SSI) */
+	if (hsi_driver_device_is_hsi(pdev))
+		hsi_outl(pdata->ctx->dll, base, HSI_HSR_DLL_REG);
+
+	for (port = 1; port <= pdata->num_ports; port++) {
+		p = &pdata->ctx->pctx[port - 1];
+		/* HSI TOP */
+		hsi_outl(p->sys_mpu_enable[0], base,
+			 HSI_SYS_MPU_ENABLE_REG(port, 0));
+		hsi_outl(p->sys_mpu_enable[1], base,
+			 HSI_SYS_MPU_U_ENABLE_REG(port, 0));
+
+		/* HST */
+		hsi_outl(p->hst.mode, base, HSI_HST_MODE_REG(port));
+		/* FRAMESIZE register is SSI-only */
+		if (!hsi_driver_device_is_hsi(pdev))
+			hsi_outl(p->hst.frame_size, base,
+				 HSI_HST_FRAMESIZE_REG(port));
+		hsi_outl(p->hst.divisor, base, HSI_HST_DIVISOR_REG(port));
+		hsi_outl(p->hst.channels, base, HSI_HST_CHANNELS_REG(port));
+		hsi_outl(p->hst.arb_mode, base, HSI_HST_ARBMODE_REG(port));
+
+		/* HSR (mode intentionally NOT restored here) */
+		if (!hsi_driver_device_is_hsi(pdev))
+			hsi_outl(p->hsr.frame_size, base,
+				 HSI_HSR_FRAMESIZE_REG(port));
+		hsi_outl(p->hsr.divisor, base, HSI_HSR_DIVISOR_REG(port));
+		hsi_outl(p->hsr.channels, base, HSI_HSR_CHANNELS_REG(port));
+		hsi_outl(p->hsr.counters, base, HSI_HSR_COUNTERS_REG(port));
+	}
+
+	if (hsi_driver_device_is_hsi(pdev)) {
+		/* SW strategy for HSI fifo management can be changed here */
+		hsi_fifo_mapping(hsi_ctrl, HSI_FIFO_MAPPING_DEFAULT);
+	}
+
+	/* As a last step move HSR from MODE_VAL.SLEEP to the relevant mode. */
+	/* This will enable the ACREADY flow control mechanism. */
+	for (port = 1; port <= pdata->num_ports; port++) {
+		p = &pdata->ctx->pctx[port - 1];
+		hsi_outl(p->hsr.mode, base, HSI_HSR_MODE_REG(port));
+	}
+}
+
+
+/* NOTE: Function called in soft interrupt context (tasklet) */
+/* Dispatch a port event to the registered per-channel port_event
+ * callbacks. Data-available events carry the channel number in `arg'
+ * (an integer smuggled through the void pointer) and go to that single
+ * channel; every other event is broadcast to all channels of the port.
+ * Always returns 0. */
+int hsi_port_event_handler(struct hsi_port *p, unsigned int event, void *arg)
+{
+	struct hsi_channel *hsi_channel;
+	int ch;
+
+
+	if (event == HSI_EVENT_HSR_DATAAVAILABLE) {
+		/* The data-available event is channel-specific and must not be
+		 * broadcasted
+		 */
+		hsi_channel = p->hsi_channel + (int)arg;
+		/* rw_lock guards against the channel being (un)claimed
+		 * while the callback runs */
+		read_lock(&hsi_channel->rw_lock);
+		if ((hsi_channel->dev) && (hsi_channel->port_event))
+			hsi_channel->port_event(hsi_channel->dev, event, arg);
+		read_unlock(&hsi_channel->rw_lock);
+	} else {
+		for (ch = 0; ch < p->max_ch; ch++) {
+			hsi_channel = p->hsi_channel + ch;
+			read_lock(&hsi_channel->rw_lock);
+			if ((hsi_channel->dev) && (hsi_channel->port_event))
+				hsi_channel->port_event(hsi_channel->dev,
+							event, arg);
+			read_unlock(&hsi_channel->rw_lock);
+		}
+	}
+	return 0;
+}
+
+/* Intentionally empty release callback for hsi_device's embedded
+ * struct device. */
+static void hsi_dev_release(struct device *dev)
+{
+	/* struct device kfree is already made in unregister_hsi_devices().
+	 * Registering this function is necessary to avoid an error from
+	 * the device_release() function.
+	 */
+}
+
+/* Register a hsi_device, linked to a port and channel id */
+/* Allocates the hsi_device, fills in controller/port/channel ids, names
+ * it after controller id (omitted when the platform id is negative, i.e.
+ * a single anonymous controller) and registers it on the HSI bus. On
+ * success the channel's dev pointer is set under its rw_lock; on failure
+ * the allocation is freed. Returns device_register()'s result or -ENOMEM. */
+static int __init reg_hsi_dev_ch(struct hsi_dev *hsi_ctrl, unsigned int p,
+				 unsigned int ch)
+{
+	struct hsi_device *dev;
+	struct hsi_port *port = &hsi_ctrl->hsi_port[p];
+	int err;
+
+	dev = kzalloc(sizeof(*dev), GFP_KERNEL);
+	if (!dev)
+		return -ENOMEM;
+
+	dev->n_ctrl = hsi_ctrl->id;
+	dev->n_p = p;
+	dev->n_ch = ch;
+	dev->ch = &port->hsi_channel[ch];
+	dev->device.bus = &hsi_bus_type;
+	dev->device.parent = hsi_ctrl->dev;
+	dev->device.release = hsi_dev_release;
+	if (dev->n_ctrl < 0)
+		dev_set_name(&dev->device, "omap_hsi-p%u.c%u", p, ch);
+	else
+		dev_set_name(&dev->device, "omap_hsi%d-p%u.c%u", dev->n_ctrl, p,
+			     ch);
+
+	dev_dbg(hsi_ctrl->dev,
+		"reg_hsi_dev_ch, port %d, ch %d, hsi_ctrl->dev:0x%x,"
+		"&dev->device:0x%x\n",
+		p, ch, (unsigned int)hsi_ctrl->dev, (unsigned int)&dev->device);
+
+	err = device_register(&dev->device);
+	if (err >= 0) {
+		/* Publish the device to the channel under its lock so the
+		 * event dispatch path never sees a half-set pointer */
+		write_lock_bh(&port->hsi_channel[ch].rw_lock);
+		port->hsi_channel[ch].dev = dev;
+		write_unlock_bh(&port->hsi_channel[ch].rw_lock);
+	} else {
+		kfree(dev);
+	}
+	return err;
+}
+
+/* Create and register one hsi_device per (port, channel) pair of the
+ * controller. Stops at the first failure and returns its error code;
+ * returns 0 when every channel registered. */
+static int __init register_hsi_devices(struct hsi_dev *hsi_ctrl)
+{
+	int p, c, ret;
+
+	for (p = 0; p < hsi_ctrl->max_p; p++) {
+		for (c = 0; c < hsi_ctrl->hsi_port[p].max_ch; c++) {
+			ret = reg_hsi_dev_ch(hsi_ctrl, p, c);
+			if (ret < 0)
+				return ret;
+		}
+	}
+
+	return 0;
+}
+
+/* Close, unregister and free every hsi_device of the controller.
+ * Fix: skip channels whose dev pointer is NULL -- if device registration
+ * was aborted midway, later channels never got a device and the original
+ * code would dereference NULL via hsi_close()/device_unregister(). */
+static void __exit unregister_hsi_devices(struct hsi_dev *hsi_ctrl)
+{
+	struct hsi_port *hsi_p;
+	struct hsi_device *device;
+	unsigned int port;
+	unsigned int ch;
+
+	for (port = 0; port < hsi_ctrl->max_p; port++) {
+		hsi_p = &hsi_ctrl->hsi_port[port];
+		for (ch = 0; ch < hsi_p->max_ch; ch++) {
+			device = hsi_p->hsi_channel[ch].dev;
+			if (!device)
+				continue;
+			hsi_close(device);
+			device_unregister(&device->device);
+			kfree(device);
+		}
+	}
+}
+
+/* Program the default power-management settings: auto-idle with smart
+ * idle/standby wakeup in SYSCONFIG, and DMA clock auto-gating on. */
+void hsi_set_pm_default(struct hsi_dev *hsi_ctrl)
+{
+	/* Set default SYSCONFIG PM settings */
+	hsi_outl((HSI_AUTOIDLE | HSI_SIDLEMODE_SMART_WAKEUP |
+		  HSI_MIDLEMODE_SMART_WAKEUP),
+		 hsi_ctrl->base, HSI_SYS_SYSCONFIG_REG);
+	hsi_outl(HSI_CLK_AUTOGATING_ON, hsi_ctrl->base, HSI_GDD_GCR_REG);
+
+	/* HSI_TODO : use the HWMOD API : omap_hwmod_set_slave_idlemode() */
+}
+
+/* Keep the HSI module forced ON: no-idle/no-standby in SYSCONFIG so PRCM
+ * idle requests are never acknowledged. Counterpart of
+ * hsi_set_pm_default(). */
+void hsi_set_pm_force_hsi_on(struct hsi_dev *hsi_ctrl)
+{
+	/* Force HSI to ON by never acknowledging a PRCM idle request */
+	/* SIdleAck and MStandby are never asserted */
+	hsi_outl((HSI_AUTOIDLE | HSI_SIDLEMODE_NO |
+		  HSI_MIDLEMODE_NO),
+		 hsi_ctrl->base, HSI_SYS_SYSCONFIG_REG);
+	hsi_outl(HSI_CLK_AUTOGATING_ON, hsi_ctrl->base, HSI_GDD_GCR_REG);
+
+	/* HSI_TODO : use the HWMOD API : omap_hwmod_set_slave_idlemode() */
+}
+
+/* Soft-reset the HSI block and then its DMA engine, polling each reset
+ * status up to HSI_RESETDONE_MAX_RETRIES register reads. Returns 0 on
+ * success, -EIO if either reset does not complete in time; an abnormally
+ * slow (but successful) reset only logs a warning. */
+int hsi_softreset(struct hsi_dev *hsi_ctrl)
+{
+	unsigned int ind = 0;
+	void __iomem *base = hsi_ctrl->base;
+	u32 status;
+
+	/* Reseting HSI Block */
+	hsi_outl_or(HSI_SOFTRESET, base, HSI_SYS_SYSCONFIG_REG);
+	do {
+		status = hsi_inl(base, HSI_SYS_SYSSTATUS_REG);
+		ind++;
+	} while ((!(status & HSI_RESETDONE)) &&
+		 (ind < HSI_RESETDONE_MAX_RETRIES));
+
+	if (ind >= HSI_RESETDONE_MAX_RETRIES) {
+		dev_err(hsi_ctrl->dev, "HSI SW_RESET failed to complete within"
+			" %d retries.\n", HSI_RESETDONE_MAX_RETRIES);
+		return -EIO;
+	} else if (ind > HSI_RESETDONE_NORMAL_RETRIES) {
+		dev_warn(hsi_ctrl->dev, "HSI SW_RESET abnormally long:"
+			 " %d retries to complete.\n", ind);
+	}
+
+	ind = 0;
+	/* Reseting DMA Engine */
+	/* Note: GDD reset is done when the SWRESET bit self-clears */
+	hsi_outl_or(HSI_GDD_GRST_SWRESET, base, HSI_GDD_GRST_REG);
+	do {
+		status = hsi_inl(base, HSI_GDD_GRST_REG);
+		ind++;
+	} while ((status & HSI_GDD_GRST_SWRESET) &&
+		 (ind < HSI_RESETDONE_MAX_RETRIES));
+
+	if (ind >= HSI_RESETDONE_MAX_RETRIES) {
+		dev_err(hsi_ctrl->dev, "HSI DMA SW_RESET failed to complete"
+			" within %d retries.\n", HSI_RESETDONE_MAX_RETRIES);
+		return -EIO;
+	}
+
+	if (ind > HSI_RESETDONE_NORMAL_RETRIES) {
+		dev_warn(hsi_ctrl->dev, "HSI DMA SW_RESET abnormally long:"
+			 " %d retries to complete.\n", ind);
+	}
+
+	return 0;
+}
+
+/* Program every port's HST/HSR registers from the platform-data context.
+ * FRAMESIZE registers exist on SSI only; DLL and fifo mapping on HSI only.
+ * NOTE(review): HSR divisor is written only on HSI here while
+ * hsi_save_ctx()/hsi_restore_ctx() handle it unconditionally -- confirm
+ * the asymmetry is intentional. */
+static void hsi_set_ports_default(struct hsi_dev *hsi_ctrl,
+				  struct platform_device *pd)
+{
+	struct port_ctx *cfg;
+	struct hsi_platform_data *pdata = pd->dev.platform_data;
+	unsigned int port = 0;
+	void __iomem *base = hsi_ctrl->base;
+	struct platform_device *pdev = to_platform_device(hsi_ctrl->dev);
+
+	for (port = 1; port <= pdata->num_ports; port++) {
+		cfg = &pdata->ctx->pctx[port - 1];
+		/* HST */
+		hsi_outl(cfg->hst.mode | cfg->hst.flow |
+			 HSI_HST_MODE_WAKE_CTRL_SW, base,
+			 HSI_HST_MODE_REG(port));
+		if (!hsi_driver_device_is_hsi(pdev))
+			hsi_outl(cfg->hst.frame_size, base,
+				 HSI_HST_FRAMESIZE_REG(port));
+		hsi_outl(cfg->hst.divisor, base, HSI_HST_DIVISOR_REG(port));
+		hsi_outl(cfg->hst.channels, base, HSI_HST_CHANNELS_REG(port));
+		hsi_outl(cfg->hst.arb_mode, base, HSI_HST_ARBMODE_REG(port));
+
+		/* HSR */
+		hsi_outl(cfg->hsr.mode | cfg->hsr.flow, base,
+			 HSI_HSR_MODE_REG(port));
+		if (!hsi_driver_device_is_hsi(pdev))
+			hsi_outl(cfg->hsr.frame_size, base,
+				 HSI_HSR_FRAMESIZE_REG(port));
+		hsi_outl(cfg->hsr.channels, base, HSI_HSR_CHANNELS_REG(port));
+		if (hsi_driver_device_is_hsi(pdev))
+			hsi_outl(cfg->hsr.divisor, base,
+				 HSI_HSR_DIVISOR_REG(port));
+		hsi_outl(cfg->hsr.counters, base, HSI_HSR_COUNTERS_REG(port));
+	}
+
+	if (hsi_driver_device_is_hsi(pdev)) {
+		/* SW strategy for HSI fifo management can be changed here */
+		hsi_fifo_mapping(hsi_ctrl, HSI_FIFO_MAPPING_DEFAULT);
+		hsi_outl(pdata->ctx->dll, base, HSI_HSR_DLL_REG);
+	}
+}
+
+/* Bring every channel of the port to its pristine software state: no
+ * owner device, no pending read/write transfer (lch == -1 means no DMA
+ * logical channel assigned) and no registered callbacks.
+ * Always returns 0. */
+static int __init hsi_port_channels_init(struct hsi_port *port)
+{
+	unsigned int i;
+	struct hsi_channel *channel;
+
+	for (i = 0; i < port->max_ch; i++) {
+		channel = &port->hsi_channel[i];
+		channel->channel_number = i;
+		rwlock_init(&channel->rw_lock);
+		channel->flags = 0;
+		channel->hsi_port = port;
+		channel->read_data.addr = NULL;
+		channel->read_data.size = 0;
+		channel->read_data.lch = -1;
+		channel->write_data.addr = NULL;
+		channel->write_data.size = 0;
+		channel->write_data.lch = -1;
+		channel->dev = NULL;
+		channel->read_done = NULL;
+		channel->write_done = NULL;
+		channel->port_event = NULL;
+	}
+
+	return 0;
+}
+
+/* Clear the transfer state of every channel of the port (flags and both
+ * read/write descriptors), but keep locks, callbacks and device links
+ * intact -- unlike hsi_port_channels_init(). Always returns 0. */
+static int hsi_port_channels_reset(struct hsi_port *port)
+{
+	unsigned int i;
+	struct hsi_channel *channel;
+
+	for (i = 0; i < port->max_ch; i++) {
+		channel = &port->hsi_channel[i];
+		channel->flags = 0;
+		channel->read_data.addr = NULL;
+		channel->read_data.size = 0;
+		channel->read_data.lch = -1;
+		channel->write_data.addr = NULL;
+		channel->write_data.size = 0;
+		channel->write_data.lch = -1;
+	}
+
+	return 0;
+}
+
+/* Reset the driver-side state after a hardware soft reset: clear per-port
+ * channel state, restore default PM settings, reprogram the ports from
+ * platform data and log the hardware revision. */
+void hsi_softreset_driver(struct hsi_dev *hsi_ctrl)
+{
+	struct platform_device *pd = to_platform_device(hsi_ctrl->dev);
+	struct hsi_platform_data *pdata = pd->dev.platform_data;
+	struct hsi_port *hsi_p;
+	unsigned int port;
+	u32 revision;
+
+	/* HSI port reset */
+	for (port = 0; port < hsi_ctrl->max_p; port++) {
+		hsi_p = &hsi_ctrl->hsi_port[port];
+		hsi_p->counters_on = 1;
+		hsi_p->reg_counters = pdata->ctx->pctx[port].hsr.counters;
+		hsi_port_channels_reset(&hsi_ctrl->hsi_port[port]);
+	}
+
+	hsi_set_pm_default(hsi_ctrl);
+
+	/* Re-Configure HSI ports */
+	hsi_set_ports_default(hsi_ctrl, pd);
+
+	/* Gather info from registers for the driver.(REVISION) */
+	revision = hsi_inl(hsi_ctrl->base, HSI_SYS_REVISION_REG);
+	if (hsi_driver_device_is_hsi(pd))
+		dev_info(hsi_ctrl->dev, "HSI Hardware REVISION 0x%x\n",
+			 revision);
+	else
+		dev_info(hsi_ctrl->dev, "SSI Hardware REVISION %d.%d\n",
+			 (revision & HSI_SSI_REV_MAJOR) >> 4,
+			 (revision & HSI_SSI_REV_MINOR));
+}
+
+/* Look up the port's MPU IRQ from the platform resources and install it
+ * via hsi_mpu_init(). HSI exposes one IRQ per port; SSI reserves two IRQ
+ * resources per port, of which only the first is used here.
+ * Returns -ENXIO when the resource is missing. */
+static int __init hsi_request_mpu_irq(struct hsi_port *hsi_p)
+{
+	struct hsi_dev *hsi_ctrl = hsi_p->hsi_controller;
+	struct platform_device *pd = to_platform_device(hsi_ctrl->dev);
+	struct resource *mpu_irq;
+
+	if (hsi_driver_device_is_hsi(pd))
+		mpu_irq = platform_get_resource(pd, IORESOURCE_IRQ,
+						hsi_p->port_number - 1);
+	else		/* SSI support 2 IRQs per port */
+		mpu_irq = platform_get_resource(pd, IORESOURCE_IRQ,
+						(hsi_p->port_number - 1) * 2);
+
+	if (!mpu_irq) {
+		dev_err(hsi_ctrl->dev, "HSI misses info for MPU IRQ on"
+			" port %d\n", hsi_p->port_number);
+		return -ENXIO;
+	}
+	hsi_p->n_irq = 0;	/* We only use one irq line */
+	hsi_p->irq = mpu_irq->start;
+	return hsi_mpu_init(hsi_p, mpu_irq->name);
+}
+
+/* Request the CAWAKE GPIO IRQ for the port. On HSI hardware CAWAKE is
+ * handled without a GPIO, so this is a no-op (cawake_gpio = -1). On SSI
+ * the IRQ resource is looked up at index 4 + port_number
+ * (NOTE(review): index inferred from the resource table layout --
+ * confirm against the board file). Returns 0 when no GPIO support is
+ * needed, -ENXIO when the resource is missing. */
+static int __init hsi_request_cawake_irq(struct hsi_port *hsi_p)
+{
+	struct hsi_dev *hsi_ctrl = hsi_p->hsi_controller;
+	struct platform_device *pd = to_platform_device(hsi_ctrl->dev);
+	struct resource *cawake_irq;
+
+	if (hsi_driver_device_is_hsi(pd)) {
+		hsi_p->cawake_gpio = -1;
+		return 0;
+	} else {
+		cawake_irq = platform_get_resource(pd, IORESOURCE_IRQ,
+						   4 + hsi_p->port_number);
+	}
+
+	if (!cawake_irq) {
+		dev_err(hsi_ctrl->dev, "SSI device misses info for CAWAKE"
+			"IRQ on port %d\n", hsi_p->port_number);
+		return -ENXIO;
+	}
+
+	if (cawake_irq->flags & IORESOURCE_UNSET) {
+		dev_info(hsi_ctrl->dev, "No CAWAKE GPIO support\n");
+		hsi_p->cawake_gpio = -1;
+		return 0;
+	}
+
+	hsi_p->cawake_gpio_irq = cawake_irq->start;
+	hsi_p->cawake_gpio = irq_to_gpio(cawake_irq->start);
+	return hsi_cawake_init(hsi_p, cawake_irq->name);
+}
+
+/* Release the MPU and CAWAKE interrupt resources of the first
+ * `max_ports' ports of the controller. */
+static void hsi_ports_exit(struct hsi_dev *hsi_ctrl, unsigned int max_ports)
+{
+	unsigned int i;
+
+	for (i = 0; i < max_ports; i++) {
+		hsi_mpu_exit(&hsi_ctrl->hsi_port[i]);
+		hsi_cawake_exit(&hsi_ctrl->hsi_port[i]);
+	}
+}
+
+/* Initialize every port of the controller: software state, channel
+ * structures, then the MPU and CAWAKE IRQs. Rolls back on failure.
+ *
+ * Fix: the original rollback called hsi_ports_exit(hsi_ctrl, port + 1),
+ * which (a) released IRQs of the current, not-fully-initialized port and
+ * (b) on the rback3 path freed the current port's MPU IRQ twice (once
+ * directly, once via hsi_ports_exit). Only the `port' fully initialized
+ * ports are cleaned up here; the current port's MPU IRQ is released
+ * separately when only its CAWAKE request failed. */
+static int __init hsi_ports_init(struct hsi_dev *hsi_ctrl)
+{
+	struct platform_device *pd = to_platform_device(hsi_ctrl->dev);
+	struct hsi_platform_data *pdata = pd->dev.platform_data;
+	struct hsi_port *hsi_p;
+	unsigned int port;
+	int err;
+
+	for (port = 0; port < hsi_ctrl->max_p; port++) {
+		hsi_p = &hsi_ctrl->hsi_port[port];
+		hsi_p->port_number = port + 1;	/* HW ports are 1-based */
+		hsi_p->hsi_controller = hsi_ctrl;
+		hsi_p->max_ch = hsi_driver_device_is_hsi(pd) ?
+		    HSI_CHANNELS_MAX : HSI_SSI_CHANNELS_MAX;
+		hsi_p->irq = 0;
+		hsi_p->cawake_status = -1;	/* Unknown */
+		hsi_p->cawake_off_event = false;
+		hsi_p->acwake_status = 0;
+		hsi_p->in_int_tasklet = false;
+		hsi_p->in_cawake_tasklet = false;
+		hsi_p->counters_on = 1;
+		hsi_p->reg_counters = pdata->ctx->pctx[port].hsr.counters;
+		spin_lock_init(&hsi_p->lock);
+		err = hsi_port_channels_init(&hsi_ctrl->hsi_port[port]);
+		if (err < 0)
+			goto rback1;
+		err = hsi_request_mpu_irq(hsi_p);
+		if (err < 0)
+			goto rback2;
+		err = hsi_request_cawake_irq(hsi_p);
+		if (err < 0)
+			goto rback3;
+	}
+	return 0;
+rback3:
+	/* Current port got its MPU IRQ but not CAWAKE: release MPU here */
+	hsi_mpu_exit(hsi_p);
+rback2:
+	/* Clean up only the `port' ports that completed initialization */
+	hsi_ports_exit(hsi_ctrl, port);
+rback1:
+	return err;
+}
+
+/* Look up the DMA (GDD) IRQ resource -- index 2 on HSI, index 4 on SSI
+ * (after the per-port IRQ pairs) -- and install it via hsi_gdd_init().
+ * Returns -ENXIO when the resource is missing. */
+static int __init hsi_request_gdd_irq(struct hsi_dev *hsi_ctrl)
+{
+	struct platform_device *pd = to_platform_device(hsi_ctrl->dev);
+	struct resource *gdd_irq;
+
+	if (hsi_driver_device_is_hsi(pd))
+		gdd_irq = platform_get_resource(pd, IORESOURCE_IRQ, 2);
+	else
+		gdd_irq = platform_get_resource(pd, IORESOURCE_IRQ, 4);
+
+	if (!gdd_irq) {
+		dev_err(hsi_ctrl->dev, "HSI has no GDD IRQ resource\n");
+		return -ENXIO;
+	}
+
+	hsi_ctrl->gdd_irq = gdd_irq->start;
+	return hsi_gdd_init(hsi_ctrl, gdd_irq->name);
+}
+
+/* Read the number of DMA (GDD) channels from platform data, falling back
+ * to HSI_DMA_CHANNEL_DEFAULT when unset, and sanity-check that it is a
+ * power of two (only logged, not fatal -- same behavior as before).
+ * Returns 0 on success, -ENXIO when platform data is missing.
+ *
+ * Improvement: the 16-iteration scan comparing against each 1 << i is
+ * replaced by the standard n & (n - 1) power-of-two test (n is known
+ * non-zero on this path). */
+static int __init hsi_init_gdd_chan_count(struct hsi_dev *hsi_ctrl)
+{
+	struct platform_device *pd = to_platform_device(hsi_ctrl->dev);
+	u8 gdd_chan_count;
+	struct hsi_platform_data *pdata =
+	    (struct hsi_platform_data *)pd->dev.platform_data;
+
+	if (!pdata) {
+		dev_err(hsi_ctrl->dev, "HSI has no platform data\n");
+		return -ENXIO;
+	}
+
+	gdd_chan_count = pdata->hsi_gdd_chan_count;
+
+	if (!gdd_chan_count) {
+		dev_warn(hsi_ctrl->dev, "HSI device has no GDD channel count "
+			 "(use %d as default)\n",
+			 HSI_DMA_CHANNEL_DEFAULT);
+		hsi_ctrl->gdd_chan_count = HSI_DMA_CHANNEL_DEFAULT;
+	} else {
+		hsi_ctrl->gdd_chan_count = gdd_chan_count;
+		/* A power of two has exactly one bit set: n & (n-1) == 0 */
+		if (hsi_ctrl->gdd_chan_count & (hsi_ctrl->gdd_chan_count - 1))
+			dev_err(hsi_ctrl->dev, "The Number of DMA channels "
+				"shall be a power of 2! (=%d)\n",
+				hsi_ctrl->gdd_chan_count);
+	}
+	return 0;
+}
+
+/**
+* hsi_clocks_disable_channel - virtual wrapper for disabling HSI clocks for
+* a given channel
+* @dev - reference to the hsi device.
+* @channel_number - channel number which requests clock to be disabled
+* 0xFF means no particular channel
+*
+* Note : there is no real HW clock management per HSI channel, this is only
+* virtual to keep track of active channels and ease debug
+*
+* Function to be called with lock
+*/
+void hsi_clocks_disable_channel(struct device *dev, u8 channel_number,
+ const char *s)
+{
+ struct platform_device *pd = to_platform_device(dev);
+ struct hsi_dev *hsi_ctrl = platform_get_drvdata(pd);
+
+ if (channel_number != HSI_CH_NUMBER_NONE)
+ dev_dbg(dev, "CLK: hsi_clocks_disable for "
+ "channel %d: %s\n", channel_number, s);
+ else
+ dev_dbg(dev, "CLK: hsi_clocks_disable: %s\n", s);
+
+ if (!hsi_ctrl->clock_enabled) {
+ dev_dbg(dev, "Clocks already disabled, skipping...\n");
+ return;
+ }
+ if (hsi_is_hsi_controller_busy(hsi_ctrl)) {
+ dev_dbg(dev, "Cannot disable clocks, HSI port busy\n");
+ return;
+ }
+
+ if (hsi_is_hst_controller_busy(hsi_ctrl))
+ dev_dbg(dev, "Disabling clocks with HST FSM not IDLE !\n");
+
+#ifdef K3_0_PORTING_HSI_MISSING_FEATURE
+ /* Allow Fclk to change */
+ if (dpll_cascading_blocker_release(dev) < 0)
+ dev_warn(dev, "Error releasing DPLL cascading constraint\n");
+#endif
+
+ /* HSI_TODO : this can probably be changed
+ * to return pm_runtime_put(dev);
+ */
+ /*
+ pm_runtime_put_sync(dev);
+ */
+ hsi_runtime_suspend(dev);
+ omap_device_idle(pd);
+}
+
+/**
+* hsi_clocks_enable_channel - virtual wrapper for enabling HSI clocks for
+* a given channel
+* @dev - reference to the hsi device.
+* @channel_number - channel number which requests clock to be enabled
+* 0xFF means no particular channel
+*
+* Note : there is no real HW clock management per HSI channel, this is only
+* virtual to keep track of active channels and ease debug
+*
+* Function to be called with lock
+*/
+int hsi_clocks_enable_channel(struct device *dev, u8 channel_number,
+ const char *s)
+{
+ struct platform_device *pd = to_platform_device(dev);
+ struct hsi_dev *hsi_ctrl = platform_get_drvdata(pd);
+
+ if (channel_number != HSI_CH_NUMBER_NONE)
+ dev_dbg(dev, "CLK: hsi_clocks_enable for "
+ "channel %d: %s\n", channel_number, s);
+ else
+ dev_dbg(dev, "CLK: hsi_clocks_enable: %s\n", s);
+
+ if (hsi_ctrl->clock_enabled) {
+ dev_dbg(dev, "Clocks already enabled, skipping...\n");
+ return -EEXIST;
+ }
+
+#ifdef K3_0_PORTING_HSI_MISSING_FEATURE
+ /* Prevent Fclk change */
+ if (dpll_cascading_blocker_hold(dev) < 0)
+ dev_warn(dev, "Error holding DPLL cascading constraint\n");
+#endif
+
+ /*
+ return pm_runtime_get_sync(dev);
+ */
+ omap_device_enable(pd);
+ hsi_runtime_resume(dev);
+ return 0;
+}
+
+/* Map the controller's IO region, fill in the hsi_dev fields from the
+ * platform data and initialize DMA channel count, ports and the GDD IRQ.
+ * Uses devm_* for the memory resources, so only ports/IRQs need explicit
+ * rollback. Returns 0 on success or a negative errno.
+ *
+ * Improvement: open-coded "(mem->end - mem->start) + 1" replaced by the
+ * standard resource_size() helper (twice). */
+static int __init hsi_controller_init(struct hsi_dev *hsi_ctrl,
+				      struct platform_device *pd)
+{
+	struct hsi_platform_data *pdata = pd->dev.platform_data;
+	struct resource *mem, *ioarea;
+	int err;
+
+	mem = platform_get_resource(pd, IORESOURCE_MEM, 0);
+	if (!mem) {
+		dev_err(&pd->dev, "HSI device does not have "
+			"HSI IO memory region information\n");
+		return -ENXIO;
+	}
+	dev_dbg(&pd->dev, "hsi_controller_init : IORESOURCE_MEM %s [%x, %x]\n",
+		mem->name, mem->start, mem->end);
+
+	ioarea = devm_request_mem_region(&pd->dev, mem->start,
+					 resource_size(mem),
+					 dev_name(&pd->dev));
+	if (!ioarea) {
+		dev_err(&pd->dev, "Unable to request HSI IO mem region\n");
+		return -EBUSY;
+	}
+	dev_dbg(&pd->dev, "hsi_controller_init : ioarea %s [%x, %x]\n",
+		ioarea->name, ioarea->start, ioarea->end);
+
+	hsi_ctrl->phy_base = mem->start;
+	hsi_ctrl->base = devm_ioremap(&pd->dev, mem->start,
+				      resource_size(mem));
+	if (!hsi_ctrl->base) {
+		dev_err(&pd->dev, "Unable to ioremap HSI base IO address\n");
+		return -ENXIO;
+	}
+	dev_dbg(&pd->dev, "hsi_controller_init : hsi_ctrl->base=%x\n",
+		(unsigned int)hsi_ctrl->base);
+
+	hsi_ctrl->id = pd->id;
+	if (pdata->num_ports > HSI_MAX_PORTS) {
+		dev_err(&pd->dev, "The HSI driver does not support enough "
+			"ports!\n");
+		return -ENXIO;
+	}
+	hsi_ctrl->max_p = pdata->num_ports;
+	hsi_ctrl->in_dma_tasklet = false;
+	hsi_ctrl->fifo_mapping_strategy = HSI_FIFO_MAPPING_UNDEF;
+	hsi_ctrl->dev = &pd->dev;
+	spin_lock_init(&hsi_ctrl->lock);
+	err = hsi_init_gdd_chan_count(hsi_ctrl);
+	if (err < 0)
+		goto rback1;
+
+	err = hsi_ports_init(hsi_ctrl);
+	if (err < 0)
+		goto rback1;
+
+	err = hsi_request_gdd_irq(hsi_ctrl);
+	if (err < 0)
+		goto rback2;
+
+	/* Everything is fine */
+	return 0;
+rback2:
+	hsi_ports_exit(hsi_ctrl, hsi_ctrl->max_p);
+rback1:
+	dev_err(&pd->dev, "Error on hsi_controller initialization\n");
+	return err;
+}
+
+/* Tear down what hsi_controller_init() set up: GDD IRQ, then all port
+ * IRQ resources (the devm-managed IO mappings are released by the core). */
+static void hsi_controller_exit(struct hsi_dev *hsi_ctrl)
+{
+	hsi_gdd_exit(hsi_ctrl);
+	hsi_ports_exit(hsi_ctrl, hsi_ctrl->max_p);
+}
+
+/* HSI Platform Device probing & hsi_device registration */
+/* Probe sequence: allocate hsi_dev, init the controller (iomap, ports,
+ * IRQs), enable runtime PM and clocks, soft-reset the hardware, program
+ * default PM and port settings, expose debugfs, register the per-channel
+ * hsi_devices and enable wakeup. Clocks are dropped once HW access is
+ * done. Rollback labels undo the steps in reverse order. */
+static int __init hsi_platform_device_probe(struct platform_device *pd)
+{
+	struct hsi_platform_data *pdata = pd->dev.platform_data;
+	struct hsi_dev *hsi_ctrl;
+	u32 revision;
+	int err;
+
+	dev_dbg(&pd->dev, "HSI DRIVER : hsi_platform_device_probe\n");
+
+	dev_dbg(&pd->dev, "The platform device probed is an %s\n",
+		hsi_driver_device_is_hsi(pd) ? "HSI" : "SSI");
+
+	if (!pdata) {
+		dev_err(&pd->dev, "No platform_data found on hsi device\n");
+		return -ENXIO;
+	}
+
+	hsi_ctrl = kzalloc(sizeof(*hsi_ctrl), GFP_KERNEL);
+	if (hsi_ctrl == NULL) {
+		dev_err(&pd->dev, "Could not allocate memory for"
+			" struct hsi_dev\n");
+		return -ENOMEM;
+	}
+
+	platform_set_drvdata(pd, hsi_ctrl);
+	err = hsi_controller_init(hsi_ctrl, pd);
+	if (err < 0) {
+		dev_err(&pd->dev, "Could not initialize hsi controller:"
+			" %d\n", err);
+		goto rollback1;
+	}
+	/* Wakeup dependency was disabled for HSI <-> MPU PM_L3INIT_HSI_WKDEP */
+#if 0
+	omap_writel(0x141, 0x4A307338);
+#endif
+	pm_runtime_enable(hsi_ctrl->dev);
+	hsi_clocks_enable(hsi_ctrl->dev, __func__);
+
+	/* Non critical SW Reset */
+	err = hsi_softreset(hsi_ctrl);
+	if (err < 0)
+		goto rollback2;
+
+	hsi_set_pm_default(hsi_ctrl);
+
+	/* Configure HSI ports */
+	hsi_set_ports_default(hsi_ctrl, pd);
+
+	/* Gather info from registers for the driver.(REVISION) */
+	revision = hsi_inl(hsi_ctrl->base, HSI_SYS_REVISION_REG);
+	if (hsi_driver_device_is_hsi(pd))
+		dev_info(hsi_ctrl->dev, "HSI Hardware REVISION 0x%x\n",
+			 revision);
+	else
+		dev_info(hsi_ctrl->dev, "SSI Hardware REVISION %d.%d\n",
+			 (revision & HSI_SSI_REV_MAJOR) >> 4,
+			 (revision & HSI_SSI_REV_MINOR));
+
+	err = hsi_debug_add_ctrl(hsi_ctrl);
+	if (err < 0) {
+		dev_err(&pd->dev,
+			"Could not add hsi controller to debugfs: %d\n", err);
+		goto rollback2;
+	}
+
+	err = register_hsi_devices(hsi_ctrl);
+	if (err < 0) {
+		dev_err(&pd->dev, "Could not register hsi_devices: %d\n", err);
+		goto rollback3;
+	}
+
+	/* Allow HSI to wake up the platform */
+	device_init_wakeup(hsi_ctrl->dev, true);
+
+#ifdef K3_0_PORTING_HSI_MISSING_FEATURE
+	/* Set the HSI FCLK to default. */
+	err = omap_device_set_rate(hsi_ctrl->dev, hsi_ctrl->dev,
+				   pdata->default_hsi_fclk);
+	if (err)
+		dev_err(&pd->dev, "Cannot set HSI FClk to default value: %ld\n",
+			pdata->default_hsi_fclk);
+#endif
+
+	/* From here no need for HSI HW access */
+	hsi_clocks_disable(hsi_ctrl->dev, __func__);
+
+	return err;
+
+rollback3:
+	hsi_debug_remove_ctrl(hsi_ctrl);
+rollback2:
+	hsi_controller_exit(hsi_ctrl);
+
+	/* From here no need for HSI HW access */
+	hsi_clocks_disable(hsi_ctrl->dev, __func__);
+
+rollback1:
+	kfree(hsi_ctrl);
+	return err;
+}
+
+/* Undo hsi_platform_device_probe(): unregister children, disable PM and
+ * wakeup support, remove debugfs entries and free the controller. */
+static int __exit hsi_platform_device_remove(struct platform_device *pd)
+{
+	struct hsi_dev *hsi_ctrl = platform_get_drvdata(pd);
+
+	dev_dbg(&pd->dev, "HSI DRIVER : hsi_platform_device_remove\n");
+
+	if (!hsi_ctrl)
+		return 0;
+
+	unregister_hsi_devices(hsi_ctrl);
+
+	/* Balance device_init_wakeup(dev, true) done at probe time */
+	device_init_wakeup(hsi_ctrl->dev, false);
+
+	/* From here no need for HSI HW access */
+	pm_runtime_disable(hsi_ctrl->dev);
+
+	hsi_debug_remove_ctrl(hsi_ctrl);
+	hsi_controller_exit(hsi_ctrl);
+
+	platform_set_drvdata(pd, NULL);
+	kfree(hsi_ctrl);
+
+	return 0;
+}
+
+#ifdef CONFIG_SUSPEND
+/*
+ * System suspend (noirq phase) handler.
+ * NOTE(review): pdata is assumed non-NULL - probe fails without
+ * platform_data, so this holds for any bound device.
+ * NOTE(review): the 0 passed to board_suspend() looks like a port
+ * index; confirm whether it means "port 0" or "all ports".
+ */
+static int hsi_suspend_noirq(struct device *dev)
+{
+ struct hsi_platform_data *pdata = dev->platform_data;
+ struct platform_device *pd = to_platform_device(dev);
+ struct hsi_dev *hsi_ctrl = platform_get_drvdata(pd);
+
+ dev_dbg(dev, "%s\n", __func__);
+
+ /* If HSI is enabled, CAWAKE IO wakeup has been disabled and */
+ /* we don't want to re-enable it here. HSI interrupt shall be */
+ /* generated normally because HSI HW is ON. */
+ if (hsi_ctrl->clock_enabled) {
+ dev_info(dev, "Platform Suspend while HSI active\n");
+ return 0;
+ }
+
+ /* Perform HSI board specific action before platform suspend */
+ if (pdata->board_suspend)
+ pdata->board_suspend(0, device_may_wakeup(dev));
+
+ return 0;
+}
+
+/*
+ * System resume (noirq phase) handler.
+ *
+ * Deliberately does NOT schedule the interrupt tasklet: the PRCM
+ * interrupt handler already performs HSI IO checking on wakeup from
+ * device OFF (suspend) and from L3INIT RET (idle).  This routine runs
+ * only on wakeup from suspend, so scheduling the tasklet here would
+ * duplicate the PRCM path, which covers more cases and therefore wins.
+ * NOTE(review): as in hsi_suspend_noirq(), pdata is assumed non-NULL.
+ */
+static int hsi_resume_noirq(struct device *dev)
+{
+ struct hsi_platform_data *pdata = dev->platform_data;
+
+ dev_dbg(dev, "%s\n", __func__);
+
+ /* Perform (optional) HSI board specific action after platform wakeup */
+ if (pdata->board_resume)
+ pdata->board_resume(0);
+
+ return 0;
+}
+#endif /* CONFIG_SUSPEND */
+
+#ifdef CONFIG_PM_RUNTIME
+/**
+ * hsi_runtime_resume - executed by the PM core when the device is woken up
+ * @dev: reference to the hsi device
+ *
+ * Restores the HSI register context and disables the IO wakeup
+ * mechanism, which is not needed while the HSI block is powered.
+ * Always returns 0.
+ */
+int hsi_runtime_resume(struct device *dev)
+{
+ struct platform_device *pd = to_platform_device(dev);
+ struct hsi_dev *hsi_ctrl = platform_get_drvdata(pd);
+ struct hsi_platform_data *pdata = hsi_ctrl->dev->platform_data;
+ dev_dbg(dev, "%s\n", __func__);
+
+ if (hsi_ctrl->clock_enabled)
+ dev_warn(dev, "Warning: clock status mismatch vs runtime PM\n");
+
+ hsi_ctrl->clock_enabled = true;
+
+ /* Restore context */
+ hsi_restore_ctx(hsi_ctrl);
+
+ /* When HSI is ON, no need for IO wakeup mechanism */
+ pdata->wakeup_disable(0);
+
+ /* HSI device is now fully operational and _must_ be able to */
+ /* complete I/O operations */
+
+ return 0;
+}
+
+/**
+ * hsi_runtime_suspend - prepare HSI for low power: the device will not
+ *	process data and will not communicate with the CPU
+ * @dev: reference to the hsi device
+ *
+ * Saves the register context, forces every HSR into SLEEP mode so that
+ * ACREADY stays low while HSI is idle, then arms or disarms the IO
+ * wakeup mechanism according to device_may_wakeup().
+ *
+ * Return: always 0 here; the busy checks that veto suspend live in
+ * hsi_runtime_idle() (previous doc claimed -EBUSY/-EAGAIN - stale).
+ */
+int hsi_runtime_suspend(struct device *dev)
+{
+ struct platform_device *pd = to_platform_device(dev);
+ struct hsi_dev *hsi_ctrl = platform_get_drvdata(pd);
+ struct hsi_platform_data *pdata = hsi_ctrl->dev->platform_data;
+ int port;
+ dev_dbg(dev, "%s\n", __func__);
+
+ if (!hsi_ctrl->clock_enabled)
+ dev_warn(dev, "Warning: clock status mismatch vs runtime PM\n");
+
+ /* Save context */
+ hsi_save_ctx(hsi_ctrl);
+
+ hsi_ctrl->clock_enabled = false;
+
+ /* Put HSR into SLEEP mode to force ACREADY to low while HSI is idle */
+ for (port = 1; port <= pdata->num_ports; port++) {
+ hsi_outl_and(HSI_HSR_MODE_MODE_VAL_SLEEP, hsi_ctrl->base,
+ HSI_HSR_MODE_REG(port));
+ }
+
+ /* HSI is going to INA/RET/OFF, it needs IO wakeup mechanism enabled */
+ if (device_may_wakeup(dev))
+ pdata->wakeup_enable(0);
+ else
+ pdata->wakeup_disable(0);
+
+ /* HSI is now ready to be put in low power state */
+
+ return 0;
+}
+
+/*
+ * Runtime PM idle callback: the usage counters say the device looks
+ * idle; verify against the HW state whether it can really be suspended.
+ */
+static int hsi_runtime_idle(struct device *dev)
+{
+	struct hsi_dev *hsi_ctrl =
+			platform_get_drvdata(to_platform_device(dev));
+
+	dev_dbg(dev, "%s\n", __func__);
+
+	/* Any ongoing port activity vetoes the suspend */
+	if (hsi_is_hsi_controller_busy(hsi_ctrl)) {
+		dev_dbg(dev, "hsi_runtime_idle: HSI port busy\n");
+		return -EBUSY;
+	}
+
+	/* The transmitter state machine must have fully drained too */
+	if (hsi_is_hst_controller_busy(hsi_ctrl)) {
+		dev_dbg(dev, "hsi_runtime_idle: HST FSM not IDLE !\n");
+		return -EBUSY;
+	}
+
+	/* HSI_TODO : check also the interrupt status registers.*/
+
+	return 0;
+}
+
+#endif /* CONFIG_PM_RUNTIME */
+
+/* Return non-zero when the probed device is the HSI flavour of the IP
+ * (vs legacy SSI), based on the matched platform_device_id entry. */
+int hsi_driver_device_is_hsi(struct platform_device *dev)
+{
+	/* Keep const-correctness instead of casting it away */
+	const struct platform_device_id *id = platform_get_device_id(dev);
+
+	/* Guard against a device bound without an id-table match */
+	return id && id->driver_data == HSI_DRV_DEVICE_HSI;
+}
+
+/* List of devices supported by this driver.  The table is read-only,
+ * and platform_driver.id_table is const-qualified, so declare it const. */
+static const struct platform_device_id hsi_id_table[] = {
+	{"omap_hsi", HSI_DRV_DEVICE_HSI},
+	{"omap_ssi", HSI_DRV_DEVICE_SSI},
+	{},
+};
+
+MODULE_DEVICE_TABLE(platform, hsi_id_table);
+
+#ifdef CONFIG_PM
+/* Dev PM ops: noirq system sleep hooks and runtime PM hooks, each set
+ * compiled in only when the corresponding PM feature is configured. */
+static const struct dev_pm_ops hsi_driver_pm_ops = {
+#ifdef CONFIG_SUSPEND
+ .suspend_noirq = hsi_suspend_noirq,
+ .resume_noirq = hsi_resume_noirq,
+#endif
+#ifdef CONFIG_PM_RUNTIME
+ .runtime_suspend = hsi_runtime_suspend,
+ .runtime_resume = hsi_runtime_resume,
+ .runtime_idle = hsi_runtime_idle,
+#endif
+};
+
+#define HSI_DRIVER_PM_OPS_PTR (&hsi_driver_pm_ops)
+
+#else /* !CONFIG_PM */
+
+/* Without CONFIG_PM there are no PM ops to point at */
+#define HSI_DRIVER_PM_OPS_PTR NULL
+
+#endif
+
+static struct platform_driver hsi_pdriver = {
+	.driver = {
+		.name = HSI_MODULENAME,
+		.owner = THIS_MODULE,
+		/* HSI_DRIVER_PM_OPS_PTR is defined as NULL when !CONFIG_PM,
+		 * so the previous inner #ifdef CONFIG_PM was redundant */
+		.pm = HSI_DRIVER_PM_OPS_PTR,
+	},
+	.id_table = hsi_id_table,
+	/* No .probe here: it is passed to platform_driver_probe() so the
+	 * __init probe code can be discarded after boot */
+	.remove = __exit_p(hsi_platform_device_remove),
+};
+
+/* HSI bus and platform driver registration */
+static int __init hsi_driver_init(void)
+{
+	int err;
+
+	pr_info(LOG_NAME "HSI DRIVER Version " HSI_DRIVER_VERSION "\n");
+
+	/* The (virtual) HSI bus comes first: devices hang off it */
+	err = hsi_bus_init();
+	if (err < 0) {
+		pr_err(LOG_NAME "HSI bus_register err %d\n", err);
+		goto out;
+	}
+
+	err = hsi_debug_init();
+	if (err < 0) {
+		pr_err(LOG_NAME "HSI Debugfs failed %d\n", err);
+		goto out_bus;
+	}
+
+	/* Register the HSI platform driver */
+	err = platform_driver_probe(&hsi_pdriver, hsi_platform_device_probe);
+	if (err < 0) {
+		pr_err(LOG_NAME "Platform DRIVER register FAILED: %d\n", err);
+		goto out_debug;
+	}
+
+	return 0;
+
+out_debug:
+	hsi_debug_exit();
+out_bus:
+	hsi_bus_exit();
+out:
+	return err;
+}
+
+/* Module teardown: unwind in reverse order of hsi_driver_init() */
+static void __exit hsi_driver_exit(void)
+{
+	platform_driver_unregister(&hsi_pdriver);
+	hsi_debug_exit();
+	hsi_bus_exit();
+
+	pr_info(LOG_NAME "HSI DRIVER removed\n");
+}
+
+module_init(hsi_driver_init);
+module_exit(hsi_driver_exit);
+
+MODULE_ALIAS("platform:" HSI_MODULENAME);
+MODULE_AUTHOR("Carlos Chinea / Nokia");
+MODULE_AUTHOR("Sebastien JAN / Texas Instruments");
+MODULE_AUTHOR("Djamil ELAIDI / Texas Instruments");
+MODULE_DESCRIPTION("MIPI High-speed Synchronous Serial Interface (HSI) Driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/omap_hsi/hsi_driver.h b/drivers/omap_hsi/hsi_driver.h
new file mode 100644
index 0000000..0991d98
--- /dev/null
+++ b/drivers/omap_hsi/hsi_driver.h
@@ -0,0 +1,398 @@
+/*
+ * hsi_driver.h
+ *
+ * Header file for the HSI driver low level interface.
+ *
+ * Copyright (C) 2007-2008 Nokia Corporation. All rights reserved.
+ * Copyright (C) 2009 Texas Instruments, Inc.
+ *
+ * Author: Carlos Chinea <carlos.chinea@nokia.com>
+ * Author: Sebastien JAN <s-jan@ti.com>
+ *
+ * This package is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
+ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
+ */
+
+#ifndef __HSI_DRIVER_H__
+#define __HSI_DRIVER_H__
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/clk.h>
+#include <linux/ioport.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/spinlock.h>
+#include <linux/io.h>
+#include <linux/platform_device.h>
+#include <linux/gpio.h>
+#include <linux/notifier.h>
+#include <linux/debugfs.h>
+#include <linux/seq_file.h>
+#include <linux/pm_runtime.h>
+
+#include <linux/hsi_driver_if.h>
+#include <plat/omap_hsi.h>
+
+/* Channel states */
+#define HSI_CH_OPEN 0x01
+#define HSI_CH_RX_POLL 0x10
+#define HSI_CH_ACWAKE 0x02 /* ACWAKE line status */
+
+#define HSI_CH_NUMBER_NONE 0xFF
+/*
+ * The number of channels handled by the driver in the ports, or the highest
+ * port channel number (+1) used. (MAX:8 for SSI; 16 for HSI)
+ * Reducing this value optimizes the driver memory footprint.
+ */
+#define HSI_PORT_MAX_CH HSI_CHANNELS_MAX
+
+/* Number of DMA channels when nothing is defined for the device */
+#define HSI_DMA_CHANNEL_DEFAULT 8
+
+
+#define LOG_NAME "OMAP HSI: "
+
+/* SW strategies for HSI FIFO mapping */
+enum {
+ HSI_FIFO_MAPPING_UNDEF = 0,
+ HSI_FIFO_MAPPING_SSI, /* 8 FIFOs per port (SSI compatible mode) */
+ HSI_FIFO_MAPPING_ALL_PORT1, /* ALL FIFOs mapped on 1st port */
+};
+#define HSI_FIFO_MAPPING_DEFAULT HSI_FIFO_MAPPING_ALL_PORT1
+
+/* Device identifying constants */
+enum {
+ HSI_DRV_DEVICE_HSI,
+ HSI_DRV_DEVICE_SSI
+};
+
+/**
+ * struct hsi_data - HSI buffer descriptor
+ * @addr: pointer to the buffer where to send or receive data
+ * @size: size in words (32 bits) of the buffer
+ * @lch: associated GDD (DMA) logical channel number, if any
+ */
+struct hsi_data {
+ u32 *addr;
+ unsigned int size;
+ int lch;
+};
+
+/**
+ * struct hsi_channel - HSI channel data
+ * @read_data: Incoming HSI buffer descriptor
+ * @write_data: Outgoing HSI buffer descriptor
+ * @hsi_port: Reference to port where the channel belongs to
+ * @flags: Tracks if channel has been open
+ * @channel_number: HSI channel number
+ * @rw_lock: Read/Write lock to serialize access to callback and hsi_device
+ * @dev: Reference to the associated hsi_device channel
+ * @write_done: Callback to signal TX completed.
+ * @read_done: Callback to signal RX completed.
+ * @port_event: Callback to signal port events (RX Error, HWBREAK, CAWAKE ...)
+ */
+struct hsi_channel {
+ struct hsi_data read_data;
+ struct hsi_data write_data;
+ struct hsi_port *hsi_port;
+ u8 flags;
+ u8 channel_number;
+ rwlock_t rw_lock;
+ struct hsi_device *dev;
+ void (*write_done) (struct hsi_device *dev, unsigned int size);
+ void (*read_done) (struct hsi_device *dev, unsigned int size);
+ void (*port_event) (struct hsi_device *dev, unsigned int event,
+ void *arg);
+};
+
+/**
+ * struct hsi_port - hsi port driver data
+ * @hsi_channel: Array of channels in the port
+ * @hsi_controller: Reference to the HSI controller
+ * @flags: Port state flags (semantics not visible here - TODO confirm)
+ * @port_number: port number
+ * @max_ch: maximum number of channels supported on the port
+ * @n_irq: HSI irq line use to handle interrupts (0 or 1)
+ * @irq: IRQ number
+ * @cawake_gpio: GPIO number for cawake line (-1 if none)
+ * @cawake_gpio_irq: IRQ number for cawake gpio events
+ * @cawake_status: Tracks CAWAKE line status
+ * @cawake_off_event: True if CAWAKE event was detected from OFF mode
+ * @acwake_status: Bitmap to track ACWAKE line status per channel
+ * @in_int_tasklet: True if interrupt tasklet for this port is currently running
+ * @in_cawake_tasklet: True if CAWAKE tasklet for this port is currently running
+ * @counters_on: indicates if the HSR counters are in use or not
+ * @reg_counters: stores the previous counters values when deactivated
+ * @lock: Serialize access to the port registers and internal data
+ * @hsi_tasklet: Bottom half for interrupts when clocks are enabled
+ * @cawake_tasklet: Bottom half for cawake events
+ */
+struct hsi_port {
+ struct hsi_channel hsi_channel[HSI_PORT_MAX_CH];
+ struct hsi_dev *hsi_controller;
+ u8 flags;
+ u8 port_number; /* Range [1,2] */
+ u8 max_ch;
+ u8 n_irq;
+ int irq;
+ int cawake_gpio;
+ int cawake_gpio_irq;
+ int cawake_status;
+ bool cawake_off_event;
+ unsigned int acwake_status; /* HSI_TODO : fine tune init values */
+ bool in_int_tasklet;
+ bool in_cawake_tasklet;
+ int counters_on;
+ unsigned long reg_counters;
+ spinlock_t lock; /* access to the port registers and internal data */
+ struct tasklet_struct hsi_tasklet;
+ struct tasklet_struct cawake_tasklet; /* SSI_TODO : need to replace */
+ /* by a workqueue */
+};
+
+/**
+ * struct hsi_dev - hsi controller driver data
+ * This structure is saved into platform_device->dev->p->driver_data
+ *
+ * @hsi_port: Array of hsi ports enabled in the controller
+ * @id: HSI controller platform id number
+ * @max_p: Number of ports enabled in the controller
+ * @base: HSI registers base virtual address
+ * @phy_base: HSI registers base physical address
+ * @lock: Serializes access to internal data and regs
+ * @clock_enabled: Indicates if HSI Clocks are ON
+ * @gdd_irq: GDD (DMA) irq number
+ * @fifo_mapping_strategy: Selected strategy for fifo to ports/channels mapping
+ * @gdd_usecount: Holds the number of ongoing DMA transfers
+ * @last_gdd_lch: Last used GDD logical channel
+ * @gdd_chan_count: Number of available DMA channels (must be a power of 2)
+ * @in_dma_tasklet: True if DMA tasklet for the controller is currently running
+ * @set_min_bus_tput: (PM) callback to set minimum bus throughput
+ * @hsi_nb: (PM) Notification block for DVFS notification chain
+ * @hsi_gdd_tasklet: Bottom half for DMA Interrupts when clocks are enabled
+ * @dir: debugfs base directory
+ * @dev: Reference to the HSI platform device
+ *
+ * NOTE(review): stale entries for @hsi_clk and @clk_notifier_* were
+ * removed - the corresponding fields do not exist in the struct.
+ */
+struct hsi_dev { /* HSI_TODO: should be later renamed into hsi_controller*/
+ struct hsi_port hsi_port[HSI_MAX_PORTS];
+ int id;
+ u8 max_p;
+ void __iomem *base;
+ unsigned long phy_base;
+ spinlock_t lock; /* Serializes access to internal data and regs */
+ bool clock_enabled;
+ int gdd_irq;
+ unsigned int fifo_mapping_strategy;
+ unsigned int gdd_usecount;
+ unsigned int last_gdd_lch;
+ unsigned int gdd_chan_count;
+ bool in_dma_tasklet;
+ void (*set_min_bus_tput) (struct device *dev, u8 agent_id,
+ unsigned long r);
+ struct notifier_block hsi_nb;
+ struct tasklet_struct hsi_gdd_tasklet;
+#ifdef CONFIG_DEBUG_FS
+ struct dentry *dir;
+#endif
+ struct device *dev;
+};
+
+/**
+ * struct hsi_platform_data - Board specific data
+ * @set_min_bus_tput: (PM) callback to request a minimum bus throughput
+ * @device_enable: callback enabling the underlying omap device
+ * @device_shutdown: callback shutting the underlying omap device down
+ * @device_idle: callback idling the underlying omap device
+ * @wakeup_enable: enable IO wakeup for the given hsi port
+ * @wakeup_disable: disable IO wakeup for the given hsi port
+ * @wakeup_is_from_hsi: query whether the last wakeup came from HSI
+ * @board_suspend: board hook run before platform suspend
+ * @board_resume: board hook run after platform wakeup
+ * @num_ports: number of HSI ports wired on the board
+ * @ctx: controller context (presumably register save area - confirm)
+ * @hsi_gdd_chan_count: number of GDD (DMA) channels
+ * @default_hsi_fclk: default HSI functional clock rate, in Hz
+ */
+struct hsi_platform_data {
+ void (*set_min_bus_tput) (struct device *dev, u8 agent_id,
+ unsigned long r);
+ int (*device_enable) (struct platform_device *pdev);
+ int (*device_shutdown) (struct platform_device *pdev);
+ int (*device_idle) (struct platform_device *pdev);
+ int (*wakeup_enable) (int hsi_port);
+ int (*wakeup_disable) (int hsi_port);
+ int (*wakeup_is_from_hsi) (void);
+ int (*board_suspend)(int hsi_port, bool dev_may_wakeup);
+ int (*board_resume)(int hsi_port);
+ u8 num_ports;
+ struct ctrl_ctx *ctx;
+ u8 hsi_gdd_chan_count;
+ unsigned long default_hsi_fclk;
+};
+
+/* HSI Bus */
+extern struct bus_type hsi_bus_type;
+
+int hsi_port_event_handler(struct hsi_port *p, unsigned int event, void *arg);
+int hsi_bus_init(void);
+void hsi_bus_exit(void);
+/* End HSI Bus */
+
+void hsi_reset_ch_read(struct hsi_channel *ch);
+void hsi_reset_ch_write(struct hsi_channel *ch);
+bool hsi_is_channel_busy(struct hsi_channel *ch);
+bool hsi_is_hsi_port_busy(struct hsi_port *pport);
+bool hsi_is_hsi_controller_busy(struct hsi_dev *hsi_ctrl);
+bool hsi_is_hst_port_busy(struct hsi_port *pport);
+bool hsi_is_hst_controller_busy(struct hsi_dev *hsi_ctrl);
+
+int hsi_driver_enable_interrupt(struct hsi_port *pport, u32 flag);
+int hsi_driver_enable_read_interrupt(struct hsi_channel *hsi_channel,
+ u32 *data);
+int hsi_driver_enable_write_interrupt(struct hsi_channel *hsi_channel,
+ u32 *data);
+bool hsi_is_dma_read_int_pending(struct hsi_dev *hsi_ctrl);
+int hsi_driver_read_dma(struct hsi_channel *hsi_channel, u32 * data,
+ unsigned int count);
+int hsi_driver_write_dma(struct hsi_channel *hsi_channel, u32 * data,
+ unsigned int count);
+
+int hsi_driver_cancel_read_interrupt(struct hsi_channel *ch);
+int hsi_driver_cancel_write_interrupt(struct hsi_channel *ch);
+void hsi_driver_disable_read_interrupt(struct hsi_channel *ch);
+void hsi_driver_disable_write_interrupt(struct hsi_channel *ch);
+int hsi_driver_cancel_write_dma(struct hsi_channel *ch);
+int hsi_driver_cancel_read_dma(struct hsi_channel *ch);
+int hsi_do_cawake_process(struct hsi_port *pport);
+
+int hsi_driver_device_is_hsi(struct platform_device *dev);
+
+int hsi_mpu_init(struct hsi_port *hsi_p, const char *irq_name);
+void hsi_mpu_exit(struct hsi_port *hsi_p);
+
+int hsi_gdd_init(struct hsi_dev *hsi_ctrl, const char *irq_name);
+void hsi_gdd_exit(struct hsi_dev *hsi_ctrl);
+
+int hsi_cawake_init(struct hsi_port *port, const char *irq_name);
+void hsi_cawake_exit(struct hsi_port *port);
+
+int hsi_fifo_get_id(struct hsi_dev *hsi_ctrl, unsigned int channel,
+ unsigned int port);
+int hsi_fifo_get_chan(struct hsi_dev *hsi_ctrl, unsigned int fifo,
+ unsigned int *channel, unsigned int *port);
+int hsi_fifo_mapping(struct hsi_dev *hsi_ctrl, unsigned int mtype);
+long hsi_hst_bufstate_f_reg(struct hsi_dev *hsi_ctrl,
+ unsigned int port, unsigned int channel);
+long hsi_hsr_bufstate_f_reg(struct hsi_dev *hsi_ctrl,
+ unsigned int port, unsigned int channel);
+long hsi_hst_buffer_reg(struct hsi_dev *hsi_ctrl,
+ unsigned int port, unsigned int channel);
+long hsi_hsr_buffer_reg(struct hsi_dev *hsi_ctrl,
+ unsigned int port, unsigned int channel);
+u8 hsi_get_rx_fifo_occupancy(struct hsi_dev *hsi_ctrl, u8 fifo);
+void hsi_set_pm_force_hsi_on(struct hsi_dev *hsi_ctrl);
+void hsi_set_pm_default(struct hsi_dev *hsi_ctrl);
+int hsi_softreset(struct hsi_dev *hsi_ctrl);
+void hsi_softreset_driver(struct hsi_dev *hsi_ctrl);
+
+void hsi_clocks_disable_channel(struct device *dev, u8 channel_number,
+ const char *s);
+int hsi_clocks_enable_channel(struct device *dev, u8 channel_number,
+ const char *s);
+#ifdef CONFIG_PM_RUNTIME
+extern int hsi_runtime_resume(struct device *dev);
+extern int hsi_runtime_suspend(struct device *dev);
+#else
+static inline int hsi_runtime_resume(struct device *dev) { return -ENOSYS; }
+static inline int hsi_runtime_suspend(struct device *dev) { return -ENOSYS; }
+#endif
+void hsi_save_ctx(struct hsi_dev *hsi_ctrl);
+void hsi_restore_ctx(struct hsi_dev *hsi_ctrl);
+
+
+#ifdef CONFIG_DEBUG_FS
+int hsi_debug_init(void);
+void hsi_debug_exit(void);
+int hsi_debug_add_ctrl(struct hsi_dev *hsi_ctrl);
+void hsi_debug_remove_ctrl(struct hsi_dev *hsi_ctrl);
+#else
+#define hsi_debug_add_ctrl(hsi_ctrl) 0
+#define hsi_debug_remove_ctrl(hsi_ctrl)
+#define hsi_debug_init() 0
+#define hsi_debug_exit()
+#endif /* CONFIG_DEBUG_FS */
+
+/* Map (port, channel) to the driver's channel descriptor.
+ * port is 1-based (range [1, max_p]); channel is 0-based.
+ * No bounds checking is done - callers must pass valid indices. */
+static inline struct hsi_channel *hsi_ctrl_get_ch(struct hsi_dev *hsi_ctrl,
+ unsigned int port,
+ unsigned int channel)
+{
+ return &hsi_ctrl->hsi_port[port - 1].hsi_channel[channel];
+}
+
+/* HSI IO access.
+ * The HSI registers are memory-mapped (ioremap'ed base), so use the
+ * MMIO accessors readl/writel/readw/writew rather than the port-IO
+ * inl/outl family - the latter also required casting the __iomem
+ * cookie to 'unsigned int', which breaks on 64-bit and defeats
+ * static __iomem annotation checking.
+ */
+static inline u32 hsi_inl(void __iomem *base, u32 offset)
+{
+	return readl(base + offset);
+}
+
+static inline void hsi_outl(u32 data, void __iomem *base, u32 offset)
+{
+	writel(data, base + offset);
+}
+
+/* Read-modify-write: set the bits in data */
+static inline void hsi_outl_or(u32 data, void __iomem *base, u32 offset)
+{
+	hsi_outl(hsi_inl(base, offset) | data, base, offset);
+}
+
+/* Read-modify-write: keep only the bits in data (data is a mask) */
+static inline void hsi_outl_and(u32 data, void __iomem *base, u32 offset)
+{
+	hsi_outl(hsi_inl(base, offset) & data, base, offset);
+}
+
+static inline u16 hsi_inw(void __iomem *base, u32 offset)
+{
+	return readw(base + offset);
+}
+
+static inline void hsi_outw(u16 data, void __iomem *base, u32 offset)
+{
+	writew(data, base + offset);
+}
+
+static inline void hsi_outw_or(u16 data, void __iomem *base, u32 offset)
+{
+	hsi_outw(hsi_inw(base, offset) | data, base, offset);
+}
+
+static inline void hsi_outw_and(u16 data, void __iomem *base, u32 offset)
+{
+	hsi_outw(hsi_inw(base, offset) & data, base, offset);
+}
+
+/* Sample the CAWAKE line: read from the HSR MODE register on the HSI
+ * flavour of the IP, or from the dedicated GPIO on SSI.  Returns 0/1,
+ * or -ENXIO when neither source is available. */
+static inline int hsi_get_cawake(struct hsi_port *port)
+{
+	struct hsi_dev *ctrl = port->hsi_controller;
+	u32 mode;
+
+	if (hsi_driver_device_is_hsi(to_platform_device(ctrl->dev))) {
+		mode = hsi_inl(ctrl->base,
+			       HSI_HSR_MODE_REG(port->port_number));
+		return (mode & HSI_HSR_MODE_WAKE_STATUS) ==
+		       HSI_HSR_MODE_WAKE_STATUS;
+	}
+
+	if (port->cawake_gpio >= 0)
+		return gpio_get_value(port->cawake_gpio);
+
+	return -ENXIO;
+}
+
+/* Disable HSI clocks for the whole controller (no specific channel). */
+static inline void hsi_clocks_disable(struct device *dev, const char *s)
+{
+ hsi_clocks_disable_channel(dev, HSI_CH_NUMBER_NONE, s);
+}
+
+/* Enable HSI clocks for the whole controller (no specific channel). */
+static inline int hsi_clocks_enable(struct device *dev, const char *s)
+{
+ return hsi_clocks_enable_channel(dev, HSI_CH_NUMBER_NONE, s);
+}
+
+#endif /* __HSI_DRIVER_H__ */
diff --git a/drivers/omap_hsi/hsi_driver_bus.c b/drivers/omap_hsi/hsi_driver_bus.c
new file mode 100644
index 0000000..4bce43d
--- /dev/null
+++ b/drivers/omap_hsi/hsi_driver_bus.c
@@ -0,0 +1,203 @@
+/*
+ * hsi_driver_bus.c
+ *
+ * Implements an HSI bus, device and driver interface.
+ *
+ * Copyright (C) 2007-2008 Nokia Corporation. All rights reserved.
+ * Copyright (C) 2009 Texas Instruments, Inc.
+ *
+ * Author: Carlos Chinea <carlos.chinea@nokia.com>
+ * Author: Sebastien JAN <s-jan@ti.com>
+ *
+ * This package is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
+ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
+ */
+
+#include <linux/device.h>
+#include "hsi_driver.h"
+
+#define HSI_PREFIX "hsi:"
+
+struct bus_type hsi_bus_type;
+
+/* sysfs "modalias" attribute: "hsi:<device name>" */
+static ssize_t modalias_show(struct device *dev, struct device_attribute *a,
+			     char *buf)
+{
+	/* sysfs buffers are exactly PAGE_SIZE bytes; the previous bound of
+	 * PAGE_SIZE + 1 allowed snprintf() to overflow buf by one byte */
+	return snprintf(buf, PAGE_SIZE, "%s%s\n", HSI_PREFIX,
+			dev_name(dev));
+}
+
+/* Default attributes exposed for every device on the HSI bus */
+static struct device_attribute hsi_dev_attrs[] = {
+	__ATTR_RO(modalias),
+	__ATTR_NULL,
+};
+
+/* Add MODALIAS to the uevent environment.  Propagate the
+ * add_uevent_var() result (e.g. -ENOMEM when the env buffer is full)
+ * instead of silently discarding it. */
+static int hsi_bus_uevent(struct device *dev, struct kobj_uevent_env *env)
+{
+	return add_uevent_var(env, "MODALIAS=%s%s", HSI_PREFIX, dev_name(dev));
+}
+
+/* Match an hsi_device against an hsi_device_driver using the driver's
+ * controller mask and per-port channel masks.  Returns 1 on match. */
+static int hsi_bus_match(struct device *device, struct device_driver *driver)
+{
+	struct hsi_device *dev = to_hsi_device(device);
+	struct hsi_device_driver *drv = to_hsi_device_driver(driver);
+
+	pr_debug("HSI DRIVER BUS : hsi_bus_match for ctrl:%d, port:%d, ch%d\n",
+		 dev->n_ctrl, dev->n_p, dev->n_ch);
+
+	if (!test_bit(dev->n_ctrl, &drv->ctrl_mask))
+		return 0;
+
+	if (!test_bit(dev->n_ch, &drv->ch_mask[dev->n_p]))
+		return 0;
+
+	/* Successful matches are routine: log at debug level, not info,
+	 * to avoid spamming the kernel log on every device bind */
+	pr_debug("HSI DRIVER BUS : hsi_bus_match SUCCESS : ctrl:%d (mask:%x),"
+		 " port:%d, ch:%d (mask:%x)\n",
+		 dev->n_ctrl, (u32) drv->ctrl_mask, dev->n_p, dev->n_ch,
+		 (u32) drv->ch_mask[dev->n_p]);
+
+	return 1;
+}
+
+/* Callback for bus_for_each_dev(): tear down one HSI device.
+ * NOTE(review): release() is invoked directly and then
+ * device_unregister() drops the last reference, which normally calls
+ * release() again - this looks like a potential double release.
+ * Confirm against the release implementation installed by
+ * register_hsi_devices(). */
+int hsi_bus_unreg_dev(struct device *device, void *p)
+{
+ device->release(device);
+ device_unregister(device);
+
+ return 0;
+}
+
+/* Register the (virtual) HSI bus with the driver core. */
+int __init hsi_bus_init(void)
+{
+ return bus_register(&hsi_bus_type);
+}
+
+/* Unregister every device still hanging off the bus, then the bus. */
+void hsi_bus_exit(void)
+{
+ bus_for_each_dev(&hsi_bus_type, NULL, NULL, hsi_bus_unreg_dev);
+ bus_unregister(&hsi_bus_type);
+}
+
+/* Bus probe: dispatch to the bound hsi_device_driver's probe hook. */
+static int hsi_bus_probe(struct device *dev)
+{
+	struct hsi_device_driver *drv;
+
+	pr_debug("HSI DRIVER BUS : hsi_bus_probe\n");
+
+	if (!dev->driver)
+		return 0;
+
+	drv = to_hsi_device_driver(dev->driver);
+	if (!drv->probe)
+		return -ENODEV;
+
+	return drv->probe(to_hsi_device(dev));
+}
+
+/* Bus remove: dispatch to the bound driver's remove hook.
+ * NOTE(review): when the driver has no remove hook, dev->driver is
+ * cleared by hand here - that field is normally owned by the driver
+ * core; confirm this is intentional. */
+static int hsi_bus_remove(struct device *dev)
+{
+ struct hsi_device_driver *drv;
+ int ret;
+
+ pr_debug("HSI DRIVER BUS : hsi_bus_remove\n");
+
+ if (!dev->driver)
+ return 0;
+
+ drv = to_hsi_device_driver(dev->driver);
+ if (drv->remove) {
+ ret = drv->remove(to_hsi_device(dev));
+ } else {
+ dev->driver = NULL;
+ ret = 0;
+ }
+
+ return ret;
+}
+
+/* Legacy bus-level suspend: forward to the driver's hook, if any. */
+static int hsi_bus_suspend(struct device *dev, pm_message_t mesg)
+{
+	struct hsi_device_driver *drv;
+
+	if (!dev->driver)
+		return 0;
+
+	drv = to_hsi_device_driver(dev->driver);
+	return drv->suspend ? drv->suspend(to_hsi_device(dev), mesg) : 0;
+}
+
+/* Legacy bus-level resume: forward to the driver's hook, if any. */
+static int hsi_bus_resume(struct device *dev)
+{
+	struct hsi_device_driver *drv;
+
+	if (!dev->driver)
+		return 0;
+
+	drv = to_hsi_device_driver(dev->driver);
+	return drv->resume ? drv->resume(to_hsi_device(dev)) : 0;
+}
+
+/* The (virtual) HSI bus type.
+ * NOTE(review): relies on the legacy bus-level suspend/resume and
+ * dev_attrs interfaces, which later kernels replaced with dev_pm_ops
+ * and dev_groups - keep in mind for forward ports. */
+struct bus_type hsi_bus_type = {
+ .name = "hsi",
+ .dev_attrs = hsi_dev_attrs,
+ .match = hsi_bus_match,
+ .uevent = hsi_bus_uevent,
+ .probe = hsi_bus_probe,
+ .remove = hsi_bus_remove,
+ .suspend = hsi_bus_suspend,
+ .resume = hsi_bus_resume,
+};
+
+/**
+ * hsi_register_driver - Register HSI device driver
+ * @driver - reference to the HSI device driver.
+ *
+ * Returns 0 on success, -EINVAL on a NULL argument, or the error code
+ * from driver_register().
+ */
+int hsi_register_driver(struct hsi_device_driver *driver)
+{
+	int ret;
+
+	if (!driver)
+		return -EINVAL;
+
+	driver->driver.bus = &hsi_bus_type;
+	ret = driver_register(&driver->driver);
+	if (!ret)
+		pr_debug("hsi: driver %s registered\n", driver->driver.name);
+
+	return ret;
+}
+EXPORT_SYMBOL(hsi_register_driver);
+
+/**
+ * hsi_unregister_driver - Unregister HSI device driver
+ * @driver - reference to the HSI device driver; NULL is a no-op.
+ */
+void hsi_unregister_driver(struct hsi_device_driver *driver)
+{
+	if (!driver)
+		return;
+
+	driver_unregister(&driver->driver);
+	pr_debug("hsi: driver %s unregistered\n", driver->driver.name);
+}
+EXPORT_SYMBOL(hsi_unregister_driver);
diff --git a/drivers/omap_hsi/hsi_driver_debugfs.c b/drivers/omap_hsi/hsi_driver_debugfs.c
new file mode 100644
index 0000000..d1f32dd
--- /dev/null
+++ b/drivers/omap_hsi/hsi_driver_debugfs.c
@@ -0,0 +1,500 @@
+/*
+ * hsi_driver_debugfs.c
+ *
+ * Implements HSI debugfs.
+ *
+ * Copyright (C) 2007-2008 Nokia Corporation. All rights reserved.
+ * Copyright (C) 2009 Texas Instruments, Inc.
+ *
+ * Author: Carlos Chinea <carlos.chinea@nokia.com>
+ * Author: Sebastien JAN <s-jan@ti.com>
+ *
+ * This package is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
+ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
+ */
+
+#include <linux/err.h>
+#include <linux/list.h>
+#include <linux/uaccess.h>
+#include <linux/ctype.h>
+#include "hsi_driver.h"
+
+#define HSI_DIR_NAME_SIZE 64
+
+static struct dentry *hsi_dir;
+
+/* debugfs "regs" dump of the controller-level (SYS) registers.
+ * Module clocks are enabled around the reads to keep the IP accessible. */
+static int hsi_debug_show(struct seq_file *m, void *p)
+{
+	struct hsi_dev *hsi_ctrl = m->private;
+	struct platform_device *pdev = to_platform_device(hsi_ctrl->dev);
+
+	hsi_clocks_enable(hsi_ctrl->dev, __func__);
+
+	seq_printf(m, "REVISION\t: 0x%08x\n",
+		   hsi_inl(hsi_ctrl->base, HSI_SYS_REVISION_REG));
+	/* HWINFO register only exists on HSI, not on legacy SSI devices */
+	if (hsi_driver_device_is_hsi(pdev))
+		seq_printf(m, "HWINFO\t\t: 0x%08x\n",
+			   hsi_inl(hsi_ctrl->base, HSI_SYS_HWINFO_REG));
+	seq_printf(m, "SYSCONFIG\t: 0x%08x\n",
+		   hsi_inl(hsi_ctrl->base, HSI_SYS_SYSCONFIG_REG));
+	seq_printf(m, "SYSSTATUS\t: 0x%08x\n",
+		   hsi_inl(hsi_ctrl->base, HSI_SYS_SYSSTATUS_REG));
+
+	hsi_clocks_disable(hsi_ctrl->dev, __func__);
+
+	return 0;
+}
+
+/* debugfs per-port "regs" dump: CAWAKE line state, wakeup and MPU
+ * interrupt enable/status registers, then the transmitter (HST) and
+ * receiver (HSR) register sets, including per-channel buffers and, on
+ * HSI devices, the FIFO mapping registers. */
+static int hsi_debug_port_show(struct seq_file *m, void *p)
+{
+	struct hsi_port *hsi_port = m->private;
+	struct hsi_dev *hsi_ctrl = hsi_port->hsi_controller;
+	void __iomem *base = hsi_ctrl->base;
+	unsigned int port = hsi_port->port_number;
+	int ch, fifo;
+	long buff_offset;
+	struct platform_device *pdev = to_platform_device(hsi_ctrl->dev);
+
+	hsi_clocks_enable(hsi_ctrl->dev, __func__);
+
+	/* CAWAKE is only reported when a GPIO is wired for it */
+	if (hsi_port->cawake_gpio >= 0)
+		seq_printf(m, "CAWAKE\t\t: %d\n", hsi_get_cawake(hsi_port));
+
+	seq_printf(m, "WAKE\t\t: 0x%08x\n",
+		   hsi_inl(base, HSI_SYS_WAKE_REG(port)));
+	seq_printf(m, "MPU_ENABLE_IRQ%d\t: 0x%08x\n", hsi_port->n_irq,
+		   hsi_inl(base,
+			   HSI_SYS_MPU_ENABLE_REG(port, hsi_port->n_irq)));
+	seq_printf(m, "MPU_STATUS_IRQ%d\t: 0x%08x\n", hsi_port->n_irq,
+		   hsi_inl(base,
+			   HSI_SYS_MPU_STATUS_REG(port, hsi_port->n_irq)));
+	/* The "U" (upper channels) interrupt banks are HSI-only */
+	if (hsi_driver_device_is_hsi(pdev)) {
+		seq_printf(m, "MPU_U_ENABLE_IRQ%d\t: 0x%08x\n",
+			   hsi_port->n_irq,
+			   hsi_inl(base, HSI_SYS_MPU_U_ENABLE_REG(port,
+							hsi_port->n_irq)));
+		seq_printf(m, "MPU_U_STATUS_IRQ%d\t: 0x%08x\n", hsi_port->n_irq,
+			   hsi_inl(base,
+				   HSI_SYS_MPU_U_STATUS_REG(port,
+							    hsi_port->n_irq)));
+	}
+	/* HST */
+	seq_printf(m, "\nHST\n===\n");
+	seq_printf(m, "MODE\t\t: 0x%08x\n",
+		   hsi_inl(base, HSI_HST_MODE_REG(port)));
+	seq_printf(m, "FRAMESIZE\t: 0x%08x\n",
+		   hsi_inl(base, HSI_HST_FRAMESIZE_REG(port)));
+	seq_printf(m, "DIVISOR\t\t: 0x%08x\n",
+		   hsi_inl(base, HSI_HST_DIVISOR_REG(port)));
+	seq_printf(m, "CHANNELS\t: 0x%08x\n",
+		   hsi_inl(base, HSI_HST_CHANNELS_REG(port)));
+	seq_printf(m, "ARBMODE\t\t: 0x%08x\n",
+		   hsi_inl(base, HSI_HST_ARBMODE_REG(port)));
+	seq_printf(m, "TXSTATE\t\t: 0x%08x\n",
+		   hsi_inl(base, HSI_HST_TXSTATE_REG(port)));
+	/* On HSI, BUFSTATE is indexed per port; on SSI, per current port */
+	if (hsi_driver_device_is_hsi(pdev)) {
+		seq_printf(m, "BUFSTATE P1\t: 0x%08x\n",
+			   hsi_inl(base, HSI_HST_BUFSTATE_REG(1)));
+		seq_printf(m, "BUFSTATE P2\t: 0x%08x\n",
+			   hsi_inl(base, HSI_HST_BUFSTATE_REG(2)));
+	} else {
+		seq_printf(m, "BUFSTATE\t: 0x%08x\n",
+			   hsi_inl(base, HSI_HST_BUFSTATE_REG(port)));
+	}
+	seq_printf(m, "BREAK\t\t: 0x%08x\n",
+		   hsi_inl(base, HSI_HST_BREAK_REG(port)));
+	for (ch = 0; ch < 8; ch++) {
+		/* Negative offset means the channel has no buffer register */
+		buff_offset = hsi_hst_buffer_reg(hsi_ctrl, port, ch);
+		if (buff_offset >= 0)
+			seq_printf(m, "BUFFER_CH%d\t: 0x%08x\n", ch,
+				   hsi_inl(base, buff_offset));
+	}
+	if (hsi_driver_device_is_hsi(pdev)) {
+		for (fifo = 0; fifo < HSI_HST_FIFO_COUNT; fifo++) {
+			seq_printf(m, "FIFO MAPPING%d\t: 0x%08x\n", fifo,
+				   hsi_inl(base,
+					   HSI_HST_MAPPING_FIFO_REG(fifo)));
+		}
+	}
+	/* HSR */
+	seq_printf(m, "\nHSR\n===\n");
+	seq_printf(m, "MODE\t\t: 0x%08x\n",
+		   hsi_inl(base, HSI_HSR_MODE_REG(port)));
+	seq_printf(m, "FRAMESIZE\t: 0x%08x\n",
+		   hsi_inl(base, HSI_HSR_FRAMESIZE_REG(port)));
+	seq_printf(m, "CHANNELS\t: 0x%08x\n",
+		   hsi_inl(base, HSI_HSR_CHANNELS_REG(port)));
+	seq_printf(m, "COUNTERS\t: 0x%08x\n",
+		   hsi_inl(base, HSI_HSR_COUNTERS_REG(port)));
+	seq_printf(m, "RXSTATE\t\t: 0x%08x\n",
+		   hsi_inl(base, HSI_HSR_RXSTATE_REG(port)));
+	if (hsi_driver_device_is_hsi(pdev)) {
+		seq_printf(m, "BUFSTATE P1\t: 0x%08x\n",
+			   hsi_inl(base, HSI_HSR_BUFSTATE_REG(1)));
+		seq_printf(m, "BUFSTATE P2\t: 0x%08x\n",
+			   hsi_inl(base, HSI_HSR_BUFSTATE_REG(2)));
+	} else {
+		seq_printf(m, "BUFSTATE\t: 0x%08x\n",
+			   hsi_inl(base, HSI_HSR_BUFSTATE_REG(port)));
+	}
+	seq_printf(m, "BREAK\t\t: 0x%08x\n",
+		   hsi_inl(base, HSI_HSR_BREAK_REG(port)));
+	seq_printf(m, "ERROR\t\t: 0x%08x\n",
+		   hsi_inl(base, HSI_HSR_ERROR_REG(port)));
+	seq_printf(m, "ERRORACK\t: 0x%08x\n",
+		   hsi_inl(base, HSI_HSR_ERRORACK_REG(port)));
+	for (ch = 0; ch < 8; ch++) {
+		buff_offset = hsi_hsr_buffer_reg(hsi_ctrl, port, ch);
+		if (buff_offset >= 0)
+			seq_printf(m, "BUFFER_CH%d\t: 0x%08x\n", ch,
+				   hsi_inl(base, buff_offset));
+	}
+	if (hsi_driver_device_is_hsi(pdev)) {
+		for (fifo = 0; fifo < HSI_HSR_FIFO_COUNT; fifo++) {
+			seq_printf(m, "FIFO MAPPING%d\t: 0x%08x\n", fifo,
+				   hsi_inl(base,
+					   HSI_HSR_MAPPING_FIFO_REG(fifo)));
+		}
+		seq_printf(m, "DLL\t: 0x%08x\n",
+			   hsi_inl(base, HSI_HSR_DLL_REG));
+		seq_printf(m, "DIVISOR\t: 0x%08x\n",
+			   hsi_inl(base, HSI_HSR_DIVISOR_REG(port)));
+	}
+
+	hsi_clocks_disable(hsi_ctrl->dev, __func__);
+
+	return 0;
+}
+
+/* debugfs "gdd/regs" dump: GDD (DMA engine) MPU interrupt registers,
+ * global control register, and the per-logical-channel transfer
+ * configuration registers. SSI-only registers are skipped on HSI. */
+static int hsi_debug_gdd_show(struct seq_file *m, void *p)
+{
+	struct hsi_dev *hsi_ctrl = m->private;
+	void __iomem *base = hsi_ctrl->base;
+	int lch;
+	struct platform_device *pdev = to_platform_device(hsi_ctrl->dev);
+
+	hsi_clocks_enable(hsi_ctrl->dev, __func__);
+
+	seq_printf(m, "GDD_MPU_STATUS\t: 0x%08x\n",
+		   hsi_inl(base, HSI_SYS_GDD_MPU_IRQ_STATUS_REG));
+	seq_printf(m, "GDD_MPU_ENABLE\t: 0x%08x\n\n",
+		   hsi_inl(base, HSI_SYS_GDD_MPU_IRQ_ENABLE_REG));
+
+	/* ID/TEST registers only exist on the legacy SSI GDD */
+	if (!hsi_driver_device_is_hsi(pdev)) {
+		seq_printf(m, "HW_ID\t\t: 0x%08x\n",
+			   hsi_inl(base, HSI_SSI_GDD_HW_ID_REG));
+		seq_printf(m, "PPORT_ID\t: 0x%08x\n",
+			   hsi_inl(base, HSI_SSI_GDD_PPORT_ID_REG));
+		seq_printf(m, "MPORT_ID\t: 0x%08x\n",
+			   hsi_inl(base, HSI_SSI_GDD_MPORT_ID_REG));
+		seq_printf(m, "TEST\t\t: 0x%08x\n",
+			   hsi_inl(base, HSI_SSI_GDD_TEST_REG));
+	}
+
+	seq_printf(m, "GCR\t\t: 0x%08x\n", hsi_inl(base, HSI_GDD_GCR_REG));
+
+	for (lch = 0; lch < hsi_ctrl->gdd_chan_count; lch++) {
+		seq_printf(m, "\nGDD LCH %d\n=========\n", lch);
+		seq_printf(m, "CSDP\t\t: 0x%04x\n",
+			   hsi_inw(base, HSI_GDD_CSDP_REG(lch)));
+		seq_printf(m, "CCR\t\t: 0x%04x\n",
+			   hsi_inw(base, HSI_GDD_CCR_REG(lch)));
+		seq_printf(m, "CICR\t\t: 0x%04x\n",
+			   hsi_inw(base, HSI_GDD_CCIR_REG(lch)));
+		seq_printf(m, "CSR\t\t: 0x%04x\n",
+			   hsi_inw(base, HSI_GDD_CSR_REG(lch)));
+		seq_printf(m, "CSSA\t\t: 0x%08x\n",
+			   hsi_inl(base, HSI_GDD_CSSA_REG(lch)));
+		seq_printf(m, "CDSA\t\t: 0x%08x\n",
+			   hsi_inl(base, HSI_GDD_CDSA_REG(lch)));
+		seq_printf(m, "CEN\t\t: 0x%04x\n",
+			   hsi_inw(base, HSI_GDD_CEN_REG(lch)));
+		seq_printf(m, "CSAC\t\t: 0x%04x\n",
+			   hsi_inw(base, HSI_GDD_CSAC_REG(lch)));
+		seq_printf(m, "CDAC\t\t: 0x%04x\n",
+			   hsi_inw(base, HSI_GDD_CDAC_REG(lch)));
+		/* Channel linking is an SSI-only feature */
+		if (!hsi_driver_device_is_hsi(pdev))
+			seq_printf(m, "CLNK_CTRL\t: 0x%04x\n",
+				   hsi_inw(base,
+					   HSI_SSI_GDD_CLNK_CTRL_REG(lch)));
+	}
+
+	hsi_clocks_disable(hsi_ctrl->dev, __func__);
+
+	return 0;
+}
+
+static int hsi_port_counters_open(struct inode *inode, struct file *file)
+{
+	/* Stash the hsi_port pointer for the read/write handlers */
+	file->private_data = inode->i_private;
+
+	return 0;
+}
+
+/* Nothing to tear down on close: open only borrowed inode->i_private. */
+static int hsi_port_counters_release(struct inode *inode, struct file *file)
+{
+	return 0;
+}
+
+/* Seeking is meaningless for this single-shot entry: always report pos 0. */
+static loff_t hsi_port_counters_seek(struct file *file, loff_t off, int whence)
+{
+	return 0;
+}
+
+/*
+ * debugfs "counters" read handler: formats the HSI_HSR_COUNTERS register
+ * as "FT:.. TB:.. FB:.." on HSI or "timeout:.." on SSI.
+ * Single-shot: a second read (non-zero *offp) returns EOF.
+ */
+static ssize_t hsi_port_counters_read(struct file *filep, char __user *buff,
+				      size_t count, loff_t *offp)
+{
+	ssize_t ret;
+	struct hsi_port *hsi_port = filep->private_data;
+	struct hsi_dev *hsi_ctrl = hsi_port->hsi_controller;
+	void __iomem *base = hsi_ctrl->base;
+	unsigned int port = hsi_port->port_number;
+	struct platform_device *pdev = to_platform_device(hsi_ctrl->dev);
+	char str[50];
+	unsigned long reg;
+
+	/* Everything fits in one read; non-zero offset means EOF */
+	if (*offp > 0)
+		return 0;
+
+	hsi_clocks_enable(hsi_ctrl->dev, __func__);
+
+	reg = hsi_inl(base, HSI_HSR_COUNTERS_REG(port));
+
+	hsi_clocks_disable(hsi_ctrl->dev, __func__);
+
+	if (hsi_driver_device_is_hsi(pdev)) {
+		/* Bounded formatting: never overrun str[] */
+		snprintf(str, sizeof(str), "FT:%d, TB:%d, FB:%d\n",
+			 (int)(reg & HSI_COUNTERS_FT_MASK) >>
+			 HSI_COUNTERS_FT_OFFSET,
+			 (int)(reg & HSI_COUNTERS_TB_MASK) >>
+			 HSI_COUNTERS_TB_OFFSET,
+			 (int)(reg & HSI_COUNTERS_FB_MASK) >>
+			 HSI_COUNTERS_FB_OFFSET);
+	} else {
+		snprintf(str, sizeof(str), "timeout:%d\n", (int)reg);
+	}
+
+	ret = strlen(str);
+	if (copy_to_user(buff, str, ret)) {
+		dev_err(hsi_ctrl->dev, "copy_to_user failed\n");
+		/* Report the fault instead of pretending EOF (was 0) */
+		ret = -EFAULT;
+	} else {
+		*offp = ret;
+	}
+
+	return ret;
+}
+
+/*
+ * Split the buffer `buf' into space-separated words, in place.
+ * words[] receives a pointer to each NUL-terminated word.
+ * Return the number of words or <0 on error (too many words).
+ */
+static int hsi_debug_tokenize(char *buf, char *words[], int maxwords)
+{
+	int count = 0;
+	char *p = buf;
+
+	for (;;) {
+		char *start;
+
+		/* Swallow whitespace before the next word */
+		while (*p && isspace(*p))
+			p++;
+		if (!*p)
+			break;	/* only trailing whitespace left */
+
+		/* Advance over the word itself */
+		start = p;
+		while (*p && !isspace(*p))
+			p++;
+
+		if (count == maxwords)
+			return -EINVAL;	/* ran out of slots before bytes */
+		if (*p)
+			*p++ = '\0';	/* terminate the word in place */
+		words[count++] = start;
+	}
+
+	return count;
+}
+
+/*
+ * debugfs "counters" write handler: parses "FT TB FB" (HSI) or a single
+ * "timeout" value (SSI) from userspace and programs HSI_HSR_COUNTERS.
+ */
+static ssize_t hsi_port_counters_write(struct file *filep,
+				       const char __user *buff, size_t count,
+				       loff_t *offp)
+{
+	ssize_t ret;
+	struct hsi_port *hsi_port = filep->private_data;
+	struct hsi_dev *hsi_ctrl = hsi_port->hsi_controller;
+	void __iomem *base = hsi_ctrl->base;
+	unsigned int port = hsi_port->port_number;
+	struct platform_device *pdev = to_platform_device(hsi_ctrl->dev);
+#define MAXWORDS 4
+	int nwords;
+	char *words[MAXWORDS];
+	char tmpbuf[256];
+	unsigned long reg, ft, tb, fb;
+
+	if (count == 0)
+		return 0;
+	if (count > sizeof(tmpbuf) - 1)
+		return -E2BIG;
+	if (copy_from_user(tmpbuf, buff, count))
+		return -EFAULT;
+	tmpbuf[count] = '\0';
+	dev_dbg(hsi_ctrl->dev, "%s: read %d bytes from userspace\n",
+		__func__, (int)count);
+
+	nwords = hsi_debug_tokenize(tmpbuf, words, MAXWORDS);
+	if (nwords < 0) {
+		dev_warn(hsi_ctrl->dev,
+			 "HSI counters write usage: echo <values> > counters\n");
+		return -EINVAL;
+	}
+
+	hsi_clocks_enable(hsi_ctrl->dev, __func__);
+
+	if (hsi_driver_device_is_hsi(pdev)) {
+		if (nwords != 3) {
+			dev_warn(hsi_ctrl->dev, "HSI counters write usage: "
+				 "echo \"FT TB FB\" > counters\n");
+			ret = -EINVAL;
+			goto hsi_cnt_w_bk1;
+		}
+		/* Reject non-numeric input: the values were previously used
+		 * uninitialized when parsing failed */
+		if (strict_strtoul(words[0], 0, &ft) ||
+		    strict_strtoul(words[1], 0, &tb) ||
+		    strict_strtoul(words[2], 0, &fb)) {
+			ret = -EINVAL;
+			goto hsi_cnt_w_bk1;
+		}
+		reg = ((ft << HSI_COUNTERS_FT_OFFSET & HSI_COUNTERS_FT_MASK) |
+		       (tb << HSI_COUNTERS_TB_OFFSET & HSI_COUNTERS_TB_MASK) |
+		       (fb << HSI_COUNTERS_FB_OFFSET & HSI_COUNTERS_FB_MASK));
+	} else {
+		if (nwords != 1) {
+			dev_warn(hsi_ctrl->dev, "HSI counters write usage: "
+				 "echo \"timeout\" > counters\n");
+			ret = -EINVAL;
+			goto hsi_cnt_w_bk1;
+		}
+		if (strict_strtoul(words[0], 0, &reg)) {
+			ret = -EINVAL;
+			goto hsi_cnt_w_bk1;
+		}
+	}
+	hsi_outl(reg, base, HSI_HSR_COUNTERS_REG(port));
+	ret = count;
+	*offp += count;
+
+hsi_cnt_w_bk1:
+
+	hsi_clocks_disable(hsi_ctrl->dev, __func__);
+
+	return ret;
+}
+
+/* seq_file open hook for the controller "regs" entry */
+static int hsi_regs_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, hsi_debug_show, inode->i_private);
+}
+
+/* seq_file open hook for the per-port "regs" entry */
+static int hsi_port_regs_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, hsi_debug_port_show, inode->i_private);
+}
+
+/* seq_file open hook for the "gdd/regs" entry */
+static int hsi_gdd_regs_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, hsi_debug_gdd_show, inode->i_private);
+}
+
+/* Read-only seq_file ops for the controller register dump */
+static const struct file_operations hsi_regs_fops = {
+	.open = hsi_regs_open,
+	.read = seq_read,
+	.llseek = seq_lseek,
+	.release = single_release,
+};
+
+/* Read-only seq_file ops for the per-port register dump */
+static const struct file_operations hsi_port_regs_fops = {
+	.open = hsi_port_regs_open,
+	.read = seq_read,
+	.llseek = seq_lseek,
+	.release = single_release,
+};
+
+/* Read/write ops for the per-port counters entry (custom handlers) */
+static const struct file_operations hsi_port_counters_fops = {
+	.open = hsi_port_counters_open,
+	.read = hsi_port_counters_read,
+	.write = hsi_port_counters_write,
+	.llseek = hsi_port_counters_seek,
+	.release = hsi_port_counters_release,
+};
+
+/* Read-only seq_file ops for the GDD (DMA) register dump */
+static const struct file_operations hsi_gdd_regs_fops = {
+	.open = hsi_gdd_regs_open,
+	.read = seq_read,
+	.llseek = seq_lseek,
+	.release = single_release,
+};
+
+/**
+ * hsi_debug_add_ctrl - create the debugfs tree for one HSI controller
+ * @hsi_ctrl: HSI controller to expose
+ *
+ * Creates <debugfs>/hsi/<dev>/regs, per-port regs/counters entries and
+ * the gdd/regs entry. On failure the partially built tree is removed.
+ *
+ * Return 0 on success and < 0 on error.
+ */
+int __init hsi_debug_add_ctrl(struct hsi_dev *hsi_ctrl)
+{
+	struct platform_device *pdev = to_platform_device(hsi_ctrl->dev);
+	/* plain char: snprintf/debugfs_create_dir take char *, the former
+	 * unsigned char triggered pointer-signedness warnings */
+	char dir_name[HSI_DIR_NAME_SIZE];
+	struct dentry *dir;
+	unsigned int port;
+
+	if (pdev->id < 0) {
+		hsi_ctrl->dir = debugfs_create_dir(pdev->name, hsi_dir);
+	} else {
+		snprintf(dir_name, sizeof(dir_name), "%s%d", pdev->name,
+			 pdev->id);
+		hsi_ctrl->dir = debugfs_create_dir(dir_name, hsi_dir);
+	}
+	if (IS_ERR(hsi_ctrl->dir))
+		return PTR_ERR(hsi_ctrl->dir);
+
+	debugfs_create_file("regs", S_IRUGO, hsi_ctrl->dir, hsi_ctrl,
+			    &hsi_regs_fops);
+
+	for (port = 0; port < hsi_ctrl->max_p; port++) {
+		snprintf(dir_name, sizeof(dir_name), "port%d", port + 1);
+		dir = debugfs_create_dir(dir_name, hsi_ctrl->dir);
+		if (IS_ERR(dir))
+			goto rback;
+		debugfs_create_file("regs", S_IRUGO, dir,
+				    &hsi_ctrl->hsi_port[port],
+				    &hsi_port_regs_fops);
+		debugfs_create_file("counters", S_IRUGO | S_IWUGO, dir,
+				    &hsi_ctrl->hsi_port[port],
+				    &hsi_port_counters_fops);
+	}
+
+	dir = debugfs_create_dir("gdd", hsi_ctrl->dir);
+	if (IS_ERR(dir))
+		goto rback;
+	debugfs_create_file("regs", S_IRUGO, dir, hsi_ctrl, &hsi_gdd_regs_fops);
+
+	return 0;
+rback:
+	/* Remove whatever was created so far */
+	debugfs_remove_recursive(hsi_ctrl->dir);
+	return PTR_ERR(dir);
+}
+
+/* Tear down the debugfs tree created by hsi_debug_add_ctrl(). */
+void hsi_debug_remove_ctrl(struct hsi_dev *hsi_ctrl)
+{
+	debugfs_remove_recursive(hsi_ctrl->dir);
+}
+
+/* Create the top-level <debugfs>/hsi directory shared by all controllers.
+ * Return 0 on success and < 0 on error. */
+int __init hsi_debug_init(void)
+{
+	hsi_dir = debugfs_create_dir("hsi", NULL);
+	if (IS_ERR(hsi_dir))
+		return PTR_ERR(hsi_dir);
+
+	return 0;
+}
+
+/* Remove the top-level <debugfs>/hsi directory and everything under it. */
+void hsi_debug_exit(void)
+{
+	debugfs_remove_recursive(hsi_dir);
+}
diff --git a/drivers/omap_hsi/hsi_driver_dma.c b/drivers/omap_hsi/hsi_driver_dma.c
new file mode 100644
index 0000000..ad819f5
--- /dev/null
+++ b/drivers/omap_hsi/hsi_driver_dma.c
@@ -0,0 +1,643 @@
+/*
+ * hsi_driver_dma.c
+ *
+ * Implements HSI low level interface driver functionality with DMA support.
+ *
+ * Copyright (C) 2007-2008 Nokia Corporation. All rights reserved.
+ * Copyright (C) 2009 Texas Instruments, Inc.
+ *
+ * Author: Carlos Chinea <carlos.chinea@nokia.com>
+ * Author: Sebastien JAN <s-jan@ti.com>
+ *
+ * This package is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
+ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
+ */
+
+#include <linux/dma-mapping.h>
+#include "hsi_driver.h"
+
+#define HSI_SYNC_WRITE	0
+#define HSI_SYNC_READ	1
+#define HSI_L3_TPUT	13428	/* 13428 KiB/s => ~110 Mbit/s */
+
+/* GDD hardware sync codes, indexed as
+ * [HSI_SYNC_WRITE|HSI_SYNC_READ][port - 1][channel].
+ * Used to bind a DMA logical channel to an SSI port/channel request line
+ * (sync is required for SSI but not for HSI). */
+static unsigned char hsi_sync_table[2][2][8] = {
+	{
+	 {0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08},
+	 {0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, 0x00}
+	 }, {
+	     {0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17},
+	     {0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f}
+	     }
+};
+
+/**
+ * hsi_is_dma_read_int_pending - Indicates if a DMA read interrupt is pending
+ * @hsi_ctrl - HSI controller of the GDD.
+ *
+ * Checks the enabled+raised GDD interrupt bits and reports whether any of
+ * them belongs to a logical channel currently assigned to a read transfer.
+ *
+ * Needs to be called holding the hsi_controller lock
+ *
+ * Returns true if DMA read interrupt is pending, else false
+ */
+bool hsi_is_dma_read_int_pending(struct hsi_dev *hsi_ctrl)
+{
+	void __iomem *base = hsi_ctrl->base;
+	unsigned int gdd_lch = 0;
+	u32 status_reg = 0;
+	int i, j;
+	/* Only consider interrupts that are both raised and enabled */
+	status_reg = hsi_inl(base, HSI_SYS_GDD_MPU_IRQ_STATUS_REG);
+	status_reg &= hsi_inl(base, HSI_SYS_GDD_MPU_IRQ_ENABLE_REG);
+	if (!status_reg)
+		return false;
+
+	/* Scan all enabled DMA channels */
+	for (gdd_lch = 0; gdd_lch < hsi_ctrl->gdd_chan_count; gdd_lch++) {
+		if (!(status_reg & HSI_GDD_LCH(gdd_lch)))
+			continue;
+		/* Match the pending lch against every channel's read lch */
+		for (i = 0; i < hsi_ctrl->max_p; i++)
+			for (j = 0; j < hsi_ctrl->hsi_port[i].max_ch; j++)
+				if (hsi_ctrl->hsi_port[i].
+				    hsi_channel[j].read_data.lch == gdd_lch)
+					return true;
+	}
+	return false;
+}
+/**
+ * hsi_get_free_lch - Get a free GDD(DMA) logical channel
+ * @hsi_ctrl - HSI controller of the GDD.
+ *
+ * Round-robin search starting just after the last allocated channel; a
+ * channel is considered busy while its GDD interrupt is enabled.
+ *
+ * Needs to be called holding the hsi_controller lock
+ *
+ * Returns the logical channel number, or -EBUSY if none available
+ */
+static int hsi_get_free_lch(struct hsi_dev *hsi_ctrl)
+{
+	unsigned int in_use;
+	int candidate, tries;
+
+	in_use = hsi_inl(hsi_ctrl->base, HSI_SYS_GDD_MPU_IRQ_ENABLE_REG);
+	candidate = hsi_ctrl->last_gdd_lch;
+	for (tries = 0; tries < hsi_ctrl->gdd_chan_count; tries++) {
+		candidate++;
+		if (candidate >= hsi_ctrl->gdd_chan_count)
+			candidate = 0;
+		if (!(in_use & HSI_GDD_LCH(candidate))) {
+			hsi_ctrl->last_gdd_lch = candidate;
+			return candidate;
+		}
+	}
+
+	return -EBUSY;
+}
+
+/**
+ * hsi_driver_write_dma - Program GDD [DMA] to write data from memory to
+ * the hsi channel buffer.
+ * @hsi_channel - pointer to the hsi_channel to write data to.
+ * @data - 32-bit word pointer to the data.
+ * @size - Number of 32bit words to be transfered.
+ *
+ * hsi_controller lock must be held before calling this function.
+ *
+ * Return 0 on success and < 0 on error.
+ */
+int hsi_driver_write_dma(struct hsi_channel *hsi_channel, u32 *data,
+			 unsigned int size)
+{
+	struct hsi_dev *hsi_ctrl = hsi_channel->hsi_port->hsi_controller;
+	void __iomem *base = hsi_ctrl->base;
+	unsigned int port = hsi_channel->hsi_port->port_number;
+	unsigned int channel = hsi_channel->channel_number;
+	unsigned int sync;
+	int lch;
+	dma_addr_t src_addr;
+	dma_addr_t dest_addr;
+	u16 tmp;
+	int fifo;
+
+	if ((size < 1) || (data == NULL))
+		return -EINVAL;
+
+	lch = hsi_get_free_lch(hsi_ctrl);
+	if (lch < 0) {
+		dev_err(hsi_ctrl->dev, "No free DMA channels.\n");
+		return -EBUSY;	/* No free GDD logical channels. */
+	} else {
+		dev_dbg(hsi_ctrl->dev, "Allocated DMA channel %d for write on"
+			" HSI channel %d.\n", lch,
+			hsi_channel->channel_number);
+	}
+
+	/* NOTE: Getting a free gdd logical channel and
+	 * reserve it must be done atomicaly. */
+	hsi_channel->write_data.lch = lch;
+
+	/* Sync is required for SSI but not for HSI */
+	sync = hsi_sync_table[HSI_SYNC_WRITE][port - 1][channel];
+
+	src_addr = dma_map_single(hsi_ctrl->dev, data, size * 4, DMA_TO_DEVICE);
+	if (unlikely(dma_mapping_error(hsi_ctrl->dev, src_addr))) {
+		dev_err(hsi_ctrl->dev, "Failed to create DMA write mapping.\n");
+		return -ENOMEM;
+	}
+
+	tmp = HSI_SRC_SINGLE_ACCESS0 |
+	    HSI_SRC_MEMORY_PORT |
+	    HSI_DST_SINGLE_ACCESS0 |
+	    HSI_DST_PERIPHERAL_PORT | HSI_DATA_TYPE_S32;
+	hsi_outw(tmp, base, HSI_GDD_CSDP_REG(lch));
+
+	tmp = HSI_SRC_AMODE_POSTINC | HSI_DST_AMODE_CONST | sync;
+	hsi_outw(tmp, base, HSI_GDD_CCR_REG(lch));
+
+	hsi_outw((HSI_BLOCK_IE | HSI_TOUT_IE), base, HSI_GDD_CCIR_REG(lch));
+
+	if (hsi_driver_device_is_hsi(to_platform_device(hsi_ctrl->dev))) {
+		fifo = hsi_fifo_get_id(hsi_ctrl, channel, port);
+		if (unlikely(fifo < 0)) {
+			dev_err(hsi_ctrl->dev, "No valid FIFO id for DMA "
+				"transfer to FIFO.\n");
+			/* Undo the mapping: the transfer will never start.
+			 * This was previously leaked on this error path. */
+			dma_unmap_single(hsi_ctrl->dev, src_addr, size * 4,
+					 DMA_TO_DEVICE);
+			return -EFAULT;
+		}
+		/* HSI CDSA register takes a FIFO ID when copying to FIFO */
+		hsi_outl(fifo, base, HSI_GDD_CDSA_REG(lch));
+	} else {
+		dest_addr = hsi_ctrl->phy_base + HSI_HST_BUFFER_CH_REG(port,
+								channel);
+		/* SSI CDSA register always takes a 32-bit address */
+		hsi_outl(dest_addr, base, HSI_GDD_CDSA_REG(lch));
+	}
+
+	/* HSI CSSA register takes a 32-bit address when copying from memory */
+	/* SSI CSSA register always takes a 32-bit address */
+	hsi_outl(src_addr, base, HSI_GDD_CSSA_REG(lch));
+	hsi_outw(size, base, HSI_GDD_CEN_REG(lch));
+
+	/* TODO : Need to clean interrupt status here to avoid spurious int */
+
+	/* Enable the GDD interrupt (marks lch busy) and start the transfer */
+	hsi_outl_or(HSI_GDD_LCH(lch), base, HSI_SYS_GDD_MPU_IRQ_ENABLE_REG);
+	hsi_outw_or(HSI_CCR_ENABLE, base, HSI_GDD_CCR_REG(lch));
+
+	return 0;
+}
+
+/**
+ * hsi_driver_read_dma - Program GDD [DMA] to write data to memory from
+ * the hsi channel buffer.
+ * @hsi_channel - pointer to the hsi_channel to read data from.
+ * @data - 32-bit word pointer where to store the incoming data.
+ * @count - Number of 32bit words to be transfered to the buffer.
+ *
+ * hsi_controller lock must be held before calling this function.
+ *
+ * Return 0 on success and < 0 on error.
+ */
+int hsi_driver_read_dma(struct hsi_channel *hsi_channel, u32 *data,
+			unsigned int count)
+{
+	struct hsi_dev *hsi_ctrl = hsi_channel->hsi_port->hsi_controller;
+	void __iomem *base = hsi_ctrl->base;
+	unsigned int port = hsi_channel->hsi_port->port_number;
+	unsigned int channel = hsi_channel->channel_number;
+	unsigned int sync;
+	int lch;
+	dma_addr_t src_addr;
+	dma_addr_t dest_addr;
+	u16 tmp;
+	int fifo;
+
+	lch = hsi_get_free_lch(hsi_ctrl);
+	if (lch < 0) {
+		dev_err(hsi_ctrl->dev, "No free DMA channels.\n");
+		return -EBUSY;	/* No free GDD logical channels. */
+	} else {
+		dev_dbg(hsi_ctrl->dev, "Allocated DMA channel %d for read on"
+			" HSI channel %d.\n", lch,
+			hsi_channel->channel_number);
+	}
+
+	/* When DMA is used for Rx, disable the Rx Interrupt.
+	 * (else DATAAVAILLABLE event would get triggered on first
+	 * received data word)
+	 * (Rx interrupt might be active for polling feature)
+	 */
+	hsi_driver_disable_read_interrupt(hsi_channel);
+
+	/*
+	 * NOTE: Gettting a free gdd logical channel and
+	 * reserve it must be done atomicaly.
+	 */
+	hsi_channel->read_data.lch = lch;
+
+	/* Sync is required for SSI but not for HSI */
+	sync = hsi_sync_table[HSI_SYNC_READ][port - 1][channel];
+
+	dest_addr = dma_map_single(hsi_ctrl->dev, data, count * 4,
+				   DMA_FROM_DEVICE);
+	if (unlikely(dma_mapping_error(hsi_ctrl->dev, dest_addr))) {
+		dev_err(hsi_ctrl->dev, "Failed to create DMA read mapping.\n");
+		return -ENOMEM;
+	}
+
+	tmp = HSI_DST_SINGLE_ACCESS0 |
+	    HSI_DST_MEMORY_PORT |
+	    HSI_SRC_SINGLE_ACCESS0 |
+	    HSI_SRC_PERIPHERAL_PORT | HSI_DATA_TYPE_S32;
+	hsi_outw(tmp, base, HSI_GDD_CSDP_REG(lch));
+
+	tmp = HSI_DST_AMODE_POSTINC | HSI_SRC_AMODE_CONST | sync;
+	hsi_outw(tmp, base, HSI_GDD_CCR_REG(lch));
+
+	hsi_outw((HSI_BLOCK_IE | HSI_TOUT_IE), base, HSI_GDD_CCIR_REG(lch));
+
+	if (hsi_driver_device_is_hsi(to_platform_device(hsi_ctrl->dev))) {
+		fifo = hsi_fifo_get_id(hsi_ctrl, channel, port);
+		if (unlikely(fifo < 0)) {
+			dev_err(hsi_ctrl->dev, "No valid FIFO id for DMA "
+				"transfer from FIFO.\n");
+			/* Undo the mapping: the transfer will never start.
+			 * This was previously leaked on this error path. */
+			dma_unmap_single(hsi_ctrl->dev, dest_addr, count * 4,
+					 DMA_FROM_DEVICE);
+			return -EFAULT;
+		}
+		/* HSI CSSA register takes a FIFO ID when copying from FIFO */
+		hsi_outl(fifo, base, HSI_GDD_CSSA_REG(lch));
+	} else {
+		src_addr = hsi_ctrl->phy_base + HSI_HSR_BUFFER_CH_REG(port,
+								channel);
+		/* SSI CSSA register always takes a 32-bit address */
+		hsi_outl(src_addr, base, HSI_GDD_CSSA_REG(lch));
+	}
+
+	/* HSI CDSA register takes a 32-bit address when copying to memory */
+	/* SSI CDSA register always takes a 32-bit address */
+	hsi_outl(dest_addr, base, HSI_GDD_CDSA_REG(lch));
+	hsi_outw(count, base, HSI_GDD_CEN_REG(lch));
+
+	/* TODO : Need to clean interrupt status here to avoid spurious int */
+
+	/* Enable the GDD interrupt (marks lch busy) and start the transfer */
+	hsi_outl_or(HSI_GDD_LCH(lch), base, HSI_SYS_GDD_MPU_IRQ_ENABLE_REG);
+	hsi_outw_or(HSI_CCR_ENABLE, base, HSI_GDD_CCR_REG(lch));
+
+	return 0;
+}
+
+/**
+ * hsi_driver_cancel_write_dma - Cancel an ongoing GDD [DMA] write for the
+ * specified hsi channel.
+ * @hsi_ch - pointer to the hsi_channel to cancel DMA write.
+ *
+ * hsi_controller lock must be held before calling this function.
+ *
+ * Return: -ENXIO : No DMA channel found for specified HSI channel
+ *	   -ECANCELED : DMA cancel success, data not transfered to TX FIFO
+ *	   0 : DMA transfer is already over, data already transfered to TX FIFO
+ *
+ * Note: whatever returned value, write callback will not be called after
+ * write cancel.
+ */
+int hsi_driver_cancel_write_dma(struct hsi_channel *hsi_ch)
+{
+	int lch = hsi_ch->write_data.lch;
+	unsigned int port = hsi_ch->hsi_port->port_number;
+	unsigned int channel = hsi_ch->channel_number;
+	struct hsi_dev *hsi_ctrl = hsi_ch->hsi_port->hsi_controller;
+	u16 ccr, gdd_csr;
+	long buff_offset;
+	u32 status_reg;
+	dma_addr_t dma_h;
+	size_t size;
+
+	/* NOTE(review): dev_err used here as a highly visible trace of the
+	 * cancel path, not an actual error report */
+	dev_err(&hsi_ch->dev->device, "hsi_driver_cancel_write_dma( "
+		"channel %d\n", hsi_ch->channel_number);
+
+	if (lch < 0) {
+		dev_err(&hsi_ch->dev->device, "No DMA channel found for HSI "
+			"channel %d\n", hsi_ch->channel_number);
+		return -ENXIO;
+	}
+	ccr = hsi_inw(hsi_ctrl->base, HSI_GDD_CCR_REG(lch));
+	if (!(ccr & HSI_CCR_ENABLE)) {
+		dev_dbg(&hsi_ch->dev->device, "Write cancel on not "
+			"enabled logical channel %d CCR REG 0x%04X\n",
+			lch, ccr);
+	}
+	/* Snapshot completion state BEFORE stopping the channel: it decides
+	 * whether the data already reached the TX FIFO (return value) */
+	status_reg = hsi_inl(hsi_ctrl->base, HSI_SYS_GDD_MPU_IRQ_STATUS_REG);
+	status_reg &= hsi_inl(hsi_ctrl->base, HSI_SYS_GDD_MPU_IRQ_ENABLE_REG);
+	hsi_outw_and(~HSI_CCR_ENABLE, hsi_ctrl->base, HSI_GDD_CCR_REG(lch));
+
+	/* Clear CSR register by reading it, as it is cleared automaticaly */
+	/* by HW after SW read. */
+	gdd_csr = hsi_inw(hsi_ctrl->base, HSI_GDD_CSR_REG(lch));
+	hsi_outl_and(~HSI_GDD_LCH(lch), hsi_ctrl->base,
+		     HSI_SYS_GDD_MPU_IRQ_ENABLE_REG);
+	hsi_outl(HSI_GDD_LCH(lch), hsi_ctrl->base,
+		 HSI_SYS_GDD_MPU_IRQ_STATUS_REG);
+
+	/* Unmap DMA region */
+	dma_h = hsi_inl(hsi_ctrl->base, HSI_GDD_CSSA_REG(lch));
+	size = hsi_inw(hsi_ctrl->base, HSI_GDD_CEN_REG(lch)) * 4;
+	dma_unmap_single(hsi_ctrl->dev, dma_h, size, DMA_TO_DEVICE);
+
+	/* Drop any pending-buffer flag for this channel in HST BUFSTATE */
+	buff_offset = hsi_hst_bufstate_f_reg(hsi_ctrl, port, channel);
+	if (buff_offset >= 0)
+		hsi_outl_and(~HSI_BUFSTATE_CHANNEL(channel), hsi_ctrl->base,
+			     buff_offset);
+
+	hsi_reset_ch_write(hsi_ch);
+	return status_reg & HSI_GDD_LCH(lch) ? 0 : -ECANCELED;
+}
+
+/**
+ * hsi_driver_cancel_read_dma - Cancel an ongoing GDD [DMA] read for the
+ * specified hsi channel.
+ * @hsi_ch - pointer to the hsi_channel to cancel DMA read.
+ *
+ * hsi_controller lock must be held before calling this function.
+ *
+ * Return: -ENXIO : No DMA channel found for specified HSI channel
+ *	   -ECANCELED : DMA cancel success, data not available at expected
+ *	   address.
+ *	   0 : DMA transfer is already over, data already available at
+ *	   expected address.
+ *
+ * Note: whatever returned value, read callback will not be called after cancel.
+ */
+int hsi_driver_cancel_read_dma(struct hsi_channel *hsi_ch)
+{
+	int lch = hsi_ch->read_data.lch;
+	struct hsi_dev *hsi_ctrl = hsi_ch->hsi_port->hsi_controller;
+	u16 ccr, gdd_csr;
+	u32 status_reg;
+	dma_addr_t dma_h;
+	size_t size;
+
+	/* NOTE(review): dev_err used here as a highly visible trace of the
+	 * cancel path, not an actual error report */
+	dev_err(&hsi_ch->dev->device, "hsi_driver_cancel_read_dma "
+		"channel %d\n", hsi_ch->channel_number);
+
+	/* Re-enable interrupts for polling if needed */
+	if (hsi_ch->flags & HSI_CH_RX_POLL)
+		hsi_driver_enable_read_interrupt(hsi_ch, NULL);
+
+	if (lch < 0) {
+		dev_err(&hsi_ch->dev->device, "No DMA channel found for HSI "
+			"channel %d\n", hsi_ch->channel_number);
+		return -ENXIO;
+	}
+
+	ccr = hsi_inw(hsi_ctrl->base, HSI_GDD_CCR_REG(lch));
+	if (!(ccr & HSI_CCR_ENABLE)) {
+		dev_dbg(&hsi_ch->dev->device, "Read cancel on not "
+			"enabled logical channel %d CCR REG 0x%04X\n",
+			lch, ccr);
+	}
+
+	/* Snapshot completion state BEFORE stopping the channel: it decides
+	 * whether the data already landed in memory (return value) */
+	status_reg = hsi_inl(hsi_ctrl->base, HSI_SYS_GDD_MPU_IRQ_STATUS_REG);
+	status_reg &= hsi_inl(hsi_ctrl->base, HSI_SYS_GDD_MPU_IRQ_ENABLE_REG);
+	hsi_outw_and(~HSI_CCR_ENABLE, hsi_ctrl->base, HSI_GDD_CCR_REG(lch));
+
+	/* Clear CSR register by reading it, as it is cleared automaticaly */
+	/* by HW after SW read */
+	gdd_csr = hsi_inw(hsi_ctrl->base, HSI_GDD_CSR_REG(lch));
+	hsi_outl_and(~HSI_GDD_LCH(lch), hsi_ctrl->base,
+		     HSI_SYS_GDD_MPU_IRQ_ENABLE_REG);
+	hsi_outl(HSI_GDD_LCH(lch), hsi_ctrl->base,
+		 HSI_SYS_GDD_MPU_IRQ_STATUS_REG);
+
+	/* Unmap DMA region - Access to the buffer is now safe */
+	dma_h = hsi_inl(hsi_ctrl->base, HSI_GDD_CDSA_REG(lch));
+	size = hsi_inw(hsi_ctrl->base, HSI_GDD_CEN_REG(lch)) * 4;
+	dma_unmap_single(hsi_ctrl->dev, dma_h, size, DMA_FROM_DEVICE);
+
+	hsi_reset_ch_read(hsi_ch);
+	return status_reg & HSI_GDD_LCH(lch) ? 0 : -ECANCELED;
+}
+
+/**
+ * hsi_get_info_from_gdd_lch - Retrieve channels information from DMA channel
+ * @hsi_ctrl - HSI device control structure
+ * @lch - DMA logical channel
+ * @port - HSI port
+ * @channel - HSI channel
+ * @is_read_path - channel is used for reading
+ *
+ * Updates the port, channel and is_read_path parameters depending on the
+ * lch DMA channel status. Output parameters are only written when a match
+ * is found.
+ *
+ * Return 0 on success and < 0 on error.
+ */
+int hsi_get_info_from_gdd_lch(struct hsi_dev *hsi_ctrl, unsigned int lch,
+			      unsigned int *port, unsigned int *channel,
+			      unsigned int *is_read_path)
+{
+	int i_ports;
+	int i_chans;
+	int err = -1;
+
+	/* Linear scan of every (port, channel) pair; the goto breaks out of
+	 * both loops on the first lch match (read checked before write) */
+	for (i_ports = 0; i_ports < HSI_MAX_PORTS; i_ports++)
+		for (i_chans = 0; i_chans < HSI_PORT_MAX_CH; i_chans++)
+			if (hsi_ctrl->hsi_port[i_ports].
+			    hsi_channel[i_chans].read_data.lch == lch) {
+				*is_read_path = 1;
+				*port = i_ports + 1;
+				*channel = i_chans;
+				err = 0;
+				goto get_info_bk;
+			} else if (hsi_ctrl->hsi_port[i_ports].
+				   hsi_channel[i_chans].write_data.lch == lch) {
+				*is_read_path = 0;
+				*port = i_ports + 1;
+				*channel = i_chans;
+				err = 0;
+				goto get_info_bk;
+			}
+get_info_bk:
+	return err;
+}
+
+/* Handle completion (or timeout) of one GDD logical channel: unmap the
+ * DMA buffer, reset the HSI channel state and invoke the client's
+ * read/write callback with the controller lock dropped.
+ * Called from the GDD tasklet with hsi_ctrl->lock held. */
+static void do_hsi_gdd_lch(struct hsi_dev *hsi_ctrl, unsigned int gdd_lch)
+{
+	void __iomem *base = hsi_ctrl->base;
+	struct platform_device *pdev = to_platform_device(hsi_ctrl->dev);
+	struct hsi_channel *ch;
+	unsigned int port;
+	unsigned int channel;
+	unsigned int is_read_path;
+	u32 gdd_csr;
+	dma_addr_t dma_h;
+	size_t size;
+	int fifo, fifo_words_avail;
+
+	if (hsi_get_info_from_gdd_lch(hsi_ctrl, gdd_lch, &port, &channel,
+				      &is_read_path) < 0) {
+		dev_err(hsi_ctrl->dev, "Unable to match the DMA channel %d with"
+			" an HSI channel\n", gdd_lch);
+		return;
+	} else {
+		dev_dbg(hsi_ctrl->dev, "DMA event on gdd_lch=%d => port=%d, "
+			"channel=%d, read=%d\n", gdd_lch, port, channel,
+			is_read_path);
+	}
+
+	/* Mask this lch: it is no longer in flight */
+	hsi_outl_and(~HSI_GDD_LCH(gdd_lch), base,
+		     HSI_SYS_GDD_MPU_IRQ_ENABLE_REG);
+	/* Warning : CSR register is cleared automaticaly by HW after SW read */
+	gdd_csr = hsi_inw(base, HSI_GDD_CSR_REG(gdd_lch));
+
+	if (!(gdd_csr & HSI_CSR_TOUT)) {
+		if (is_read_path) {	/* Read path */
+			dma_h = hsi_inl(base, HSI_GDD_CDSA_REG(gdd_lch));
+			size = hsi_inw(base, HSI_GDD_CEN_REG(gdd_lch)) * 4;
+			dma_sync_single_for_cpu(hsi_ctrl->dev, dma_h, size,
+						DMA_FROM_DEVICE);
+			dma_unmap_single(hsi_ctrl->dev, dma_h, size,
+					 DMA_FROM_DEVICE);
+			ch = hsi_ctrl_get_ch(hsi_ctrl, port, channel);
+			hsi_reset_ch_read(ch);
+
+			dev_dbg(hsi_ctrl->dev, "Calling ch %d read callback "
+				"(size %d).\n", channel, size/4);
+			/* Drop the lock while in client code */
+			spin_unlock(&hsi_ctrl->lock);
+			ch->read_done(ch->dev, size / 4);
+			spin_lock(&hsi_ctrl->lock);
+
+			/* Check if FIFO is correctly emptied */
+			if (hsi_driver_device_is_hsi(pdev)) {
+				fifo = hsi_fifo_get_id(hsi_ctrl, channel, port);
+				if (unlikely(fifo < 0)) {
+					dev_err(hsi_ctrl->dev, "No valid FIFO "
+						"id found for channel %d.\n",
+						channel);
+					return;
+				}
+				fifo_words_avail =
+					hsi_get_rx_fifo_occupancy(hsi_ctrl,
+								fifo);
+				if (fifo_words_avail)
+					dev_dbg(hsi_ctrl->dev,
+						"WARNING: FIFO %d not empty "
+						"after DMA copy, remaining "
+						"%d/%d frames\n",
+						fifo, fifo_words_avail,
+						HSI_HSR_FIFO_SIZE);
+			}
+			/* Re-enable interrupts for polling if needed */
+			if (ch->flags & HSI_CH_RX_POLL)
+				hsi_driver_enable_read_interrupt(ch, NULL);
+		} else {	/* Write path */
+			dma_h = hsi_inl(base, HSI_GDD_CSSA_REG(gdd_lch));
+			size = hsi_inw(base, HSI_GDD_CEN_REG(gdd_lch)) * 4;
+			dma_unmap_single(hsi_ctrl->dev, dma_h, size,
+					 DMA_TO_DEVICE);
+			ch = hsi_ctrl_get_ch(hsi_ctrl, port, channel);
+			hsi_reset_ch_write(ch);
+
+			dev_dbg(hsi_ctrl->dev, "Calling ch %d write callback "
+				"(size %d).\n", channel, size/4);
+			/* Drop the lock while in client code */
+			spin_unlock(&hsi_ctrl->lock);
+			ch->write_done(ch->dev, size / 4);
+			spin_lock(&hsi_ctrl->lock);
+		}
+	} else {
+		dev_err(hsi_ctrl->dev, "Time-out overflow Error on GDD transfer"
+			" on gdd channel %d\n", gdd_lch);
+		spin_unlock(&hsi_ctrl->lock);
+		/* TODO : need to perform a DMA soft reset */
+		hsi_port_event_handler(&hsi_ctrl->hsi_port[port - 1],
+				       HSI_EVENT_ERROR, NULL);
+		spin_lock(&hsi_ctrl->lock);
+	}
+}
+
+/* Dispatch all pending-and-enabled GDD interrupts to do_hsi_gdd_lch(),
+ * then acknowledge the serviced bits in one write.
+ * Returns the bitmask of events that were pending (0 if none).
+ * Called from the GDD tasklet with hsi_ctrl->lock held. */
+static u32 hsi_process_dma_event(struct hsi_dev *hsi_ctrl)
+{
+	void __iomem *base = hsi_ctrl->base;
+	unsigned int gdd_lch = 0;
+	u32 status_reg = 0;
+	u32 lch_served = 0;
+	unsigned int gdd_max_count = hsi_ctrl->gdd_chan_count;
+
+	/* Only consider interrupts that are both raised and enabled */
+	status_reg = hsi_inl(base, HSI_SYS_GDD_MPU_IRQ_STATUS_REG);
+	status_reg &= hsi_inl(base, HSI_SYS_GDD_MPU_IRQ_ENABLE_REG);
+	if (!status_reg) {
+		dev_dbg(hsi_ctrl->dev, "DMA : no event, exit.\n");
+		return 0;
+	}
+
+	for (gdd_lch = 0; gdd_lch < gdd_max_count; gdd_lch++) {
+		if (status_reg & HSI_GDD_LCH(gdd_lch)) {
+			do_hsi_gdd_lch(hsi_ctrl, gdd_lch);
+			lch_served |= HSI_GDD_LCH(gdd_lch);
+		}
+	}
+
+	/* Acknowledge interrupt for DMA channel */
+	hsi_outl(lch_served, base, HSI_SYS_GDD_MPU_IRQ_STATUS_REG);
+
+
+	return status_reg;
+}
+
+/* GDD bottom half: processes DMA completion events with clocks enabled
+ * and the controller lock held, then re-enables the GDD IRQ that the
+ * top-half handler disabled. */
+static void do_hsi_gdd_tasklet(unsigned long device)
+{
+	struct hsi_dev *hsi_ctrl = (struct hsi_dev *)device;
+
+	dev_dbg(hsi_ctrl->dev, "DMA Tasklet : clock_enabled=%d\n",
+		hsi_ctrl->clock_enabled);
+
+	spin_lock(&hsi_ctrl->lock);
+	hsi_clocks_enable(hsi_ctrl->dev, __func__);
+	/* Flag lets other code know callbacks may run from this context */
+	hsi_ctrl->in_dma_tasklet = true;
+
+	hsi_process_dma_event(hsi_ctrl);
+
+	hsi_ctrl->in_dma_tasklet = false;
+	hsi_clocks_disable(hsi_ctrl->dev, __func__);
+	spin_unlock(&hsi_ctrl->lock);
+
+	enable_irq(hsi_ctrl->gdd_irq);
+}
+
+/* GDD top half: defers all work to the tasklet; the IRQ line stays
+ * disabled until the tasklet has cleared the IRQ status register. */
+static irqreturn_t hsi_gdd_mpu_handler(int irq, void *p)
+{
+	struct hsi_dev *hsi_ctrl = p;
+
+	tasklet_hi_schedule(&hsi_ctrl->hsi_gdd_tasklet);
+
+	/* Disable interrupt until Bottom Half has cleared the IRQ status */
+	/* register */
+	disable_irq_nosync(hsi_ctrl->gdd_irq);
+
+	return IRQ_HANDLED;
+}
+
+/* Set up the GDD tasklet and claim the GDD interrupt line.
+ * Return 0 on success, -EBUSY if the IRQ cannot be requested. */
+int __init hsi_gdd_init(struct hsi_dev *hsi_ctrl, const char *irq_name)
+{
+	tasklet_init(&hsi_ctrl->hsi_gdd_tasklet, do_hsi_gdd_tasklet,
+		     (unsigned long)hsi_ctrl);
+
+	dev_info(hsi_ctrl->dev, "Registering IRQ %s (%d)\n",
+		 irq_name, hsi_ctrl->gdd_irq);
+
+	if (request_irq(hsi_ctrl->gdd_irq, hsi_gdd_mpu_handler,
+			IRQF_NO_SUSPEND | IRQF_TRIGGER_HIGH,
+			irq_name, hsi_ctrl) < 0) {
+		dev_err(hsi_ctrl->dev, "FAILED to request GDD IRQ %d\n",
+			hsi_ctrl->gdd_irq);
+		return -EBUSY;
+	}
+
+	return 0;
+}
+
+/* Tear down the GDD tasklet and release the GDD interrupt line. */
+void hsi_gdd_exit(struct hsi_dev *hsi_ctrl)
+{
+	tasklet_kill(&hsi_ctrl->hsi_gdd_tasklet);
+	free_irq(hsi_ctrl->gdd_irq, hsi_ctrl);
+}
diff --git a/drivers/omap_hsi/hsi_driver_fifo.c b/drivers/omap_hsi/hsi_driver_fifo.c
new file mode 100644
index 0000000..aa33a1a
--- /dev/null
+++ b/drivers/omap_hsi/hsi_driver_fifo.c
@@ -0,0 +1,325 @@
+/*
+ * hsi_driver_fifo.c
+ *
+ * Implements HSI module fifo management.
+ *
+ * Copyright (C) 2009 Texas Instruments, Inc.
+ *
+ * Author: Sebastien JAN <s-jan@ti.com>
+ *
+ * This package is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
+ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
+ */
+
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/gpio.h>
+#include <linux/list.h>
+#include "hsi_driver.h"
+
+/**
+ * hsi_fifo_get_id - Get fifo index corresponding to (port, channel)
+ * @hsi_ctrl - HSI controller data
+ * @channel - channel used
+ * @port - HSI port used
+ *
+ * Returns the fifo index associated to the provided (port, channel),
+ * or a negative error code on invalid parameters or unknown mapping.
+ * Notes: 1) The fifo <=> (port, channel) correspondence depends on the
+ *	  selected SW strategy for channels mapping (fifo management).
+ *	  2) the mapping is identical for Read and Write path.
+ * This exclusively applies to HSI devices.
+ */
+int hsi_fifo_get_id(struct hsi_dev *hsi_ctrl, unsigned int channel,
+		    unsigned int port)
+{
+	int fifo_index;
+
+	if (likely((channel < HSI_CHANNELS_MAX) && (port >= 1) &&
+		   (port <= 2))) {
+		switch (hsi_ctrl->fifo_mapping_strategy) {
+		case HSI_FIFO_MAPPING_ALL_PORT1:
+			/* All FIFOs belong to port 1 in this strategy */
+			fifo_index = (port == 1) ? (int)channel : -EINVAL;
+			break;
+		case HSI_FIFO_MAPPING_SSI:
+			/* 8 FIFOs per port, SSI compatible layout */
+			fifo_index = (channel < 8) ?
+				(int)(channel + 8 * (port - 1)) : -EINVAL;
+			break;
+		default:
+			/* Mapping strategy not configured */
+			fifo_index = -EPERM;
+			break;
+		}
+	} else {
+		fifo_index = -EINVAL;
+	}
+
+	if (unlikely(fifo_index < 0))
+		dev_err(hsi_ctrl->dev, "Cannot map a FIFO to the requested "
+			"params: channel:%d, port:%d; ERR=%d\n", channel, port,
+			fifo_index);
+
+	return fifo_index;
+}
+
+/**
+ * hsi_fifo_get_chan - Get (port, channel) from a fifo index
+ * @hsi_ctrl - HSI controller data
+ * @fifo - HSI fifo used (0..HSI_HST_FIFO_COUNT)
+ * @channel - related channel if any (0..)
+ * @port - related port if any (1..2)
+ *
+ * Returns 0 in case of success, and an error code (< 0) else.
+ * Notes: 1) The fifo <=> (port, channel) correspondence depends on the
+ *	  selected SW strategy for channels mapping (fifo management).
+ *	  2) the mapping is identical for Read and Write path.
+ * This exclusively applies to HSI devices.
+ */
+int hsi_fifo_get_chan(struct hsi_dev *hsi_ctrl, unsigned int fifo,
+		      unsigned int *channel, unsigned int *port)
+{
+	int err = 0;
+
+	if (fifo >= HSI_HST_FIFO_COUNT) {
+		err = -EINVAL;
+	} else {
+		switch (hsi_ctrl->fifo_mapping_strategy) {
+		case HSI_FIFO_MAPPING_ALL_PORT1:
+			/* Every FIFO maps 1:1 onto a port 1 channel */
+			*channel = fifo;
+			*port = 1;
+			break;
+		case HSI_FIFO_MAPPING_SSI:
+			/* FIFOs 0..7 -> port 1, FIFOs 8..15 -> port 2 */
+			if (fifo < 8) {
+				*channel = fifo;
+				*port = 1;
+			} else {
+				*channel = fifo - 8;
+				*port = 2;
+			}
+			break;
+		default:
+			/* Mapping strategy not configured */
+			err = -EPERM;
+			break;
+		}
+	}
+
+	if (unlikely(err < 0))
+		dev_err(hsi_ctrl->dev, "Cannot map a channel / port to the "
+			"requested params: fifo:%d; ERR=%d\n", fifo, err);
+
+	return err;
+}
+
+/**
+ * hsi_fifo_mapping - Configures the HSI FIFO mapping registers.
+ * @hsi_ctrl - HSI controller data
+ * @mtype - mapping strategy (HSI_FIFO_MAPPING_ALL_PORT1 or
+ *	    HSI_FIFO_MAPPING_SSI)
+ *
+ * Returns 0 in case of success, and an error code (< 0) else.
+ * Configures the HSI FIFO mapping registers. Several mapping strategies are
+ * proposed. Updates hsi_ctrl->fifo_mapping_strategy to the applied strategy
+ * (or HSI_FIFO_MAPPING_UNDEF on a bad request).
+ * Note: The mapping is identical for Read and Write path.
+ * This exclusively applies to HSI devices.
+ */
+int hsi_fifo_mapping(struct hsi_dev *hsi_ctrl, unsigned int mtype)
+{
+	int err = 0;
+	void __iomem *base = hsi_ctrl->base;
+	int i;
+	unsigned int channel, port;
+
+	if (mtype == HSI_FIFO_MAPPING_ALL_PORT1) {
+		/* All FIFOs assigned to port 1, one channel per FIFO.
+		 * The port field is written as 0 in the mapping registers
+		 * (HW encoding), while the SW convention uses port 1. */
+		channel = 0;
+		for (i = 0; i < HSI_HST_FIFO_COUNT; i++) {
+			/* TX mapping also carries a FIFO threshold value */
+			hsi_outl(HSI_MAPPING_ENABLE |
+				 (channel << HSI_MAPPING_CH_NUMBER_OFFSET) |
+				 (0 << HSI_MAPPING_PORT_NUMBER_OFFSET) |
+				 HSI_HST_MAPPING_THRESH_VALUE,
+				 base, HSI_HST_MAPPING_FIFO_REG(i));
+			hsi_outl(HSI_MAPPING_ENABLE |
+				 (channel << HSI_MAPPING_CH_NUMBER_OFFSET) |
+				 (0 << HSI_MAPPING_PORT_NUMBER_OFFSET),
+				 base, HSI_HSR_MAPPING_FIFO_REG(i));
+			channel++;
+		}
+		/* Only log when the strategy is being set for the first time */
+		if (hsi_ctrl->fifo_mapping_strategy == HSI_FIFO_MAPPING_UNDEF)
+			dev_dbg(hsi_ctrl->dev, "Fifo mapping : All FIFOs for "
+				"Port1\n");
+		hsi_ctrl->fifo_mapping_strategy = HSI_FIFO_MAPPING_ALL_PORT1;
+	} else if (mtype == HSI_FIFO_MAPPING_SSI) {
+		/* SSI compatible mapping: FIFOs 0..7 on port encoding 0,
+		 * FIFOs 8..15 on port encoding 1, channel restarts at 0 */
+		channel = 0;
+		port = 0;
+		for (i = 0; i < HSI_HST_FIFO_COUNT; i++) {
+			hsi_outl(HSI_MAPPING_ENABLE |
+				 (channel << HSI_MAPPING_CH_NUMBER_OFFSET) |
+				 (port << HSI_MAPPING_PORT_NUMBER_OFFSET) |
+				 HSI_HST_MAPPING_THRESH_VALUE,
+				 base, HSI_HST_MAPPING_FIFO_REG(i));
+			hsi_outl(HSI_MAPPING_ENABLE |
+				 (channel << HSI_MAPPING_CH_NUMBER_OFFSET) |
+				 (port << HSI_MAPPING_PORT_NUMBER_OFFSET),
+				 base, HSI_HSR_MAPPING_FIFO_REG(i));
+			channel++;
+			if (channel == 8) {
+				channel = 0;
+				port = 1;
+			}
+		}
+
+		if (hsi_ctrl->fifo_mapping_strategy == HSI_FIFO_MAPPING_UNDEF)
+			dev_dbg(hsi_ctrl->dev, "Fifo mapping : 8 FIFOs per Port"
+				" (SSI compatible mode)\n");
+		hsi_ctrl->fifo_mapping_strategy = HSI_FIFO_MAPPING_SSI;
+	} else {
+		hsi_ctrl->fifo_mapping_strategy = HSI_FIFO_MAPPING_UNDEF;
+		dev_err(hsi_ctrl->dev, "Bad Fifo strategy request : %d\n",
+			mtype);
+		err = -EINVAL;
+	}
+
+	return err;
+}
+
+/**
+ * hsi_hst_bufstate_f_reg - Return the proper HSI_HST_BUFSTATE register offset
+ * @hsi_ctrl - HSI controller data
+ * @port - HSI port used
+ * @channel - channel used
+ *
+ * Returns the HSI_HST_BUFSTATE register offset, or a negative error code
+ * if no FIFO can be mapped to (port, channel).
+ * Note: indexing of BUFSTATE registers is different on SSI and HSI:
+ *	 On SSI: it is linked to the ports
+ *	 On HSI: it is linked to the FIFOs (and depend on the SW strategy)
+ */
+long hsi_hst_bufstate_f_reg(struct hsi_dev *hsi_ctrl,
+			    unsigned int port, unsigned int channel)
+{
+	int fifo;
+
+	/* SSI devices index BUFSTATE registers by port */
+	if (!hsi_driver_device_is_hsi(to_platform_device(hsi_ctrl->dev)))
+		return HSI_HST_BUFSTATE_REG(port);
+
+	/* HSI devices index BUFSTATE registers by FIFO */
+	fifo = hsi_fifo_get_id(hsi_ctrl, channel, port);
+	if (unlikely(fifo < 0)) {
+		dev_err(hsi_ctrl->dev,
+			"hsi_hst_bufstate_f_reg ERROR : %d\n", fifo);
+		return fifo;
+	}
+
+	return HSI_HST_BUFSTATE_FIFO_REG(fifo);
+}
+
+/**
+ * hsi_hsr_bufstate_f_reg - Return the proper HSI_HSR_BUFSTATE register offset
+ * @hsi_ctrl - HSI controller data
+ * @port - HSI port used
+ * @channel - channel used
+ *
+ * Returns the HSI_HSR_BUFSTATE register offset, or a negative error code
+ * if no FIFO can be mapped to (port, channel).
+ * Note: indexing of BUFSTATE registers is different on SSI and HSI:
+ *	 On SSI: it is linked to the ports
+ *	 On HSI: it is linked to the FIFOs (and depend on the SW strategy)
+ */
+long hsi_hsr_bufstate_f_reg(struct hsi_dev *hsi_ctrl,
+			    unsigned int port, unsigned int channel)
+{
+	int fifo;
+
+	/* SSI devices index BUFSTATE registers by port */
+	if (!hsi_driver_device_is_hsi(to_platform_device(hsi_ctrl->dev)))
+		return HSI_HSR_BUFSTATE_REG(port);
+
+	/* HSI devices index BUFSTATE registers by FIFO */
+	fifo = hsi_fifo_get_id(hsi_ctrl, channel, port);
+	if (unlikely(fifo < 0)) {
+		dev_err(hsi_ctrl->dev,
+			"hsi_hsr_bufstate_f_reg ERROR : %d\n", fifo);
+		return fifo;
+	}
+
+	return HSI_HSR_BUFSTATE_FIFO_REG(fifo);
+}
+
+/**
+ * hsi_hst_buffer_reg - Return the proper HSI_HST_BUFFER register offset
+ * @hsi_ctrl - HSI controller data
+ * @port - HSI port used
+ * @channel - channel used
+ *
+ * Returns the HSI_HST_BUFFER register offset, or a negative error code
+ * if no FIFO can be mapped to (port, channel).
+ * Note: indexing of BUFFER registers is different on SSI and HSI:
+ *	 On SSI: it is linked to the ports
+ *	 On HSI: it is linked to the FIFOs (and depend on the SW strategy)
+ */
+long hsi_hst_buffer_reg(struct hsi_dev *hsi_ctrl,
+			unsigned int port, unsigned int channel)
+{
+	int fifo;
+	if (hsi_driver_device_is_hsi(to_platform_device(hsi_ctrl->dev))) {
+		fifo = hsi_fifo_get_id(hsi_ctrl, channel, port);
+		if (unlikely(fifo < 0)) {
+			/* Fixed copy-paste: error used to name
+			 * hsi_hst_bufstate_f_reg instead of this function */
+			dev_err(hsi_ctrl->dev,
+				"hsi_hst_buffer_reg ERROR : %d\n", fifo);
+			return fifo;
+		} else
+			return HSI_HST_BUFFER_FIFO_REG(fifo);
+	} else {
+		return HSI_HST_BUFFER_CH_REG(port, channel);
+	}
+}
+
+/**
+ * hsi_hsr_buffer_reg - Return the proper HSI_HSR_BUFFER register offset
+ * @hsi_ctrl - HSI controller data
+ * @port - HSI port used
+ * @channel - channel used
+ *
+ * Returns the HSI_HSR_BUFFER register offset, or a negative error code
+ * if no FIFO can be mapped to (port, channel).
+ * Note: indexing of BUFFER registers is different on SSI and HSI:
+ *	 On SSI: it is linked to the ports
+ *	 On HSI: it is linked to the FIFOs (and depend on the SW strategy)
+ */
+long hsi_hsr_buffer_reg(struct hsi_dev *hsi_ctrl,
+			unsigned int port, unsigned int channel)
+{
+	int fifo;
+	if (hsi_driver_device_is_hsi(to_platform_device(hsi_ctrl->dev))) {
+		fifo = hsi_fifo_get_id(hsi_ctrl, channel, port);
+		if (unlikely(fifo < 0)) {
+			/* Fixed copy-paste: error used to name
+			 * hsi_hsr_bufstate_f_reg instead of this function */
+			dev_err(hsi_ctrl->dev,
+				"hsi_hsr_buffer_reg ERROR : %d\n", fifo);
+			return fifo;
+		} else
+			return HSI_HSR_BUFFER_FIFO_REG(fifo);
+	} else {
+		return HSI_HSR_BUFFER_CH_REG(port, channel);
+	}
+}
+
+/**
+ * hsi_get_rx_fifo_occupancy - Return the size of data remaining
+ *			       in the given FIFO
+ * @hsi_ctrl - HSI controller data
+ * @fifo - FIFO to look at
+ *
+ * Returns the number of frames (32bits) remaining in the FIFO
+ */
+u8 hsi_get_rx_fifo_occupancy(struct hsi_dev *hsi_ctrl, u8 fifo)
+{
+	void __iomem *base = hsi_ctrl->base;
+	int hsr_mapping, mapping_words;
+
+	/* NOTE(review): the HSR mapping register is read but the field is
+	 * extracted with the HST threshold offset
+	 * (HSI_HST_MAPPING_THRESH_OFFSET) - confirm against the HSI TRM
+	 * that the HST and HSR register layouts match here. */
+	hsr_mapping = hsi_inl(base, HSI_HSR_MAPPING_FIFO_REG(fifo));
+	mapping_words = (hsr_mapping >> HSI_HST_MAPPING_THRESH_OFFSET) & 0xF;
+	return mapping_words;
+}
+
diff --git a/drivers/omap_hsi/hsi_driver_gpio.c b/drivers/omap_hsi/hsi_driver_gpio.c
new file mode 100644
index 0000000..4c8810b
--- /dev/null
+++ b/drivers/omap_hsi/hsi_driver_gpio.c
@@ -0,0 +1,75 @@
+/*
+ * hsi_driver_gpio.c
+ *
+ * Implements HSI GPIO related functionality. (i.e: wake lines management)
+ *
+ * Copyright (C) 2007-2008 Nokia Corporation. All rights reserved.
+ * Copyright (C) 2009 Texas Instruments, Inc.
+ *
+ * Author: Carlos Chinea <carlos.chinea@nokia.com>
+ * Author: Sebastien JAN <s-jan@ti.com>
+ *
+ * This package is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
+ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
+ */
+
+#include <linux/gpio.h>
+#include "hsi_driver.h"
+
+/**
+ * do_hsi_cawake_tasklet - bottom half for the CAWAKE GPIO interrupt
+ * @hsi_p: pointer to the struct hsi_port, cast to unsigned long
+ *
+ * Samples the current CAWAKE line state and runs the CAWAKE processing,
+ * with the controller lock held and the functional clocks enabled.
+ */
+static void do_hsi_cawake_tasklet(unsigned long hsi_p)
+{
+	struct hsi_port *port = (struct hsi_port *)hsi_p;
+	struct hsi_dev *hsi_ctrl = port->hsi_controller;
+
+	spin_lock(&hsi_ctrl->lock);
+	hsi_clocks_enable(hsi_ctrl->dev, __func__);
+	port->in_cawake_tasklet = true;
+
+	/* Re-read the line: it may have toggled since the IRQ fired */
+	port->cawake_status = hsi_get_cawake(port);
+	hsi_do_cawake_process(port);
+
+	port->in_cawake_tasklet = false;
+	hsi_clocks_disable(hsi_ctrl->dev, __func__);
+	spin_unlock(&hsi_ctrl->lock);
+}
+
+/**
+ * hsi_cawake_isr - top half for the CAWAKE GPIO interrupt
+ * @irq: IRQ number
+ * @hsi_p: pointer to the struct hsi_port
+ *
+ * Defers all processing to the CAWAKE tasklet.
+ */
+static irqreturn_t hsi_cawake_isr(int irq, void *hsi_p)
+{
+	struct hsi_port *port = hsi_p;
+
+	tasklet_hi_schedule(&port->cawake_tasklet);
+
+	return IRQ_HANDLED;
+}
+
+int __init hsi_cawake_init(struct hsi_port *port, const char *irq_name)
+{
+ tasklet_init(&port->cawake_tasklet, do_hsi_cawake_tasklet,
+ (unsigned long)port);
+
+ if (request_irq(port->cawake_gpio_irq, hsi_cawake_isr,
+ IRQF_DISABLED | IRQF_TRIGGER_RISING |
+ IRQF_TRIGGER_FALLING, irq_name, port) < 0) {
+ dev_err(port->hsi_controller->dev,
+ "FAILED to request %s GPIO IRQ %d on port %d\n",
+ irq_name, port->cawake_gpio_irq, port->port_number);
+ return -EBUSY;
+ }
+
+ return 0;
+}
+
+/**
+ * hsi_cawake_exit - release the CAWAKE GPIO IRQ and tasklet
+ * @port: HSI port to release
+ *
+ * Nothing to do when no CAWAKE GPIO is used (case SSI with GPIO, or
+ * HSI with IO ring wakeup).
+ * Free the IRQ before killing the tasklet so that a late CAWAKE
+ * interrupt cannot re-schedule the tasklet after it has been killed
+ * (the previous order left that window open).
+ */
+void hsi_cawake_exit(struct hsi_port *port)
+{
+	if (port->cawake_gpio < 0)
+		return;
+
+	free_irq(port->cawake_gpio_irq, port);
+	tasklet_kill(&port->cawake_tasklet);
+}
diff --git a/drivers/omap_hsi/hsi_driver_if.c b/drivers/omap_hsi/hsi_driver_if.c
new file mode 100644
index 0000000..19012e5
--- /dev/null
+++ b/drivers/omap_hsi/hsi_driver_if.c
@@ -0,0 +1,965 @@
+/*
+ * hsi_driver_if.c
+ *
+ * Implements HSI hardware driver interfaces for the upper layers.
+ *
+ * Copyright (C) 2007-2008 Nokia Corporation. All rights reserved.
+ * Copyright (C) 2009 Texas Instruments, Inc.
+ *
+ * Author: Carlos Chinea <carlos.chinea@nokia.com>
+ * Author: Sebastien JAN <s-jan@ti.com>
+ *
+ * This package is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
+ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
+ */
+
+#include "hsi_driver.h"
+
+#define NOT_SET (-1)
+
+/**
+ * hsi_set_rx_divisor - manage an HSR divisor update
+ * @sport: HSI port to configure
+ * @cfg: RX context holding the requested divisor/counters
+ *
+ * A special divisor value (HSI_HSR_DIVISOR_AUTO) allows switching to
+ * auto-divisor mode in Rx (but with error counters deactivated). This
+ * function implements the transitions to/from this mode, for both HSI
+ * (divisor + counters registers) and SSI (timeout register) devices.
+ * A divisor of NOT_SET leaves the hardware untouched. Returns 0.
+ */
+int hsi_set_rx_divisor(struct hsi_port *sport, struct hsr_ctx *cfg)
+{
+	struct hsi_dev *hsi_ctrl = sport->hsi_controller;
+	void __iomem *base = hsi_ctrl->base;
+	int port = sport->port_number;
+	struct platform_device *pdev = to_platform_device(hsi_ctrl->dev);
+
+	if (cfg->divisor == NOT_SET)
+		return 0;
+
+	if (hsi_driver_device_is_hsi(pdev)) {
+		if (cfg->divisor == HSI_HSR_DIVISOR_AUTO &&
+		    sport->counters_on) {
+			/* auto mode: deactivate counters + set divisor = 0 */
+			/* Save current counters so they can be restored */
+			sport->reg_counters = hsi_inl(base, HSI_HSR_COUNTERS_REG
+							(port));
+			sport->counters_on = 0;
+			hsi_outl(0, base, HSI_HSR_COUNTERS_REG(port));
+			hsi_outl(0, base, HSI_HSR_DIVISOR_REG(port));
+			dev_dbg(hsi_ctrl->dev, "Switched to HSR auto mode\n");
+		} else if (cfg->divisor != HSI_HSR_DIVISOR_AUTO) {
+			/* Divisor set mode: use counters */
+			/* Leave auto mode: use new counters values */
+			/* NOTE(review): the requested counters value is
+			 * overridden with 0xFFFFF here - confirm intended */
+			cfg->counters = 0xFFFFF;
+			sport->reg_counters = cfg->counters;
+			sport->counters_on = 1;
+			hsi_outl(cfg->counters, base,
+				 HSI_HSR_COUNTERS_REG(port));
+			hsi_outl(cfg->divisor, base, HSI_HSR_DIVISOR_REG(port));
+			dev_dbg(hsi_ctrl->dev, "Left HSR auto mode. "
+				"Counters=0x%08x, Divisor=0x%08x\n",
+				cfg->counters, cfg->divisor);
+		}
+	} else {
+		if (cfg->divisor == HSI_HSR_DIVISOR_AUTO &&
+		    sport->counters_on) {
+			/* auto mode: deactivate timeout */
+			sport->reg_counters = hsi_inl(base,
+						      SSI_TIMEOUT_REG(port));
+			sport->counters_on = 0;
+			hsi_outl(0, base, SSI_TIMEOUT_REG(port));
+			dev_dbg(hsi_ctrl->dev, "Deactivated SSR timeout\n");
+		} else if (cfg->divisor == HSI_SSR_DIVISOR_USE_TIMEOUT) {
+			/* Leave auto mode: use new counters values */
+			sport->reg_counters = cfg->counters;
+			sport->counters_on = 1;
+			hsi_outl(cfg->counters, base, SSI_TIMEOUT_REG(port));
+			dev_dbg(hsi_ctrl->dev, "Left SSR auto mode. "
+				"Timeout=0x%08x\n", cfg->counters);
+		}
+	}
+
+	return 0;
+}
+
+/**
+ * hsi_set_rx - validate and apply an RX (HSR) configuration
+ * @sport: HSI port to configure
+ * @cfg: requested RX context; fields set to NOT_SET (-1) are skipped
+ *
+ * Returns 0 on success, -EINVAL on an out-of-range field, or the
+ * result of hsi_set_rx_divisor(). Validation is performed before any
+ * register is written.
+ */
+int hsi_set_rx(struct hsi_port *sport, struct hsr_ctx *cfg)
+{
+	struct hsi_dev *hsi_ctrl = sport->hsi_controller;
+	void __iomem *base = hsi_ctrl->base;
+	int port = sport->port_number;
+	struct platform_device *pdev = to_platform_device(hsi_ctrl->dev);
+
+	/* Mode must be STREAM, FRAME or SLEEP (or left unset) */
+	if (((cfg->mode & HSI_MODE_VAL_MASK) != HSI_MODE_STREAM) &&
+	    ((cfg->mode & HSI_MODE_VAL_MASK) != HSI_MODE_FRAME) &&
+	    ((cfg->mode & HSI_MODE_VAL_MASK) != HSI_MODE_SLEEP) &&
+	    (cfg->mode != NOT_SET))
+		return -EINVAL;
+
+	if (hsi_driver_device_is_hsi(pdev)) {
+		/* HSI supports SYNCHRONIZED and PIPELINED flow */
+		if (((cfg->flow & HSI_FLOW_VAL_MASK) != HSI_FLOW_SYNCHRONIZED)
+		    && ((cfg->flow & HSI_FLOW_VAL_MASK) != HSI_FLOW_PIPELINED)
+		    && (cfg->flow != NOT_SET))
+			return -EINVAL;
+		/* HSI only supports payload size of 32bits */
+		if ((cfg->frame_size != HSI_FRAMESIZE_MAX) &&
+		    (cfg->frame_size != NOT_SET))
+			return -EINVAL;
+	} else {
+		/* SSI supports SYNCHRONIZED flow only */
+		if (((cfg->flow & HSI_FLOW_VAL_MASK) != HSI_FLOW_SYNCHRONIZED)
+		    && (cfg->flow != NOT_SET))
+			return -EINVAL;
+		/* HSI only supports payload size of 32bits */
+		if ((cfg->frame_size != HSI_FRAMESIZE_MAX) &&
+		    (cfg->frame_size != NOT_SET))
+			return -EINVAL;
+	}
+
+	/* NOTE(review): relies on the NOT_SET (-1) sentinel comparing
+	 * unequal to valid values - confirm hsr_ctx field signedness */
+	if ((cfg->channels == 0) ||
+	    ((cfg->channels > sport->max_ch) && (cfg->channels != NOT_SET)))
+		return -EINVAL;
+
+	if (hsi_driver_device_is_hsi(pdev)) {
+		if ((cfg->divisor > HSI_MAX_RX_DIVISOR) &&
+		    (cfg->divisor != HSI_HSR_DIVISOR_AUTO) &&
+		    (cfg->divisor != NOT_SET))
+			return -EINVAL;
+	}
+
+	/* mode and flow share the HSR mode register: write only when both
+	 * are provided */
+	if ((cfg->mode != NOT_SET) && (cfg->flow != NOT_SET))
+		hsi_outl(cfg->mode | ((cfg->flow & HSI_FLOW_VAL_MASK)
+				      << HSI_FLOW_OFFSET), base,
+			 HSI_HSR_MODE_REG(port));
+
+	if (cfg->frame_size != NOT_SET)
+		hsi_outl(cfg->frame_size, base, HSI_HSR_FRAMESIZE_REG(port));
+
+	if (cfg->channels != NOT_SET) {
+		/* channels must be a power of two (single bit set) */
+		if ((cfg->channels & (-cfg->channels)) ^ cfg->channels)
+			return -EINVAL;
+		else
+			hsi_outl(cfg->channels, base,
+				 HSI_HSR_CHANNELS_REG(port));
+	}
+
+	return hsi_set_rx_divisor(sport, cfg);
+}
+
+/**
+ * hsi_get_rx - read back the current RX (HSR) configuration
+ * @sport: HSI port to query
+ * @cfg: RX context filled from the hardware registers
+ *
+ * On HSI devices the divisor and counters registers are read; on SSI
+ * devices only the timeout register exists (divisor is left untouched).
+ */
+void hsi_get_rx(struct hsi_port *sport, struct hsr_ctx *cfg)
+{
+	struct hsi_dev *hsi_ctrl = sport->hsi_controller;
+	void __iomem *base = hsi_ctrl->base;
+	int port = sport->port_number;
+	struct platform_device *pdev = to_platform_device(hsi_ctrl->dev);
+
+	/* mode and flow are two fields of the same mode register */
+	cfg->mode = hsi_inl(base, HSI_HSR_MODE_REG(port)) & HSI_MODE_VAL_MASK;
+	cfg->flow = (hsi_inl(base, HSI_HSR_MODE_REG(port)) & HSI_FLOW_VAL_MASK)
+	    >> HSI_FLOW_OFFSET;
+	cfg->frame_size = hsi_inl(base, HSI_HSR_FRAMESIZE_REG(port));
+	cfg->channels = hsi_inl(base, HSI_HSR_CHANNELS_REG(port));
+	if (hsi_driver_device_is_hsi(pdev)) {
+		cfg->divisor = hsi_inl(base, HSI_HSR_DIVISOR_REG(port));
+		cfg->counters = hsi_inl(base, HSI_HSR_COUNTERS_REG(port));
+	} else {
+		cfg->counters = hsi_inl(base, SSI_TIMEOUT_REG(port));
+	}
+}
+
+/**
+ * hsi_set_tx - validate and apply a TX (HST) configuration
+ * @sport: HSI port to configure
+ * @cfg: requested TX context; fields set to NOT_SET (-1) are skipped
+ *
+ * Returns 0 on success, -EINVAL on an out-of-range field. Validation
+ * is performed before any register is written.
+ */
+int hsi_set_tx(struct hsi_port *sport, struct hst_ctx *cfg)
+{
+	struct hsi_dev *hsi_ctrl = sport->hsi_controller;
+	void __iomem *base = hsi_ctrl->base;
+	int port = sport->port_number;
+	struct platform_device *pdev = to_platform_device(hsi_ctrl->dev);
+	unsigned int max_divisor = hsi_driver_device_is_hsi(pdev) ?
+	    HSI_MAX_TX_DIVISOR : HSI_SSI_MAX_TX_DIVISOR;
+
+	/* Mode must be STREAM or FRAME (or left unset) */
+	if (((cfg->mode & HSI_MODE_VAL_MASK) != HSI_MODE_STREAM) &&
+	    ((cfg->mode & HSI_MODE_VAL_MASK) != HSI_MODE_FRAME) &&
+	    (cfg->mode != NOT_SET))
+		return -EINVAL;
+
+	if (hsi_driver_device_is_hsi(pdev)) {
+		/* HSI supports SYNCHRONIZED and PIPELINED flow */
+		if (((cfg->flow & HSI_FLOW_VAL_MASK) != HSI_FLOW_SYNCHRONIZED)
+		    && ((cfg->flow & HSI_FLOW_VAL_MASK) != HSI_FLOW_PIPELINED)
+		    && (cfg->flow != NOT_SET))
+			return -EINVAL;
+		/* HSI only supports payload size of 32bits */
+		if ((cfg->frame_size != HSI_FRAMESIZE_MAX) &&
+		    (cfg->frame_size != NOT_SET))
+			return -EINVAL;
+	} else {
+		/* SSI supports SYNCHRONIZED flow only */
+		if (((cfg->flow & HSI_FLOW_VAL_MASK) != HSI_FLOW_SYNCHRONIZED)
+		    && (cfg->flow != NOT_SET))
+			return -EINVAL;
+
+		if ((cfg->frame_size > HSI_FRAMESIZE_MAX) &&
+		    (cfg->frame_size != NOT_SET))
+			return -EINVAL;
+	}
+
+	if ((cfg->channels == 0) ||
+	    ((cfg->channels > sport->max_ch) && (cfg->channels != NOT_SET)))
+		return -EINVAL;
+
+	if ((cfg->divisor > max_divisor) && (cfg->divisor != NOT_SET))
+		return -EINVAL;
+
+	/* Fixed copy-paste bug: this guard previously tested
+	 * cfg->mode != NOT_SET, so an unset arb_mode was rejected (or an
+	 * invalid one accepted) depending on the unrelated mode field. */
+	if ((cfg->arb_mode != HSI_ARBMODE_ROUNDROBIN) &&
+	    (cfg->arb_mode != HSI_ARBMODE_PRIORITY) &&
+	    (cfg->arb_mode != NOT_SET))
+		return -EINVAL;
+
+	/* mode and flow share the HST mode register: write only when both
+	 * are provided. TX wake line is kept under SW control. */
+	if ((cfg->mode != NOT_SET) && (cfg->flow != NOT_SET))
+		hsi_outl(cfg->mode | ((cfg->flow & HSI_FLOW_VAL_MASK) <<
+				      HSI_FLOW_OFFSET) |
+			 HSI_HST_MODE_WAKE_CTRL_SW, base,
+			 HSI_HST_MODE_REG(port));
+
+	if (cfg->frame_size != NOT_SET)
+		hsi_outl(cfg->frame_size, base, HSI_HST_FRAMESIZE_REG(port));
+
+	if (cfg->channels != NOT_SET) {
+		/* channels must be a power of two (single bit set) */
+		if ((cfg->channels & (-cfg->channels)) ^ cfg->channels)
+			return -EINVAL;
+		else
+			hsi_outl(cfg->channels, base,
+				 HSI_HST_CHANNELS_REG(port));
+	}
+
+	if (cfg->divisor != NOT_SET)
+		hsi_outl(cfg->divisor, base, HSI_HST_DIVISOR_REG(port));
+
+	if (cfg->arb_mode != NOT_SET)
+		hsi_outl(cfg->arb_mode, base, HSI_HST_ARBMODE_REG(port));
+
+	return 0;
+}
+
+/**
+ * hsi_get_tx - read back the current TX (HST) configuration
+ * @sport: HSI port to query
+ * @cfg: TX context filled from the hardware registers
+ */
+void hsi_get_tx(struct hsi_port *sport, struct hst_ctx *cfg)
+{
+	struct hsi_dev *hsi_ctrl = sport->hsi_controller;
+	void __iomem *base = hsi_ctrl->base;
+	int port = sport->port_number;
+
+	/* mode and flow are two fields of the same mode register */
+	cfg->mode = hsi_inl(base, HSI_HST_MODE_REG(port)) & HSI_MODE_VAL_MASK;
+	cfg->flow = (hsi_inl(base, HSI_HST_MODE_REG(port)) & HSI_FLOW_VAL_MASK)
+	    >> HSI_FLOW_OFFSET;
+	cfg->frame_size = hsi_inl(base, HSI_HST_FRAMESIZE_REG(port));
+	cfg->channels = hsi_inl(base, HSI_HST_CHANNELS_REG(port));
+	cfg->divisor = hsi_inl(base, HSI_HST_DIVISOR_REG(port));
+	cfg->arb_mode = hsi_inl(base, HSI_HST_ARBMODE_REG(port));
+}
+
+/**
+ * hsi_open - open a hsi device channel.
+ * @dev - Reference to the hsi device channel to be opened.
+ *
+ * Returns 0 on success, -EINVAL on bad parameters or missing read/write
+ * callbacks, -EBUSY if already opened.
+ * On success the channel flags are reset to HSI_CH_OPEN and the port
+ * level CAWAKE / error / break interrupts are enabled.
+ */
+int hsi_open(struct hsi_device *dev)
+{
+	struct hsi_channel *ch;
+	struct hsi_port *port;
+	struct hsi_dev *hsi_ctrl;
+
+	if (!dev || !dev->ch) {
+		pr_err(LOG_NAME "Wrong HSI device %p\n", dev);
+		return -EINVAL;
+	}
+	dev_dbg(dev->device.parent, "%s ch %d\n", __func__, dev->n_ch);
+
+	ch = dev->ch;
+	/* Both callbacks are mandatory: completion is only signalled
+	 * through them */
+	if (!ch->read_done || !ch->write_done) {
+		dev_err(dev->device.parent,
+			"Trying to open with no (read/write) callbacks "
+			"registered\n");
+		return -EINVAL;
+	}
+	port = ch->hsi_port;
+	hsi_ctrl = port->hsi_controller;
+
+	spin_lock_bh(&hsi_ctrl->lock);
+	hsi_clocks_enable_channel(dev->device.parent, ch->channel_number,
+				  __func__);
+
+	if (ch->flags & HSI_CH_OPEN) {
+		dev_err(dev->device.parent,
+			"Port %d Channel %d already OPENED\n",
+			dev->n_p, dev->n_ch);
+		spin_unlock_bh(&hsi_ctrl->lock);
+		return -EBUSY;
+	}
+
+	/* Restart with flags cleaned up */
+	ch->flags = HSI_CH_OPEN;
+
+	hsi_driver_enable_interrupt(port, HSI_CAWAKEDETECTED | HSI_ERROROCCURED
+				    | HSI_BREAKDETECTED);
+
+	/* NOTE: error and break are port events and do not need to be
+	 * enabled for HSI extended enable register */
+
+	hsi_clocks_disable_channel(dev->device.parent, ch->channel_number,
+				   __func__);
+	spin_unlock_bh(&hsi_ctrl->lock);
+
+	return 0;
+}
+EXPORT_SYMBOL(hsi_open);
+
+/**
+ * hsi_write - write data into the hsi device channel
+ * @dev - reference to the hsi device channel to write into.
+ * @addr - pointer to a 32-bit word data to be written.
+ * @size - number of 32-bit word to be written.
+ *
+ * Return 0 on success, a negative value on failure.
+ * A success value only indicates that the request has been accepted.
+ * Transfer is only completed when the write_done callback is called.
+ *
+ * Single-word transfers use the interrupt path; larger transfers use
+ * DMA. On success the clocks stay enabled until the completion
+ * callback releases them.
+ */
+int hsi_write(struct hsi_device *dev, u32 *addr, unsigned int size)
+{
+	struct hsi_channel *ch;
+	int err;
+
+	if (unlikely(!dev)) {
+		pr_err(LOG_NAME "Null dev pointer in hsi_write\n");
+		return -EINVAL;
+	}
+
+	if (unlikely(!dev->ch || !addr || (size <= 0))) {
+		dev_err(dev->device.parent,
+			"Wrong parameters hsi_device %p data %p count %d",
+			dev, addr, size);
+		return -EINVAL;
+	}
+	dev_dbg(dev->device.parent, "%s ch %d, @%x, size %d u32\n", __func__,
+		dev->n_ch, (u32) addr, size);
+
+	if (unlikely(!(dev->ch->flags & HSI_CH_OPEN))) {
+		dev_err(dev->device.parent, "HSI device NOT open\n");
+		return -EINVAL;
+	}
+
+	ch = dev->ch;
+
+	spin_lock_bh(&ch->hsi_port->hsi_controller->lock);
+	if (pm_runtime_suspended(dev->device.parent) ||
+	    !ch->hsi_port->hsi_controller->clock_enabled)
+		dev_dbg(dev->device.parent,
+			"hsi_write with HSI clocks OFF, clock_enabled = %d\n",
+			ch->hsi_port->hsi_controller->clock_enabled);
+
+	hsi_clocks_enable_channel(dev->device.parent,
+				  ch->channel_number, __func__);
+
+	/* Only one outstanding write per channel */
+	if (ch->write_data.addr != NULL) {
+		dev_err(dev->device.parent, "# Invalid request - Write "
+			"operation pending port %d channel %d\n",
+			ch->hsi_port->port_number,
+			ch->channel_number);
+
+		hsi_clocks_disable_channel(dev->device.parent,
+					   ch->channel_number, __func__);
+		spin_unlock_bh(&ch->hsi_port->hsi_controller->lock);
+		return -EINVAL;
+	}
+
+	ch->write_data.addr = addr;
+	ch->write_data.size = size;
+	ch->write_data.lch = -1;
+
+	if (size == 1)
+		err = hsi_driver_enable_write_interrupt(ch, addr);
+	else
+		err = hsi_driver_write_dma(ch, addr, size);
+
+	if (unlikely(err < 0)) {
+		ch->write_data.addr = NULL;
+		ch->write_data.size = 0;
+		dev_err(dev->device.parent, "Failed to program write\n");
+		/* No transfer was started, so no completion callback will
+		 * ever run to release the clocks: balance the enable here
+		 * (previously the enable leaked on this path). */
+		hsi_clocks_disable_channel(dev->device.parent,
+					   ch->channel_number, __func__);
+	}
+
+	spin_unlock_bh(&ch->hsi_port->hsi_controller->lock);
+
+	/* On success, leave clocks enabled until transfer is complete */
+	/* (write callback is called) */
+	return err;
+}
+EXPORT_SYMBOL(hsi_write);
+
+/**
+ * hsi_read - read data from the hsi device channel
+ * @dev - hsi device channel reference to read data from.
+ * @addr - pointer to a 32-bit word data to store the data.
+ * @size - number of 32-bit word to be stored.
+ *
+ * Return 0 on success, a negative value on failure.
+ * A success value only indicates that the request has been accepted.
+ * Data is only available in the buffer when the read_done callback is called.
+ *
+ * Single-word transfers use the interrupt path; larger transfers use DMA.
+ */
+int hsi_read(struct hsi_device *dev, u32 *addr, unsigned int size)
+{
+	struct hsi_channel *ch;
+	int err;
+
+	if (unlikely(!dev)) {
+		pr_err(LOG_NAME "Null dev pointer in hsi_read\n");
+		return -EINVAL;
+	}
+
+	if (unlikely(!dev->ch || !addr || (size <= 0))) {
+		dev_err(dev->device.parent, "Wrong parameters "
+			"hsi_device %p data %p count %d", dev, addr, size);
+		return -EINVAL;
+	}
+/* Disabled debug trace left in place intentionally */
+#if 0
+	if (dev->n_ch == 0)
+		dev_info(dev->device.parent, "%s ch %d, @%x, size %d u32\n",
+			 __func__, dev->n_ch, (u32) addr, size);
+#endif
+	if (unlikely(!(dev->ch->flags & HSI_CH_OPEN))) {
+		dev_err(dev->device.parent, "HSI device NOT open\n");
+		return -EINVAL;
+	}
+
+	ch = dev->ch;
+
+	spin_lock_bh(&ch->hsi_port->hsi_controller->lock);
+	if (pm_runtime_suspended(dev->device.parent) ||
+	    !ch->hsi_port->hsi_controller->clock_enabled)
+		dev_dbg(dev->device.parent,
+			"hsi_read with HSI clocks OFF, clock_enabled = %d\n",
+			ch->hsi_port->hsi_controller->clock_enabled);
+
+	hsi_clocks_enable_channel(dev->device.parent, ch->channel_number,
+				  __func__);
+
+	/* Only one outstanding read per channel */
+	if (ch->read_data.addr != NULL) {
+		dev_err(dev->device.parent, "# Invalid request - Read "
+			"operation pending port %d channel %d\n",
+			ch->hsi_port->port_number,
+			ch->channel_number);
+		err = -EINVAL;
+		goto done;
+	}
+
+	ch->read_data.addr = addr;
+	ch->read_data.size = size;
+	ch->read_data.lch = -1;
+
+	if (size == 1)
+		err = hsi_driver_enable_read_interrupt(ch, addr);
+	else
+		err = hsi_driver_read_dma(ch, addr, size);
+
+	if (unlikely(err < 0)) {
+		ch->read_data.addr = NULL;
+		ch->read_data.size = 0;
+		dev_err(dev->device.parent, "Failed to program read\n");
+	}
+
+done:
+	hsi_clocks_disable_channel(dev->device.parent, ch->channel_number,
+				   __func__);
+	spin_unlock_bh(&ch->hsi_port->hsi_controller->lock);
+
+	return err;
+}
+EXPORT_SYMBOL(hsi_read);
+
+/**
+ * __hsi_write_cancel - cancel the write pending on a channel, if any
+ * @ch: HSI channel to cancel
+ *
+ * Dispatches on the pending transfer type: size 1 means the interrupt
+ * path was used, size > 1 the DMA path. Returns -ENODATA when nothing
+ * was pending, otherwise the cancel result.
+ * Caller must hold the controller lock with clocks enabled.
+ */
+int __hsi_write_cancel(struct hsi_channel *ch)
+{
+	int err = -ENODATA;
+	if (ch->write_data.size == 1)
+		err = hsi_driver_cancel_write_interrupt(ch);
+	else if (ch->write_data.size > 1)
+		err = hsi_driver_cancel_write_dma(ch);
+	else
+		dev_dbg(ch->dev->device.parent, "%s : Nothing to cancel %d\n",
+			__func__, ch->write_data.size);
+	/* NOTE(review): dev_err used for a normal trace - likely a debug
+	 * leftover that should be dev_dbg; confirm before changing */
+	dev_err(ch->dev->device.parent, "%s : %d\n", __func__, err);
+	return err;
+}
+
+/**
+ * hsi_write_cancel - Cancel pending write request.
+ * @dev - hsi device channel where to cancel the pending write.
+ *
+ * write_done() callback will not be called after success of this function.
+ *
+ * Return: -ENXIO : No DMA channel found for specified HSI channel
+ *	   -ECANCELED : write cancel success, data not transfered to TX FIFO
+ *	   0 : transfer is already over, data already transfered to TX FIFO
+ *	   -ENODEV : bad device or device not open
+ *
+ * Note: whatever returned value, write callback will not be called after
+ *	 write cancel.
+ */
+int hsi_write_cancel(struct hsi_device *dev)
+{
+	int err;
+	if (unlikely(!dev || !dev->ch)) {
+		pr_err(LOG_NAME "Wrong HSI device %p\n", dev);
+		return -ENODEV;
+	}
+	/* NOTE(review): dev_err used as an entry trace - likely should be
+	 * dev_dbg; confirm before changing */
+	dev_err(dev->device.parent, "%s ch %d\n", __func__, dev->n_ch);
+
+	if (unlikely(!(dev->ch->flags & HSI_CH_OPEN))) {
+		dev_err(dev->device.parent, "HSI device NOT open\n");
+		return -ENODEV;
+	}
+
+	spin_lock_bh(&dev->ch->hsi_port->hsi_controller->lock);
+	hsi_clocks_enable_channel(dev->device.parent, dev->ch->channel_number,
+				  __func__);
+
+	err = __hsi_write_cancel(dev->ch);
+
+	hsi_clocks_disable_channel(dev->device.parent, dev->ch->channel_number,
+				   __func__);
+	spin_unlock_bh(&dev->ch->hsi_port->hsi_controller->lock);
+	return err;
+}
+EXPORT_SYMBOL(hsi_write_cancel);
+
+/**
+ * __hsi_read_cancel - cancel the read pending on a channel, if any
+ * @ch: HSI channel to cancel
+ *
+ * Dispatches on the pending transfer type: size 1 means the interrupt
+ * path was used, size > 1 the DMA path. Returns -ENODATA when nothing
+ * was pending, otherwise the cancel result.
+ * Caller must hold the controller lock with clocks enabled.
+ */
+int __hsi_read_cancel(struct hsi_channel *ch)
+{
+	int err = -ENODATA;
+	if (ch->read_data.size == 1)
+		err = hsi_driver_cancel_read_interrupt(ch);
+	else if (ch->read_data.size > 1)
+		err = hsi_driver_cancel_read_dma(ch);
+	else
+		dev_dbg(ch->dev->device.parent, "%s : Nothing to cancel %d\n",
+			__func__, ch->read_data.size);
+
+	/* NOTE(review): dev_err used for a normal trace - likely a debug
+	 * leftover that should be dev_dbg; confirm before changing */
+	dev_err(ch->dev->device.parent, "%s : %d\n", __func__, err);
+	return err;
+}
+
+/**
+ * hsi_read_cancel - Cancel pending read request.
+ * @dev - hsi device channel where to cancel the pending read.
+ *
+ * read_done() callback will not be called after success of this function.
+ *
+ * Return: -ENXIO : No DMA channel found for specified HSI channel
+ *	   -ECANCELED : read cancel success, data not available at expected
+ *	   address.
+ *	   0 : transfer is already over, data already available at expected
+ *	   address.
+ *	   -ENODEV : bad device or device not open
+ *
+ * Note: whatever returned value, read callback will not be called after cancel.
+ */
+int hsi_read_cancel(struct hsi_device *dev)
+{
+	int err;
+	if (unlikely(!dev || !dev->ch)) {
+		pr_err(LOG_NAME "Wrong HSI device %p\n", dev);
+		return -ENODEV;
+	}
+	/* NOTE(review): dev_err used as an entry trace - likely should be
+	 * dev_dbg; confirm before changing */
+	dev_err(dev->device.parent, "%s ch %d\n", __func__, dev->n_ch);
+
+	if (unlikely(!(dev->ch->flags & HSI_CH_OPEN))) {
+		dev_err(dev->device.parent, "HSI device NOT open\n");
+		return -ENODEV;
+	}
+
+	spin_lock_bh(&dev->ch->hsi_port->hsi_controller->lock);
+	hsi_clocks_enable_channel(dev->device.parent, dev->ch->channel_number,
+				  __func__);
+
+	err = __hsi_read_cancel(dev->ch);
+
+	hsi_clocks_disable_channel(dev->device.parent, dev->ch->channel_number,
+				   __func__);
+	spin_unlock_bh(&dev->ch->hsi_port->hsi_controller->lock);
+	return err;
+}
+EXPORT_SYMBOL(hsi_read_cancel);
+
+/**
+ * hsi_poll - HSI poll feature, enables data interrupt on frame reception
+ * @dev - hsi device channel reference to apply the I/O control
+ *	  (or port associated to it)
+ *
+ * Return 0 on success, a negative value on failure.
+ *
+ * Sets the HSI_CH_RX_POLL flag and enables the read interrupt without a
+ * destination buffer (NULL).
+ */
+int hsi_poll(struct hsi_device *dev)
+{
+	struct hsi_channel *ch;
+	struct hsi_dev *hsi_ctrl;
+	int err;
+
+	if (unlikely(!dev || !dev->ch))
+		return -EINVAL;
+	dev_dbg(dev->device.parent, "%s ch %d\n", __func__, dev->n_ch);
+
+	if (unlikely(!(dev->ch->flags & HSI_CH_OPEN))) {
+		dev_err(dev->device.parent, "HSI device NOT open\n");
+		return -EINVAL;
+	}
+
+	ch = dev->ch;
+	hsi_ctrl = ch->hsi_port->hsi_controller;
+
+	spin_lock_bh(&hsi_ctrl->lock);
+	hsi_clocks_enable_channel(dev->device.parent, dev->ch->channel_number,
+				  __func__);
+
+	ch->flags |= HSI_CH_RX_POLL;
+
+	/* NULL address: poll mode, no data buffer is programmed */
+	err = hsi_driver_enable_read_interrupt(ch, NULL);
+
+	hsi_clocks_disable_channel(dev->device.parent, dev->ch->channel_number,
+				   __func__);
+	spin_unlock_bh(&hsi_ctrl->lock);
+
+	return err;
+}
+EXPORT_SYMBOL(hsi_poll);
+
+/**
+ * hsi_unpoll - HSI poll feature, disables data interrupt on frame reception
+ * @dev - hsi device channel reference to apply the I/O control
+ *	  (or port associated to it)
+ *
+ * Return 0 on success, a negative value on failure.
+ *
+ * Clears the HSI_CH_RX_POLL flag and disables the read interrupt.
+ */
+int hsi_unpoll(struct hsi_device *dev)
+{
+	struct hsi_channel *ch;
+	struct hsi_dev *hsi_ctrl;
+
+	if (unlikely(!dev || !dev->ch))
+		return -EINVAL;
+	dev_dbg(dev->device.parent, "%s ch %d\n", __func__, dev->n_ch);
+
+	if (unlikely(!(dev->ch->flags & HSI_CH_OPEN))) {
+		dev_err(dev->device.parent, "HSI device NOT open\n");
+		return -EINVAL;
+	}
+
+	ch = dev->ch;
+	hsi_ctrl = ch->hsi_port->hsi_controller;
+
+	spin_lock_bh(&hsi_ctrl->lock);
+	hsi_clocks_enable_channel(dev->device.parent, dev->ch->channel_number,
+				  __func__);
+
+	ch->flags &= ~HSI_CH_RX_POLL;
+
+	hsi_driver_disable_read_interrupt(ch);
+
+	hsi_clocks_disable_channel(dev->device.parent, dev->ch->channel_number,
+				   __func__);
+	spin_unlock_bh(&hsi_ctrl->lock);
+
+	return 0;
+}
+EXPORT_SYMBOL(hsi_unpoll);
+
+/**
+ * hsi_ioctl - HSI I/O control
+ * @dev - hsi device channel reference to apply the I/O control
+ * (or port associated to it)
+ * @command - HSI I/O control command
+ * @arg - parameter associated to the control command. NULL, if no parameter.
+ *
+ * Return 0 on success, a negative value on failure.
+ *
+ */
+int hsi_ioctl(struct hsi_device *dev, unsigned int command, void *arg)
+{
+ struct hsi_channel *ch;
+ struct hsi_dev *hsi_ctrl;
+ struct hsi_port *pport;
+ void __iomem *base;
+ unsigned int port, channel;
+ u32 acwake;
+ int err = 0;
+ int fifo = 0;
+
+ /* Every command requires a fully wired, open channel */
+ if (unlikely((!dev) ||
+ (!dev->ch) ||
+ (!dev->ch->hsi_port) ||
+ (!dev->ch->hsi_port->hsi_controller)) ||
+ (!(dev->ch->flags & HSI_CH_OPEN))) {
+ pr_err(LOG_NAME "HSI IOCTL Invalid parameter\n");
+ return -EINVAL;
+ }
+
+ ch = dev->ch;
+ pport = ch->hsi_port;
+ hsi_ctrl = ch->hsi_port->hsi_controller;
+ port = ch->hsi_port->port_number;
+ channel = ch->channel_number;
+ base = hsi_ctrl->base;
+
+ dev_dbg(dev->device.parent, "IOCTL: ch %d, command %d\n",
+ channel, command);
+
+ spin_lock_bh(&hsi_ctrl->lock);
+ hsi_clocks_enable_channel(dev->device.parent, channel, __func__);
+
+ switch (command) {
+ case HSI_IOCTL_ACWAKE_UP:
+ /* Wake up request to Modem (typically OMAP initiated) */
+ /* Symetrical disable will be done in HSI_IOCTL_ACWAKE_DOWN */
+ if (ch->flags & HSI_CH_ACWAKE) {
+ dev_dbg(dev->device.parent, "Duplicate ACWAKE UP\n");
+ err = -EPERM;
+ goto out;
+ }
+
+ ch->flags |= HSI_CH_ACWAKE;
+ pport->acwake_status |= BIT(channel);
+
+ /* We only claim once the wake line per channel */
+ acwake = hsi_inl(base, HSI_SYS_WAKE_REG(port));
+ if (!(acwake & HSI_WAKE(channel))) {
+ hsi_outl(HSI_SET_WAKE(channel), base,
+ HSI_SYS_SET_WAKE_REG(port));
+ }
+
+ goto out;
+ case HSI_IOCTL_ACWAKE_DOWN:
+ /* Low power request initiation (OMAP initiated, typically */
+ /* following inactivity timeout) */
+ /* ACPU HSI block shall still be capable of receiving */
+ if (!(ch->flags & HSI_CH_ACWAKE)) {
+ dev_dbg(dev->device.parent, "Duplicate ACWAKE DOWN\n");
+ err = -EPERM;
+ goto out;
+ }
+
+ /* Re-sync the shadow status if it diverged from the HW */
+ acwake = hsi_inl(base, HSI_SYS_WAKE_REG(port));
+ if (unlikely(pport->acwake_status !=
+ (acwake & HSI_WAKE_MASK))) {
+ dev_warn(dev->device.parent,
+ "ACWAKE shadow register mismatch"
+ " acwake_status: 0x%x, HSI_SYS_WAKE_REG: 0x%x",
+ pport->acwake_status, acwake);
+ pport->acwake_status = acwake & HSI_WAKE_MASK;
+ }
+ /* SSI_TODO: add safety check for SSI also */
+
+ ch->flags &= ~HSI_CH_ACWAKE;
+ pport->acwake_status &= ~BIT(channel);
+
+ /* Release the wake line per channel */
+ if ((acwake & HSI_WAKE(channel))) {
+ hsi_outl(HSI_CLEAR_WAKE(channel), base,
+ HSI_SYS_CLEAR_WAKE_REG(port));
+ }
+
+ goto out;
+ case HSI_IOCTL_SEND_BREAK:
+ hsi_outl(1, base, HSI_HST_BREAK_REG(port));
+ /*HSI_TODO : need to deactivate clock after BREAK frames sent*/
+ /*Use interrupt ? (if TX BREAK INT exists)*/
+ break;
+ case HSI_IOCTL_GET_ACWAKE:
+ if (!arg) {
+ err = -EINVAL;
+ goto out;
+ }
+ *(u32 *)arg = hsi_inl(base, HSI_SYS_WAKE_REG(port));
+ break;
+ case HSI_IOCTL_FLUSH_RX:
+ hsi_outl(0, base, HSI_HSR_RXSTATE_REG(port));
+ break;
+ case HSI_IOCTL_FLUSH_TX:
+ hsi_outl(0, base, HSI_HST_TXSTATE_REG(port));
+ break;
+ case HSI_IOCTL_GET_CAWAKE:
+ if (!arg) {
+ err = -EINVAL;
+ goto out;
+ }
+ err = hsi_get_cawake(dev->ch->hsi_port);
+ if (err < 0) {
+ err = -ENODEV;
+ goto out;
+ }
+ /* NOTE(review): err still holds the (possibly non-zero)
+ * CAWAKE level here, so it is also the ioctl return value;
+ * preserved since existing callers may rely on it. */
+ *(u32 *)arg = err;
+ break;
+ case HSI_IOCTL_SET_RX:
+ if (!arg) {
+ err = -EINVAL;
+ goto out;
+ }
+ err = hsi_set_rx(dev->ch->hsi_port, (struct hsr_ctx *)arg);
+ break;
+ case HSI_IOCTL_GET_RX:
+ if (!arg) {
+ err = -EINVAL;
+ goto out;
+ }
+ hsi_get_rx(dev->ch->hsi_port, (struct hsr_ctx *)arg);
+ break;
+ case HSI_IOCTL_SET_TX:
+ if (!arg) {
+ err = -EINVAL;
+ goto out;
+ }
+ err = hsi_set_tx(dev->ch->hsi_port, (struct hst_ctx *)arg);
+ break;
+ case HSI_IOCTL_GET_TX:
+ if (!arg) {
+ err = -EINVAL;
+ goto out;
+ }
+ hsi_get_tx(dev->ch->hsi_port, (struct hst_ctx *)arg);
+ break;
+ case HSI_IOCTL_SW_RESET:
+ dev_info(dev->device.parent, "SW Reset\n");
+ err = hsi_softreset(hsi_ctrl);
+
+ /* Reset HSI config to default */
+ hsi_softreset_driver(hsi_ctrl);
+ break;
+ case HSI_IOCTL_GET_FIFO_OCCUPANCY:
+ if (!arg) {
+ err = -EINVAL;
+ goto out;
+ }
+ fifo = hsi_fifo_get_id(hsi_ctrl, channel, port);
+ if (unlikely(fifo < 0)) {
+ dev_err(hsi_ctrl->dev, "No valid FIFO id found for "
+ "channel %d.\n", channel);
+ err = -EFAULT;
+ goto out;
+ }
+ *(size_t *)arg = hsi_get_rx_fifo_occupancy(hsi_ctrl, fifo);
+ break;
+ case HSI_IOCTL_SET_ACREADY_SAFEMODE:
+ /* NOTE(review): raw pad-configuration register accesses;
+ * consider the mux framework instead of magic addresses. */
+ omap_writel(omap_readl(0x4A1000C8) | 0x7, 0x4A1000C8);
+ break;
+ case HSI_IOCTL_SET_ACREADY_NORMAL:
+ omap_writel(omap_readl(0x4A1000C8) & 0xFFFFFFF9, 0x4A1000C8);
+ /* Bug fix: a missing break here used to fall through into
+ * HSI_IOCTL_SET_3WIRE_MODE, forcing 3-wire mode on every
+ * ACREADY_NORMAL request. */
+ break;
+ case HSI_IOCTL_SET_3WIRE_MODE:
+ omap_writel(0x30000, 0x4A058C08);
+ break;
+ case HSI_IOCTL_SET_4WIRE_MODE:
+ omap_writel((omap_readl(0x4A058C08) & 0xFFFF), 0x4A058C08);
+ break;
+ default:
+ err = -ENOIOCTLCMD;
+ break;
+ }
+out:
+ /* All IOCTL end by disabling the clocks, except ACWAKE high. */
+ hsi_clocks_disable_channel(dev->device.parent, channel, __func__);
+
+ spin_unlock_bh(&hsi_ctrl->lock);
+
+ return err;
+}
+EXPORT_SYMBOL(hsi_ioctl);
+
+/**
+ * hsi_close - close given hsi device channel
+ * @dev - reference to hsi device channel.
+ */
+void hsi_close(struct hsi_device *dev)
+{
+ struct hsi_dev *hsi_ctrl;
+
+ if (!dev || !dev->ch) {
+ pr_err(LOG_NAME "Trying to close wrong HSI device %p\n", dev);
+ return;
+ }
+ dev_dbg(dev->device.parent, "%s ch %d\n", __func__, dev->n_ch);
+
+ hsi_ctrl = dev->ch->hsi_port->hsi_controller;
+
+ spin_lock_bh(&hsi_ctrl->lock);
+ hsi_clocks_enable_channel(dev->device.parent, dev->ch->channel_number,
+ __func__);
+
+ /* Closing an already-closed channel is a silent no-op; otherwise
+ * cancel any in-flight write and read before marking it closed. */
+ if (dev->ch->flags & HSI_CH_OPEN) {
+ dev->ch->flags &= ~HSI_CH_OPEN;
+ __hsi_write_cancel(dev->ch);
+ __hsi_read_cancel(dev->ch);
+ }
+
+ hsi_clocks_disable_channel(dev->device.parent, dev->ch->channel_number,
+ __func__);
+ spin_unlock_bh(&hsi_ctrl->lock);
+}
+EXPORT_SYMBOL(hsi_close);
+EXPORT_SYMBOL(hsi_close);
+
+/**
+ * hsi_set_read_cb - register read_done() callback.
+ * @dev - reference to hsi device channel where the callback is associated to.
+ * @read_cb - callback to signal read transfer completed.
+ * size is expressed in number of 32-bit words.
+ *
+ * NOTE: Read callback must be only set when channel is not open !
+ */
+void hsi_set_read_cb(struct hsi_device *dev,
+ void (*read_cb) (struct hsi_device *dev,
+ unsigned int size))
+{
+ dev_dbg(dev->device.parent, "%s ch %d\n", __func__, dev->n_ch);
+
+ /* No locking: per the API contract above, the callback is only
+ * changed while the channel is not open. */
+ dev->ch->read_done = read_cb;
+}
+EXPORT_SYMBOL(hsi_set_read_cb);
+
+/**
+ * hsi_set_write_cb - register write_done() callback.
+ * @dev - reference to hsi device channel where the callback is associated to.
+ * @write_cb - callback to signal write transfer completed.
+ * size is expressed in number of 32-bit words.
+ *
+ * NOTE: Write callback must be only set when channel is not open !
+ */
+void hsi_set_write_cb(struct hsi_device *dev,
+ void (*write_cb) (struct hsi_device *dev,
+ unsigned int size))
+{
+ dev_dbg(dev->device.parent, "%s ch %d\n", __func__, dev->n_ch);
+
+ /* No locking: per the API contract above, the callback is only
+ * changed while the channel is not open. */
+ dev->ch->write_done = write_cb;
+}
+EXPORT_SYMBOL(hsi_set_write_cb);
+
+/**
+ * hsi_set_port_event_cb - register port_event callback.
+ * @dev - reference to hsi device channel where the callback is associated to.
+ * @port_event_cb - callback to signal events from the channel port.
+ */
+void hsi_set_port_event_cb(struct hsi_device *dev,
+ void (*port_event_cb) (struct hsi_device *dev,
+ unsigned int event,
+ void *arg))
+{
+ struct hsi_port *port = dev->ch->hsi_port;
+ struct hsi_dev *hsi_ctrl = port->hsi_controller;
+
+ dev_dbg(dev->device.parent, "%s ch %d\n", __func__, dev->n_ch);
+
+ /* The event callback may be queried from interrupt context, so its
+ * update is protected by the channel's rw_lock. */
+ write_lock_bh(&dev->ch->rw_lock);
+ dev->ch->port_event = port_event_cb;
+ write_unlock_bh(&dev->ch->rw_lock);
+
+ /* Since we now have a callback registered for events, we can now */
+ /* enable the CAWAKE, ERROR and BREAK interrupts */
+ spin_lock_bh(&hsi_ctrl->lock);
+ hsi_clocks_enable_channel(dev->device.parent, dev->ch->channel_number,
+ __func__);
+ hsi_driver_enable_interrupt(port, HSI_CAWAKEDETECTED | HSI_ERROROCCURED
+ | HSI_BREAKDETECTED);
+ hsi_clocks_disable_channel(dev->device.parent, dev->ch->channel_number,
+ __func__);
+ spin_unlock_bh(&hsi_ctrl->lock);
+}
+EXPORT_SYMBOL(hsi_set_port_event_cb);
+EXPORT_SYMBOL(hsi_set_port_event_cb);
diff --git a/drivers/omap_hsi/hsi_driver_int.c b/drivers/omap_hsi/hsi_driver_int.c
new file mode 100644
index 0000000..ce67e5f
--- /dev/null
+++ b/drivers/omap_hsi/hsi_driver_int.c
@@ -0,0 +1,717 @@
+/*
+ * hsi_driver_int.c
+ *
+ * Implements HSI interrupt functionality.
+ *
+ * Copyright (C) 2007-2008 Nokia Corporation. All rights reserved.
+ * Copyright (C) 2009 Texas Instruments, Inc.
+ *
+ * Author: Carlos Chinea <carlos.chinea@nokia.com>
+ * Author: Sebastien JAN <s-jan@ti.com>
+ *
+ * This package is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
+ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
+ */
+
+#include "hsi_driver.h"
+#include <linux/delay.h>
+/* Set while the interrupt tasklet is scheduled/running so the hard IRQ
+ * handler does not schedule it twice (identifier misspells "scheduled";
+ * kept as-is for link compatibility).
+ * NOTE(review): plain int shared between IRQ and tasklet context and
+ * global across all ports -- appears to rely on disable_irq_nosync()/
+ * enable_irq() pairing in hsi_mpu_handler()/do_hsi_tasklet() for
+ * serialization; confirm single-port usage or make it per-port/atomic. */
+int shceduled_already_flag = 0;
+/* Drop any pending read request on @ch: no buffer, zero length, and no
+ * DMA logical channel (-1 means "unused"). */
+void hsi_reset_ch_read(struct hsi_channel *ch)
+{
+ ch->read_data.size = 0;
+ ch->read_data.lch = -1;
+ ch->read_data.addr = NULL;
+}
+
+/* Drop any pending write request on @ch: no buffer, zero length, and no
+ * DMA logical channel (-1 means "unused"). */
+void hsi_reset_ch_write(struct hsi_channel *ch)
+{
+ ch->write_data.size = 0;
+ ch->write_data.lch = -1;
+ ch->write_data.addr = NULL;
+}
+
+/* Check if a Write (data transfer from AP to CP) is
+ * ongoing for a given HSI channel
+ */
+bool hsi_is_channel_busy(struct hsi_channel *ch)
+{
+ /* A channel counts as busy only while a Write (AP -> CP) request
+ * is pending. A pending read is deliberately ignored: incoming
+ * data raises a FIFO or DMA interrupt that wakes the platform
+ * anyway, so there is no need to keep the clocks ON for it. */
+ return ch->write_data.addr != NULL;
+}
+
+/* Check if a HSI port is busy :
+ * - data transfer (Write) is ongoing for a given HSI channel
+ * - CAWAKE is high
+ * - Currently in HSI interrupt tasklet
+ * - Currently in HSI CAWAKE tasklet (for SSI)
+ */
+bool hsi_is_hsi_port_busy(struct hsi_port *pport)
+{
+ struct hsi_dev *hsi_ctrl = pport->hsi_controller;
+ bool cur_cawake = hsi_get_cawake(pport);
+ int ch;
+
+ /* Busy while the port's interrupt bottom half is executing */
+ if (pport->in_int_tasklet) {
+ dev_dbg(hsi_ctrl->dev, "Interrupt tasklet running\n");
+ return true;
+ }
+
+ /* Busy while the (SSI) CAWAKE bottom half is executing */
+ if (pport->in_cawake_tasklet) {
+ dev_dbg(hsi_ctrl->dev, "SSI Cawake tasklet running\n");
+ return true;
+ }
+
+ /* Busy while the modem holds CAWAKE high (RX may start anytime) */
+ if (cur_cawake) {
+ dev_dbg(hsi_ctrl->dev, "Port %d: WAKE status: acwake_status %d,"
+ "cur_cawake %d", pport->port_number,
+ pport->acwake_status, cur_cawake);
+ return true;
+ }
+
+ /* Busy if any channel still has a pending write */
+ for (ch = 0; ch < pport->max_ch; ch++)
+ if (hsi_is_channel_busy(&pport->hsi_channel[ch])) {
+ dev_dbg(hsi_ctrl->dev, "Port %d; channel %d "
+ "busy\n", pport->port_number, ch);
+ return true;
+ }
+
+ return false;
+}
+
+/* Check if HSI controller is busy :
+ * - One of the HSI port is busy
+ * - Currently in HSI DMA tasklet
+ */
+bool hsi_is_hsi_controller_busy(struct hsi_dev *hsi_ctrl)
+{
+ int port;
+
+ /* Busy while the DMA bottom half is executing */
+ if (hsi_ctrl->in_dma_tasklet) {
+ dev_dbg(hsi_ctrl->dev, "DMA tasklet running\n");
+ return true;
+ }
+
+ /* Busy if any of the ports is (ports are numbered from 1 in logs) */
+ for (port = 0; port < hsi_ctrl->max_p; port++)
+ if (hsi_is_hsi_port_busy(&hsi_ctrl->hsi_port[port])) {
+ dev_dbg(hsi_ctrl->dev, "Port %d busy\n", port + 1);
+ return true;
+ }
+
+ dev_dbg(hsi_ctrl->dev, "No activity on HSI controller\n");
+ return false;
+}
+
+/* Report whether the HST (transmitter) of @pport is still moving a frame:
+ * anything other than HSI_HST_TXSTATE_IDLE in the TX state machine means
+ * a transmission is in flight. */
+bool hsi_is_hst_port_busy(struct hsi_port *pport)
+{
+ unsigned int port = pport->port_number;
+ u32 txstateval;
+
+ txstateval = hsi_inl(pport->hsi_controller->base,
+ HSI_HST_TXSTATE_REG(port)) &
+ HSI_HST_TXSTATE_VAL_MASK;
+
+ if (txstateval == HSI_HST_TXSTATE_IDLE)
+ return false;
+
+ dev_dbg(pport->hsi_controller->dev, "HST port %d busy, "
+ "TXSTATE=%d\n", port, txstateval);
+ return true;
+}
+
+/* Controller-level TX activity check: true when any port's transmitter is
+ * still busy. */
+bool hsi_is_hst_controller_busy(struct hsi_dev *hsi_ctrl)
+{
+ int i = 0;
+
+ while (i < hsi_ctrl->max_p) {
+ if (hsi_is_hst_port_busy(&hsi_ctrl->hsi_port[i]))
+ return true;
+ i++;
+ }
+
+ return false;
+}
+
+
+/* Enables the CAWAKE, BREAK, or ERROR interrupt for the given port */
+int hsi_driver_enable_interrupt(struct hsi_port *pport, u32 flag)
+{
+ /* Read-modify-write OR of @flag into the port's MPU enable register;
+ * caller is expected to hold the controller lock with clocks on. */
+ hsi_outl_or(flag, pport->hsi_controller->base,
+ HSI_SYS_MPU_ENABLE_REG(pport->port_number, pport->n_irq));
+
+ return 0;
+}
+
+/* Enables the Data Accepted Interrupt of HST for the given channel */
+/* @data is unused here; kept for signature symmetry with the read variant. */
+int hsi_driver_enable_write_interrupt(struct hsi_channel *ch, u32 * data)
+{
+ struct hsi_port *p = ch->hsi_port;
+ unsigned int port = p->port_number;
+ unsigned int channel = ch->channel_number;
+
+ /* Unmask the per-channel DATAACCEPT (TX ready) interrupt */
+ hsi_outl_or(HSI_HST_DATAACCEPT(channel), p->hsi_controller->base,
+ HSI_SYS_MPU_ENABLE_CH_REG(port, p->n_irq, channel));
+
+ return 0;
+}
+
+/* Enables the Data Available Interrupt of HSR for the given channel */
+/* @data is unused here; kept for signature symmetry with the write variant. */
+int hsi_driver_enable_read_interrupt(struct hsi_channel *ch, u32 * data)
+{
+ struct hsi_port *p = ch->hsi_port;
+ unsigned int port = p->port_number;
+ unsigned int channel = ch->channel_number;
+
+ /* Unmask the per-channel DATAAVAILABLE (RX frame) interrupt */
+ hsi_outl_or(HSI_HSR_DATAAVAILABLE(channel), p->hsi_controller->base,
+ HSI_SYS_MPU_ENABLE_CH_REG(port, p->n_irq, channel));
+
+ return 0;
+}
+
+/**
+ * hsi_driver_cancel_write_interrupt - Cancel pending write interrupt.
+ * @dev - hsi device channel where to cancel the pending interrupt.
+ *
+ * Return: -ECANCELED : write cancel success, data not transferred to TX FIFO
+ * 0 : transfer is already over, data already transferred to TX FIFO
+ *
+ * Note: whatever returned value, write callback will not be called after
+ * write cancel.
+ */
+int hsi_driver_cancel_write_interrupt(struct hsi_channel *ch)
+{
+ struct hsi_port *p = ch->hsi_port;
+ unsigned int port = p->port_number;
+ unsigned int channel = ch->channel_number;
+ void __iomem *base = p->hsi_controller->base;
+ u32 status_reg;
+ long buff_offset;
+
+ /* Snapshot the enable mask BEFORE masking, so the return value can
+ * report whether a transfer had in fact already completed. */
+ status_reg = hsi_inl(base,
+ HSI_SYS_MPU_ENABLE_CH_REG(port, p->n_irq, channel));
+
+ if (!(status_reg & HSI_HST_DATAACCEPT(channel))) {
+ dev_dbg(&ch->dev->device, "Write cancel on not "
+ "enabled channel %d ENABLE REG 0x%08X", channel,
+ status_reg);
+ }
+ /* Keep only bits that are both enabled and currently raised */
+ status_reg &= hsi_inl(base,
+ HSI_SYS_MPU_STATUS_CH_REG(port, p->n_irq, channel));
+
+ /* Mask the DATAACCEPT interrupt for this channel */
+ hsi_outl_and(~HSI_HST_DATAACCEPT(channel), base,
+ HSI_SYS_MPU_ENABLE_CH_REG(port, p->n_irq, channel));
+
+ /* Clear any frame still flagged in the TX buffer-state register */
+ buff_offset = hsi_hst_bufstate_f_reg(p->hsi_controller, port, channel);
+ if (buff_offset >= 0)
+ hsi_outl_and(~HSI_BUFSTATE_CHANNEL(channel), base, buff_offset);
+ hsi_reset_ch_write(ch);
+ /* 0: the write had already reached the FIFO; -ECANCELED otherwise */
+ return status_reg & HSI_HST_DATAACCEPT(channel) ? 0 : -ECANCELED;
+}
+
+/**
+ * hsi_driver_cancel_read_interrupt - Cancel pending read interrupt.
+ * @dev - hsi device channel where to cancel the pending interrupt.
+ *
+ * Return: -ECANCELED : read cancel success, data not available at expected
+ * address.
+ * 0 : transfer is already over, data already available at expected
+ * address.
+ *
+ * Note: whatever returned value, read callback will not be called after cancel.
+ */
+int hsi_driver_cancel_read_interrupt(struct hsi_channel *ch)
+{
+ struct hsi_port *p = ch->hsi_port;
+ unsigned int port = p->port_number;
+ unsigned int channel = ch->channel_number;
+ void __iomem *base = p->hsi_controller->base;
+ u32 status_reg;
+
+ /* Snapshot the enable mask BEFORE masking, so the return value can
+ * report whether data had in fact already arrived. */
+ status_reg = hsi_inl(base,
+ HSI_SYS_MPU_ENABLE_CH_REG(port, p->n_irq, channel));
+ if (!(status_reg & HSI_HSR_DATAAVAILABLE(channel))) {
+ dev_dbg(&ch->dev->device, "Read cancel on not "
+ "enabled channel %d ENABLE REG 0x%08X", channel,
+ status_reg);
+ }
+ /* Keep only bits that are both enabled and currently raised */
+ status_reg &= hsi_inl(base,
+ HSI_SYS_MPU_STATUS_CH_REG(port, p->n_irq, channel));
+ /* Mask the DATAAVAILABLE interrupt for this channel */
+ hsi_outl_and(~HSI_HSR_DATAAVAILABLE(channel), base,
+ HSI_SYS_MPU_ENABLE_CH_REG(port, p->n_irq, channel));
+ hsi_reset_ch_read(ch);
+ /* 0: data was already available; -ECANCELED otherwise */
+ return status_reg & HSI_HSR_DATAAVAILABLE(channel) ? 0 : -ECANCELED;
+}
+
+/* Mask the per-channel DATAACCEPT (TX ready) interrupt for @ch. */
+void hsi_driver_disable_write_interrupt(struct hsi_channel *ch)
+{
+ struct hsi_port *hsi_port = ch->hsi_port;
+ unsigned int channel = ch->channel_number;
+
+ hsi_outl_and(~HSI_HST_DATAACCEPT(channel),
+ hsi_port->hsi_controller->base,
+ HSI_SYS_MPU_ENABLE_CH_REG(hsi_port->port_number,
+ hsi_port->n_irq, channel));
+}
+
+/* Mask the per-channel DATAAVAILABLE (RX frame) interrupt for @ch. */
+void hsi_driver_disable_read_interrupt(struct hsi_channel *ch)
+{
+ struct hsi_port *hsi_port = ch->hsi_port;
+ unsigned int channel = ch->channel_number;
+
+ hsi_outl_and(~HSI_HSR_DATAAVAILABLE(channel),
+ hsi_port->hsi_controller->base,
+ HSI_SYS_MPU_ENABLE_CH_REG(hsi_port->port_number,
+ hsi_port->n_irq, channel));
+}
+
+/* HST_ACCEPTED interrupt processing */
+static void hsi_do_channel_tx(struct hsi_channel *ch)
+{
+ struct hsi_dev *hsi_ctrl = ch->hsi_port->hsi_controller;
+ void __iomem *base = hsi_ctrl->base;
+ unsigned int n_ch;
+ unsigned int n_p;
+ unsigned int irq; /* NOTE(review): assigned but not used below */
+ long buff_offset;
+
+ n_ch = ch->channel_number;
+ n_p = ch->hsi_port->port_number;
+ irq = ch->hsi_port->n_irq;
+
+ dev_dbg(hsi_ctrl->dev,
+ "Data Accepted interrupt for channel %d.\n", n_ch);
+
+ /* One word per request: mask the interrupt before writing it out */
+ hsi_driver_disable_write_interrupt(ch);
+
+ if (ch->write_data.addr == NULL) {
+ dev_err(hsi_ctrl->dev, "Error, NULL Write address.\n");
+ hsi_reset_ch_write(ch);
+
+ } else {
+ /* Push the pending word into the channel's TX buffer reg */
+ buff_offset = hsi_hst_buffer_reg(hsi_ctrl, n_p, n_ch);
+ if (buff_offset >= 0) {
+ hsi_outl(*(ch->write_data.addr), base, buff_offset);
+ ch->write_data.addr = NULL;
+ }
+ }
+
+ /* Drop the controller lock while calling back into the client;
+ * the callback may re-enter the driver (e.g. queue another write). */
+ spin_unlock(&hsi_ctrl->lock);
+ dev_dbg(hsi_ctrl->dev, "Calling ch %d write callback.\n", n_ch);
+ (*ch->write_done) (ch->dev, 1);
+ spin_lock(&hsi_ctrl->lock);
+}
+
+/* HSR_AVAILABLE interrupt processing */
+static void hsi_do_channel_rx(struct hsi_channel *ch)
+{
+ struct hsi_dev *hsi_ctrl = ch->hsi_port->hsi_controller;
+ void __iomem *base = ch->hsi_port->hsi_controller->base;
+ unsigned int n_ch;
+ unsigned int n_p;
+ unsigned int irq; /* NOTE(review): assigned but not used below */
+ long buff_offset;
+ int rx_poll = 0;
+ int data_read = 0;
+ int fifo, fifo_words_avail;
+ unsigned int data;
+
+ n_ch = ch->channel_number;
+ n_p = ch->hsi_port->port_number;
+ irq = ch->hsi_port->n_irq;
+
+ dev_dbg(hsi_ctrl->dev,
+ "Data Available interrupt for channel %d.\n", n_ch);
+
+ /* Check if there is data in FIFO available for reading */
+ if (hsi_driver_device_is_hsi(to_platform_device(hsi_ctrl->dev))) {
+ fifo = hsi_fifo_get_id(hsi_ctrl, n_ch, n_p);
+ if (unlikely(fifo < 0)) {
+ dev_err(hsi_ctrl->dev, "No valid FIFO id found for "
+ "channel %d.\n", n_ch);
+ return;
+ }
+ fifo_words_avail = hsi_get_rx_fifo_occupancy(hsi_ctrl, fifo);
+ if (!fifo_words_avail) {
+ dev_dbg(hsi_ctrl->dev,
+ "WARNING: RX FIFO %d empty before CPU copy\n",
+ fifo);
+
+ /* Do not disable interrupt because another interrupt */
+ /* can still come, this time with a real frame. */
+ return;
+ }
+ }
+
+ /*
+ * Check race condition: RX transmission initiated but DMA transmission
+ * already started - acknowledge then ignore interrupt occurence
+ */
+ if (ch->read_data.lch != -1) {
+ dev_err(hsi_ctrl->dev,
+ "race condition between rx txmn and DMA txmn %0x\n",
+ ch->read_data.lch);
+ hsi_driver_disable_read_interrupt(ch);
+ goto done;
+ }
+
+ /* hsi_poll() marked this channel: report via port event, not read */
+ if (ch->flags & HSI_CH_RX_POLL)
+ rx_poll = 1;
+
+ /* If a read request is pending, copy the one received word to it */
+ if (ch->read_data.addr) {
+ buff_offset = hsi_hsr_buffer_reg(hsi_ctrl, n_p, n_ch);
+ if (buff_offset >= 0) {
+ data_read = 1;
+ data = *(ch->read_data.addr) = hsi_inl(base,
+ buff_offset);
+ }
+ }
+#if 0
+ if (omap_readl(0x4A05A810))
+ dev_err(hsi_ctrl->dev,
+ "RX BUF state is full. "
+ "Warning disabling interrupt %0x\n",
+ omap_readl(0x4A05A810));
+#endif
+ /* One word per request: mask the interrupt and clear the request */
+ hsi_driver_disable_read_interrupt(ch);
+ hsi_reset_ch_read(ch);
+
+done:
+ /* Callbacks run without the controller lock: they may re-enter
+ * the driver (e.g. hsi_read() from read_done). */
+ if (rx_poll) {
+ spin_unlock(&hsi_ctrl->lock);
+ hsi_port_event_handler(ch->hsi_port,
+ HSI_EVENT_HSR_DATAAVAILABLE,
+ (void *)n_ch);
+ spin_lock(&hsi_ctrl->lock);
+ }
+
+ if (data_read) {
+ spin_unlock(&hsi_ctrl->lock);
+#if 0
+ dev_warn(hsi_ctrl->dev, "Read callback %d.\n", n_ch);
+ if (n_ch == 0)
+ dev_warn(hsi_ctrl->dev,
+ "Read callback %d \t DATA 0x%0x .\n",
+ n_ch, data);
+#endif
+ (*ch->read_done) (ch->dev, 1);
+ spin_lock(&hsi_ctrl->lock);
+ }
+}
+
+/**
+ * hsi_do_cawake_process - CAWAKE line management
+ * @pport - HSI port to process
+ *
+ * This function handles the CAWAKE L/H transitions and call the event callback
+ * accordingly.
+ *
+ * Returns 0 if the CAWAKE event was processed, -EAGAIN if CAWAKE event
+ * processing is delayed due to a pending DMA interrupt.
+ * If -EAGAIN is returned, pport->hsi_tasklet has to be re-scheduled once
+ * the DMA tasklet has been executed. This should be done automatically by
+ * the driver.
+ *
+*/
+int hsi_do_cawake_process(struct hsi_port *pport)
+{
+ struct hsi_dev *hsi_ctrl = pport->hsi_controller;
+ bool cawake_status = hsi_get_cawake(pport);
+
+ /* Deal with init condition */
+ if (unlikely(pport->cawake_status < 0))
+ pport->cawake_status = !cawake_status;
+ /* NOTE(review): unconditional debug dumps of raw registers at
+ * 0x4A05880c / 0x4A058804 (presumably MPU enable/status); the
+ * message text is misleading -- it prints on every call. */
+ dev_dbg(hsi_ctrl->dev,
+ "Interrupts are not enabled but CAWAKE has come\n: 0x%0x.\n",
+ omap_readl(0x4A05880c));
+ dev_dbg(hsi_ctrl->dev,
+ "Interrupts are not enabled but CAWAKE has come\n: 0x%0x.\n",
+ omap_readl(0x4A058804));
+
+ /* Check CAWAKE line status */
+ if (cawake_status) {
+ dev_dbg(hsi_ctrl->dev, "CAWAKE rising edge detected\n");
+
+ /* Check for possible mismatch (race condition):
+ * shadow already high means we missed a HI->LOW transition;
+ * report the DOWN event first, without the lock held. */
+ if (unlikely(pport->cawake_status)) {
+ dev_warn(hsi_ctrl->dev,
+ "CAWAKE race is detected: %s.\n",
+ "HI -> LOW -> HI");
+ spin_unlock(&hsi_ctrl->lock);
+ hsi_port_event_handler(pport, HSI_EVENT_CAWAKE_DOWN,
+ NULL);
+ spin_lock(&hsi_ctrl->lock);
+ }
+ pport->cawake_status = 1;
+ /* NOTE(review): raw pokes at 0x4A306404/0x4A004400/0x4A306400
+ * (presumably PRM/CM clock-domain registers) -- confirm
+ * intent and replace with clockdomain APIs if possible. */
+ if (omap_readl(0x4A306404) != 0x0) {
+ omap_writel(0x00000002, 0x4A004400);
+ omap_writel(0x003F0703, 0x4A306400);
+ omap_writel(0x003F0700, 0x4A306400);
+ omap_writel(0x00000003, 0x4A004400);
+ }
+ /* Force HSI to ON_ACTIVE when CAWAKE is high */
+ hsi_set_pm_force_hsi_on(hsi_ctrl);
+ /* TODO: Use omap_pm_set_max_dev_wakeup_lat() to set latency */
+ /* constraint to prevent L3INIT to enter RET/OFF when CAWAKE */
+ /* is high */
+
+ spin_unlock(&hsi_ctrl->lock);
+ hsi_port_event_handler(pport, HSI_EVENT_CAWAKE_UP, NULL);
+ spin_lock(&hsi_ctrl->lock);
+ } else {
+ dev_dbg(hsi_ctrl->dev, "CAWAKE falling edge detected\n");
+
+ /* Check for pending DMA interrupt: processing the falling
+ * edge now could tear down state the DMA tasklet still
+ * needs, so ask the caller to retry later. */
+ if (hsi_is_dma_read_int_pending(hsi_ctrl)) {
+ dev_dbg(hsi_ctrl->dev, "Pending DMA Read interrupt "
+ "before CAWAKE->L, exiting "
+ "Interrupt tasklet.\n");
+ return -EAGAIN;
+ }
+ /* Mirror of the race check above: missed LOW->HI transition */
+ if (unlikely(!pport->cawake_status)) {
+ dev_warn(hsi_ctrl->dev,
+ "CAWAKE race is detected: %s.\n",
+ "LOW -> HI -> LOW");
+ spin_unlock(&hsi_ctrl->lock);
+ hsi_port_event_handler(pport, HSI_EVENT_CAWAKE_UP,
+ NULL);
+ spin_lock(&hsi_ctrl->lock);
+ }
+ pport->cawake_status = 0;
+
+ /* Allow HSI HW to enter IDLE when CAWAKE is low */
+ hsi_set_pm_default(hsi_ctrl);
+ /* TODO: Use omap_pm_set_max_dev_wakeup_lat() to release */
+ /* latency constraint to prevent L3INIT to enter RET/OFF when */
+ /* CAWAKE is low */
+
+ spin_unlock(&hsi_ctrl->lock);
+ hsi_port_event_handler(pport, HSI_EVENT_CAWAKE_DOWN, NULL);
+ spin_lock(&hsi_ctrl->lock);
+ }
+ return 0;
+}
+
+/**
+ * hsi_driver_int_proc - check all channels / ports for interrupts events
+ * @hsi_ctrl - HSI controller data
+ * @status_offset: interrupt status register offset
+ * @enable_offset: interrupt enable register offset
+ * @start: interrupt index to start on
+ * @stop: interrupt index to stop on
+ *
+ * returns the bitmap of processed events
+ *
+ * This function calls the related processing functions and triggered events.
+ * Events are cleared after corresponding function has been called.
+*/
+static u32 hsi_driver_int_proc(struct hsi_port *pport,
+ unsigned long status_offset,
+ unsigned long enable_offset, unsigned int start,
+ unsigned int stop)
+{
+ struct hsi_dev *hsi_ctrl = pport->hsi_controller;
+ void __iomem *base = hsi_ctrl->base;
+ unsigned int port = pport->port_number;
+ unsigned int channel;
+ u32 status_reg;
+ u32 hsr_err_reg;
+ u32 channels_served = 0;
+
+ /* Get events status: only bits both raised AND enabled are handled */
+ status_reg = hsi_inl(base, status_offset);
+ status_reg &= hsi_inl(base, enable_offset);
+
+ /* cawake_off_event forces CAWAKE processing even with no status bit
+ * set (wake-up from OFF mode arrives through the pad, not the IRQ). */
+ if (pport->cawake_off_event) {
+ dev_dbg(hsi_ctrl->dev, "CAWAKE detected from OFF mode.\n");
+ } else if (!status_reg) {
+ dev_dbg(hsi_ctrl->dev, "Channels [%d,%d] : no event, exit.\n",
+ start, stop);
+ return 0;
+ } else {
+ dev_dbg(hsi_ctrl->dev, "Channels [%d,%d] : Events 0x%08x\n",
+ start, stop, status_reg);
+ }
+
+ if (status_reg & HSI_BREAKDETECTED) {
+ dev_info(hsi_ctrl->dev, "Hardware BREAK on port %d\n", port);
+ hsi_outl(0, base, HSI_HSR_BREAK_REG(port));
+ /* Event callbacks run without the controller lock */
+ spin_unlock(&hsi_ctrl->lock);
+ hsi_port_event_handler(pport, HSI_EVENT_BREAK_DETECTED, NULL);
+ spin_lock(&hsi_ctrl->lock);
+
+ channels_served |= HSI_BREAKDETECTED;
+ }
+
+ if (status_reg & HSI_ERROROCCURED) {
+ /* Decode and log every error source before acking them all */
+ hsr_err_reg = hsi_inl(base, HSI_HSR_ERROR_REG(port));
+ if (hsr_err_reg & HSI_HSR_ERROR_SIG)
+ dev_err(hsi_ctrl->dev, "HSI ERROR Port %d: 0x%x: %s\n",
+ port, hsr_err_reg, "Signal Error");
+ if (hsr_err_reg & HSI_HSR_ERROR_FTE)
+ dev_err(hsi_ctrl->dev, "HSI ERROR Port %d: 0x%x: %s\n",
+ port, hsr_err_reg, "Frame Timeout Error");
+ if (hsr_err_reg & HSI_HSR_ERROR_TBE)
+ dev_err(hsi_ctrl->dev, "HSI ERROR Port %d: 0x%x: %s\n",
+ port, hsr_err_reg, "Tailing Bit Error");
+ if (hsr_err_reg & HSI_HSR_ERROR_RME)
+ dev_err(hsi_ctrl->dev, "HSI ERROR Port %d: 0x%x: %s\n",
+ port, hsr_err_reg, "RX Mapping Error");
+ if (hsr_err_reg & HSI_HSR_ERROR_TME)
+ dev_err(hsi_ctrl->dev, "HSI ERROR Port %d: 0x%x: %s\n",
+ port, hsr_err_reg, "TX Mapping Error");
+ /* Clear error event bit */
+ hsi_outl(hsr_err_reg, base, HSI_HSR_ERRORACK_REG(port));
+ if (hsr_err_reg) { /* ignore spurious errors */
+ spin_unlock(&hsi_ctrl->lock);
+ hsi_port_event_handler(pport, HSI_EVENT_ERROR, NULL);
+ spin_lock(&hsi_ctrl->lock);
+ } else
+ dev_dbg(hsi_ctrl->dev, "Spurious HSI error!\n");
+
+ channels_served |= HSI_ERROROCCURED;
+ }
+
+ /* Per-channel TX-accepted / RX-available events for [start, stop] */
+ for (channel = start; channel <= stop; channel++) {
+ if (status_reg & HSI_HST_DATAACCEPT(channel)) {
+ hsi_do_channel_tx(&pport->hsi_channel[channel]);
+ channels_served |= HSI_HST_DATAACCEPT(channel);
+ }
+
+ if (status_reg & HSI_HSR_DATAAVAILABLE(channel)) {
+ hsi_do_channel_rx(&pport->hsi_channel[channel]);
+ channels_served |= HSI_HSR_DATAAVAILABLE(channel);
+ }
+
+ if (status_reg & HSI_HSR_DATAOVERRUN(channel)) {
+ /*HSI_TODO : Data overrun handling*/
+ dev_err(hsi_ctrl->dev,
+ "Data overrun in real time mode !\n");
+ }
+ }
+
+ /* CAWAKE falling or rising edge detected. On -EAGAIN the CAWAKE
+ * bit is deliberately NOT added to channels_served, so it stays
+ * pending and re-triggers once the DMA tasklet has run. */
+ if ((status_reg & HSI_CAWAKEDETECTED) || pport->cawake_off_event) {
+ if (hsi_do_cawake_process(pport) == -EAGAIN)
+ goto proc_done;
+
+ channels_served |= HSI_CAWAKEDETECTED;
+ pport->cawake_off_event = false;
+ }
+proc_done:
+ /* Reset status bits */
+ hsi_outl(channels_served, base, status_offset);
+
+ return channels_served;
+}
+
+/* Run hsi_driver_int_proc() over both channel banks of @pport and return
+ * the union of the served event bits. */
+static u32 hsi_process_int_event(struct hsi_port *pport)
+{
+ unsigned int port = pport->port_number;
+ unsigned int irq = pport->n_irq;
+ u32 status_reg;
+
+ /* Process events for channels 0..7 */
+ status_reg = hsi_driver_int_proc(pport,
+ HSI_SYS_MPU_STATUS_REG(port, irq),
+ HSI_SYS_MPU_ENABLE_REG(port, irq),
+ 0,
+ min(pport->max_ch, (u8) HSI_SSI_CHANNELS_MAX) - 1);
+
+ /* Process events for channels 8..15 (separate "U" register bank,
+ * only present when the port has more than 8 channels) */
+ if (pport->max_ch > HSI_SSI_CHANNELS_MAX)
+ status_reg |= hsi_driver_int_proc(pport,
+ HSI_SYS_MPU_U_STATUS_REG(port, irq),
+ HSI_SYS_MPU_U_ENABLE_REG(port, irq),
+ HSI_SSI_CHANNELS_MAX, pport->max_ch - 1);
+
+ return status_reg;
+}
+
+/* Bottom half for the port's MPU IRQ: processes all pending events, then
+ * clears the scheduling flag and re-enables the IRQ line that
+ * hsi_mpu_handler() disabled. */
+static void do_hsi_tasklet(unsigned long hsi_port)
+{
+ struct hsi_port *pport = (struct hsi_port *)hsi_port;
+ struct hsi_dev *hsi_ctrl = pport->hsi_controller;
+ u32 status_reg;
+
+ dev_dbg(hsi_ctrl->dev, "Int Tasklet : clock_enabled=%d\n",
+ hsi_ctrl->clock_enabled);
+#if 0
+ if (pport->cawake_off_event == true)
+ dev_info(hsi_ctrl->dev,
+ "Tasklet called from OFF/RET MODE THRU PAD CPU ID %d\n",
+ smp_processor_id());
+ else
+ dev_info(hsi_ctrl->dev,
+ "Tasklet called from ACTIVE MODE CPU ID %d\n",
+ smp_processor_id());
+#endif
+ spin_lock(&hsi_ctrl->lock);
+ hsi_clocks_enable(hsi_ctrl->dev, __func__);
+ /* in_int_tasklet is read by the "port busy" checks above */
+ pport->in_int_tasklet = true;
+
+ status_reg = hsi_process_int_event(pport);
+
+ pport->in_int_tasklet = false;
+ hsi_clocks_disable(hsi_ctrl->dev, __func__);
+ spin_unlock(&hsi_ctrl->lock);
+ /* Allow hsi_mpu_handler() to schedule us again, then unmask IRQ */
+ shceduled_already_flag = 0;
+ enable_irq(pport->irq);
+}
+
+/* Hard IRQ handler: defers all work to the tasklet, masking the (level
+ * triggered) IRQ line until the bottom half has cleared the status bits. */
+static irqreturn_t hsi_mpu_handler(int irq, void *p)
+{
+ struct hsi_port *pport = p;
+#if 0
+ printk(KERN_INFO "Tasklet called from MPU HANDLER CPU ID %d "
+ "\t STS 0x%0x \t ENB 0x%0x\n", smp_processor_id(),
+ omap_readl(0x4A058808), omap_readl(0x4A05880C));
+#endif
+ /* NOTE(review): non-atomic test-then-set of a global flag; safe only
+ * as long as a single IRQ line/port uses it -- confirm. */
+ if (shceduled_already_flag == 0) {
+#if 0
+ tasklet_hi_schedule(&pport->hsi_tasklet);
+ if (TASKLET_STATE_SCHED == pport->hsi_tasklet.state) {
+ printk(KERN_INFO "MPU TASKLET ALREADY SCHEDULED RETURNING\n");
+ return IRQ_HANDLED;
+ }
+#endif
+ shceduled_already_flag = 1;
+ tasklet_hi_schedule(&pport->hsi_tasklet);
+ /* Disable interrupt until Bottom Half has cleared the */
+ /* IRQ status register */
+ disable_irq_nosync(pport->irq);
+ }
+ return IRQ_HANDLED;
+}
+
+/**
+ * hsi_mpu_init - set up the per-port interrupt bottom half and IRQ line.
+ * @hsi_p: port to initialize
+ * @irq_name: name to register the IRQ under
+ *
+ * Returns 0 on success or the negative error code from request_irq().
+ */
+int __init hsi_mpu_init(struct hsi_port *hsi_p, const char *irq_name)
+{
+ int err;
+
+ tasklet_init(&hsi_p->hsi_tasklet, do_hsi_tasklet, (unsigned long)hsi_p);
+
+ dev_info(hsi_p->hsi_controller->dev, "Registering IRQ %s (%d)\n",
+ irq_name, hsi_p->irq);
+ /* IRQF_NO_SUSPEND: modem wake events must be serviced even while
+ * suspending; the line is level (high) triggered. */
+ err = request_irq(hsi_p->irq, hsi_mpu_handler,
+ IRQF_NO_SUSPEND | IRQF_TRIGGER_HIGH,
+ irq_name, hsi_p);
+ if (err < 0) {
+ dev_err(hsi_p->hsi_controller->dev, "FAILED to MPU request"
+ " IRQ (%d) on port %d", hsi_p->irq, hsi_p->port_number);
+ /* Propagate the real error instead of masking it as -EBUSY
+ * (request_irq can fail with -EINVAL, -ENOMEM, ...). */
+ return err;
+ }
+
+ return 0;
+}
+
+/**
+ * hsi_mpu_exit - tear down the per-port IRQ line and bottom half.
+ * @hsi_p: port to tear down
+ *
+ * Free the IRQ first so no further interrupt can re-schedule the tasklet,
+ * then kill the tasklet. The previous order (tasklet_kill() before
+ * free_irq()) left a window where a late interrupt could schedule the
+ * tasklet again after it had been killed.
+ */
+void hsi_mpu_exit(struct hsi_port *hsi_p)
+{
+ free_irq(hsi_p->irq, hsi_p);
+ tasklet_kill(&hsi_p->hsi_tasklet);
+}
diff --git a/drivers/omap_hsi/hsi_protocol.c b/drivers/omap_hsi/hsi_protocol.c
new file mode 100644
index 0000000..e1451e7
--- /dev/null
+++ b/drivers/omap_hsi/hsi_protocol.c
@@ -0,0 +1,308 @@
+/*
+ * File - hsi_protocol.c
+ *
+ * Implements HSI protocol for Infineon Modem.
+ *
+ * Copyright (C) 2011 Samsung Electronics.
+ *
+ * Author: Rupesh Gujare <rupesh.g@samsung.com>
+ *
+ * This package is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
+ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
+ */
+
+#if 0
+#define DEBUG 1
+#endif
+
+#include <linux/errno.h>
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/init.h>
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/mm.h>
+#include <linux/fs.h>
+#include <linux/slab.h>
+#include <linux/poll.h>
+#include <linux/sched.h>
+#include <linux/io.h>
+
+#include "hsi-protocol-if.h"
+#include <linux/hsi_driver_if.h>
+
+#define DRIVER_VERSION "1.0"
+
+/* NOTE(review): test pattern; no user visible in this file - confirm */
+char test_data[10] = "abcdefghij";
+
+/* Reserved device number; not used anywhere in this file */
+dev_t hsi_protocol_dev;
+
+/* One completed transfer (rx or tx) queued for a sleeping caller */
+struct protocol_queue {
+	struct list_head list;
+	u32 *data;		/* buffer used by the transfer */
+	unsigned int count;	/* transfer size in bytes */
+};
+
+/* Per-channel bookkeeping: wait queues, completion lists, poll state */
+struct hsi_protocol {
+	unsigned int opened;
+	int poll_event;
+	struct list_head rx_queue;
+	struct list_head tx_queue;
+	spinlock_t lock; /* Serialize access to driver data and API */
+	struct fasync_struct *async_queue;
+	wait_queue_head_t rx_wait;
+	wait_queue_head_t tx_wait;
+	wait_queue_head_t poll_wait;
+};
+
+static struct hsi_protocol hsi_protocol_data[HSI_MAX_CHANNELS];
+
+/*
+ * if_notify - fan-in point for driver events on channel @ch.
+ * @ch: logical HSI channel the event belongs to
+ * @ev: event descriptor {type, buffer pointer, byte count}
+ *
+ * IN/OUT completions are queued on the per-channel rx/tx lists and the
+ * matching sleeping reader/writer is woken; EXCEP/AVAIL only update
+ * poll_event and wake pollers. Runs in atomic context (GFP_ATOMIC
+ * allocation, plain spin_lock); note every switch arm drops the lock
+ * itself before waking waiters.
+ */
+void if_notify(int ch, struct hsi_event *ev)
+{
+	struct protocol_queue *entry;
+
+	pr_debug("%s, ev = {0x%x, 0x%p, %u}\n",
+		 __func__, ev->event, ev->data, ev->count);
+
+	spin_lock(&hsi_protocol_data[ch].lock);
+
+/* Not Required */
+	/*if (!hsi_protocol_data[ch].opened) {
+		pr_debug("%s, device not opened\n!", __func__);
+		printk(KERN_INFO "%s, device not opened\n!", __func__);
+		spin_unlock(&hsi_protocol_data[ch].lock);
+		return;
+	}*/
+
+	switch (HSI_EV_TYPE(ev->event)) {
+	case HSI_EV_IN:
+		entry = kmalloc(sizeof(*entry), GFP_ATOMIC);
+		if (!entry) {
+			pr_err("HSI-CHAR: entry allocation failed.\n");
+			spin_unlock(&hsi_protocol_data[ch].lock);
+			return;
+		}
+		entry->data = ev->data;
+		entry->count = ev->count;
+		list_add_tail(&entry->list, &hsi_protocol_data[ch].rx_queue);
+		spin_unlock(&hsi_protocol_data[ch].lock);
+		pr_debug("%s, HSI_EV_IN\n", __func__);
+		wake_up_interruptible(&hsi_protocol_data[ch].rx_wait);
+		break;
+	case HSI_EV_OUT:
+		entry = kmalloc(sizeof(*entry), GFP_ATOMIC);
+		if (!entry) {
+			pr_err("HSI-CHAR: entry allocation failed.\n");
+			spin_unlock(&hsi_protocol_data[ch].lock);
+			return;
+		}
+		entry->data = ev->data;
+		entry->count = ev->count;
+		hsi_protocol_data[ch].poll_event |= (POLLOUT | POLLWRNORM);
+		list_add_tail(&entry->list, &hsi_protocol_data[ch].tx_queue);
+		spin_unlock(&hsi_protocol_data[ch].lock);
+		pr_debug("%s, HSI_EV_OUT\n", __func__);
+		wake_up_interruptible(&hsi_protocol_data[ch].tx_wait);
+		break;
+	case HSI_EV_EXCEP:
+		hsi_protocol_data[ch].poll_event |= POLLPRI;
+		spin_unlock(&hsi_protocol_data[ch].lock);
+		pr_debug("%s, HSI_EV_EXCEP\n", __func__);
+		wake_up_interruptible(&hsi_protocol_data[ch].poll_wait);
+		break;
+	case HSI_EV_AVAIL:
+		hsi_protocol_data[ch].poll_event |= (POLLIN | POLLRDNORM);
+		spin_unlock(&hsi_protocol_data[ch].lock);
+		pr_debug("%s, HSI_EV_AVAIL\n", __func__);
+		wake_up_interruptible(&hsi_protocol_data[ch].poll_wait);
+		break;
+	default:
+		spin_unlock(&hsi_protocol_data[ch].lock);
+		break;
+	}
+}
+
+/*
+ * hsi_proto_read - synchronous read on an HSI channel.
+ * @ch: logical HSI channel
+ * @buffer: destination buffer (word-aligned)
+ * @count: number of bytes requested
+ *
+ * Submits the read via if_hsi_read() and sleeps on rx_wait until
+ * if_notify() queues the HSI_EV_IN completion for this channel.
+ * Returns the number of bytes received, or -EAGAIN if interrupted by a
+ * signal (the pending read is cancelled first).
+ * NOTE(review): if if_hsi_read() itself fails, the function returns
+ * recv_data's initial value 0, not the error code - confirm callers
+ * can tell that apart from an empty read.
+ */
+int hsi_proto_read(int ch, u32 *buffer, int count)
+{
+	DECLARE_WAITQUEUE(wait, current);
+	u32 *data;
+	unsigned int data_len = 0;
+	struct protocol_queue *entry;
+	int ret, recv_data = 0;
+
+	/*if (count > MAX_HSI_IPC_BUFFER)
+		count = MAX_HSI_IPC_BUFFER;
+
+	data = kmalloc(count, GFP_ATOMIC);*/
+
+	ret = if_hsi_read(ch, buffer, count);
+	if (ret < 0) {
+		pr_err("Can not submit read. READ Error\n");
+		goto out2;
+	}
+
+	spin_lock_bh(&hsi_protocol_data[ch].lock);
+	add_wait_queue(&hsi_protocol_data[ch].rx_wait, &wait);
+	spin_unlock_bh(&hsi_protocol_data[ch].lock);
+
+	for (;;) {
+		data = NULL;
+		data_len = 0;
+
+		set_current_state(TASK_INTERRUPTIBLE);
+
+		/* Pop the oldest completion, if any, under the lock */
+		spin_lock_bh(&hsi_protocol_data[ch].lock);
+		if (!list_empty(&hsi_protocol_data[ch].rx_queue)) {
+			entry = list_entry(hsi_protocol_data[ch].rx_queue.next,
+					   struct protocol_queue, list);
+			data = entry->data;
+			data_len = entry->count;
+			list_del(&entry->list);
+			kfree(entry);
+		}
+		spin_unlock_bh(&hsi_protocol_data[ch].lock);
+
+		pr_debug("%s, data = 0x%p, data_len = %d\n",
+			 __func__, data, data_len);
+
+		if (data_len) {
+			pr_debug("%s, RX finished, ch-> %d, length = %d\n",
+				 __func__, ch, count);
+			spin_lock_bh(&hsi_protocol_data[ch].lock);
+			hsi_protocol_data[ch].poll_event &=
+			    ~(POLLIN | POLLRDNORM);
+			spin_unlock_bh(&hsi_protocol_data[ch].lock);
+			if_hsi_poll(ch);
+#if 0
+			memcpy(buffer, data, count);
+#endif
+			recv_data += data_len;
+#if 0
+			buffer += data_len;
+			if ((recv_data == count) || (recv_data >= MAX_HSI_IPC_BUFFER))
+#endif
+			break;
+		} else if (signal_pending(current)) {
+			pr_debug("%s, ERESTARTSYS\n", __func__);
+			recv_data = -EAGAIN;
+			if_hsi_cancel_read(ch);
+			/* goto out; */
+			break;
+		}
+
+		/*printk(KERN_DEBUG "%s, going to sleep...\n", __func__); */
+		schedule();
+		/*printk(KERN_DEBUG "%s, woke up\n", __func__); */
+	}
+
+/*out:*/
+	__set_current_state(TASK_RUNNING);
+	remove_wait_queue(&hsi_protocol_data[ch].rx_wait, &wait);
+
+out2:
+	/*To Do- Set bit if data to be received is
+	 * greater than 512K Bytes and return to IPC call
+	 */
+
+	return recv_data;
+}
+
+/*
+ * hsi_proto_write - synchronous write on an HSI channel.
+ * @ch: logical HSI channel
+ * @buffer: data to transmit (word-aligned)
+ * @length: number of bytes to send
+ *
+ * Submits the buffer via if_hsi_write() and sleeps on tx_wait until
+ * if_notify() queues the HSI_EV_OUT completion for this channel.
+ * Returns the completed byte count, the if_hsi_write() error code on
+ * submission failure, or -ERESTARTSYS if interrupted by a signal.
+ *
+ * Fixes vs. original: "ret" was declared unsigned int, so the
+ * "ret < 0" check after if_hsi_write() could never fire and write
+ * errors were silently ignored; it is now a signed int. The unbraced
+ * "else spin_lock_bh(...)" (which only worked because the error path
+ * jumps away) is now an ordinary straight-line sequence.
+ */
+int hsi_proto_write(int ch, u32 *buffer, int length)
+{
+	DECLARE_WAITQUEUE(wait, current);
+	u32 *data;
+	unsigned int data_len = 0;
+	int ret;
+	struct protocol_queue *entry;
+
+	ret = if_hsi_write(ch, buffer, length);
+	if (ret < 0) {
+		pr_err("HSI Write ERROR %s\n", __func__);
+		goto out2;
+	}
+
+	spin_lock_bh(&hsi_protocol_data[ch].lock);
+	hsi_protocol_data[ch].poll_event &= ~(POLLOUT | POLLWRNORM);
+	add_wait_queue(&hsi_protocol_data[ch].tx_wait, &wait);
+	spin_unlock_bh(&hsi_protocol_data[ch].lock);
+
+	for (;;) {
+		data = NULL;
+		data_len = 0;
+
+		set_current_state(TASK_INTERRUPTIBLE);
+		/* Pop the oldest TX completion, if any, under the lock */
+		spin_lock_bh(&hsi_protocol_data[ch].lock);
+		if (!list_empty(&hsi_protocol_data[ch].tx_queue)) {
+			entry = list_entry(hsi_protocol_data[ch].tx_queue.next,
+					   struct protocol_queue, list);
+			data = entry->data;
+			data_len = entry->count;
+			list_del(&entry->list);
+			kfree(entry);
+		}
+		spin_unlock_bh(&hsi_protocol_data[ch].lock);
+
+		if (data_len) {
+			pr_debug("%s, TX finished, data_len = %d, ch-> %d\n",
+				 __func__, length, ch);
+			ret = data_len;
+			break;
+		} else if (signal_pending(current)) {
+			pr_debug("%s, ERESTARTSYS\n", __func__);
+			ret = -ERESTARTSYS;
+			goto out;
+		}
+
+		schedule();
+	}
+
+out:
+	__set_current_state(TASK_RUNNING);
+	remove_wait_queue(&hsi_protocol_data[ch].tx_wait, &wait);
+
+out2:
+	return ret;
+}
+EXPORT_SYMBOL(hsi_proto_write);
+
+/*
+ * Module init: prepare the per-channel wait queues, locks and
+ * completion lists, then register with the HSI framework via
+ * if_hsi_init().
+ */
+static int __init hsi_protocol_init(void)
+{
+	struct hsi_protocol *proto;
+	int ch;
+
+	pr_info("HSI Infineon Protocol driver version " DRIVER_VERSION "\n");
+
+	for (ch = 0; ch < HSI_MAX_CHANNELS; ch++) {
+		proto = &hsi_protocol_data[ch];
+		init_waitqueue_head(&proto->rx_wait);
+		init_waitqueue_head(&proto->tx_wait);
+		init_waitqueue_head(&proto->poll_wait);
+		spin_lock_init(&proto->lock);
+		proto->opened = 0;
+		INIT_LIST_HEAD(&proto->rx_queue);
+		INIT_LIST_HEAD(&proto->tx_queue);
+	}
+
+	printk(KERN_INFO "hsi_protocol_init : hsi_mux_setting Done.\n");
+
+	return if_hsi_init();
+}
+
+
+/* Module exit: unregister from the HSI framework via if_hsi_exit(). */
+static void __exit hsi_protocol_exit(void)
+{
+	if_hsi_exit();
+}
+
+
+MODULE_AUTHOR("Rupesh Gujare <rupesh.g@samsung.com> / Samsung Electronics");
+MODULE_DESCRIPTION("HSI Protocol for Infineon Modem");
+MODULE_LICENSE("GPL");
+MODULE_VERSION(DRIVER_VERSION);
+
+module_init(hsi_protocol_init);
+module_exit(hsi_protocol_exit);
diff --git a/drivers/omap_hsi/hsi_protocol_cmd.c b/drivers/omap_hsi/hsi_protocol_cmd.c
new file mode 100644
index 0000000..d256631
--- /dev/null
+++ b/drivers/omap_hsi/hsi_protocol_cmd.c
@@ -0,0 +1,429 @@
+/*
+ * File - hsi_protocol_if_cmd.c
+ *
+ * Implements HSI protocol for Infineon Modem.
+ *
+ * Copyright (C) 2011 Samsung Electronics. All rights reserved.
+ *
+ * Author: Rupesh Gujare <rupesh.g@samsung.com>
+ *
+ * This package is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
+ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
+ */
+
+
+#include <linux/errno.h>
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/init.h>
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/delay.h>
+#include <linux/wait.h>
+#include <linux/sched.h>
+
+#include <linux/hsi_driver_if.h>
+#include "hsi-protocol-if.h"
+
+/* Shared protocol state owned by hsi_protocol_if.c */
+extern struct if_hsi_iface hsi_protocol_iface;
+extern wait_queue_head_t ipc_read_wait, ipc_write_wait;
+/* Channel open/close helpers implemented in hsi_protocol_if.c */
+int if_hsi_openchannel(struct if_hsi_channel *channel);
+int if_hsi_closechannel(struct if_hsi_channel *channel);
+
+/* Circular TX/RX command trace (50 entries each) used by /proc dump */
+extern struct if_hsi_cmd hsi_cmd_history;
+extern int tx_cmd_history_p;
+extern int rx_cmd_history_p;
+
+/*
+ * hsi_decode_cmd - split a PDU received on channel 0 into its fields.
+ * @cmd_data: raw 32-bit command word
+ * @cmd: out - opcode from bits 31..28
+ * @ch: out - logical channel from bits 27..24 (where applicable)
+ * @param: out - opcode-specific payload
+ *
+ * Returns 0 on success, -1 on LRC mismatch (OPEN_CONN) or for opcodes
+ * this protocol does not accept (*cmd is then forced to
+ * HSI_LL_MSG_INVALID).
+ * NOTE(review): for CONF_RATE both *ch and *param are taken from bits
+ * 27..24 - the param mask looks suspicious; confirm against the
+ * HSI-LL spec.
+ */
+int hsi_decode_cmd(u32 *cmd_data, u32 *cmd, u32 *ch, u32 *param)
+{
+	int ret = 0;
+	u32 data = *cmd_data;
+	u8 lrc_cal, lrc_act;
+	u8 val1, val2, val3;
+
+	*cmd = ((data & 0xF0000000) >> 28);
+
+	switch (*cmd) {
+	case HSI_LL_MSG_BREAK:
+		pr_err("Command MSG_BREAK Received.\n");
+		break;
+
+	case HSI_LL_MSG_OPEN_CONN:
+		*ch = ((data & 0x0F000000) >> 24);
+		*param = ((data & 0x00FFFF00) >> 8);
+		/*Check LRC*/
+		val1 = ((data & 0xFF000000) >> 24);
+		val2 = ((data & 0x00FF0000) >> 16);
+		val3 = ((data & 0x0000FF00) >> 8);
+		lrc_act = (data & 0x000000FF);
+		lrc_cal = val1 ^ val2 ^ val3;
+		if (lrc_cal != lrc_act)
+			ret = -1;
+		break;
+
+	case HSI_LL_MSG_CONN_READY:
+	case HSI_LL_MSG_CONN_CLOSED:
+	case HSI_LL_MSG_CANCEL_CONN:
+	case HSI_LL_MSG_NAK:
+		*ch = ((data & 0x0F000000) >> 24);
+		break;
+
+	case HSI_LL_MSG_ACK:
+		*ch = ((data & 0x0F000000) >> 24);
+		*param = (data & 0x00FFFFFF);
+		//printk(KERN_INFO "ACK Received ch=%d, param=%d\n",*ch, *param);
+		break;
+
+	case HSI_LL_MSG_CONF_RATE:
+		*ch = ((data & 0x0F000000) >> 24);
+		*param = ((data & 0x0F000000) >> 24);
+		break;
+
+	case HSI_LL_MSG_OPEN_CONN_OCTET:
+		*ch = ((data & 0x0F000000) >> 24);
+		*param = (data & 0x00FFFFFF);
+		break;
+
+	case HSI_LL_MSG_ECHO:
+	case HSI_LL_MSG_INFO_REQ:
+	case HSI_LL_MSG_INFO:
+	case HSI_LL_MSG_CONFIGURE:
+	case HSI_LL_MSG_ALLOCATE_CH:
+	case HSI_LL_MSG_RELEASE_CH:
+	case HSI_LL_MSG_INVALID:
+		*cmd = HSI_LL_MSG_INVALID;
+		*ch = HSI_LL_INVALID_CHANNEL;
+		ret = -1;
+		break;
+	}
+	return ret;
+}
+
+/*
+ * protocol_create_cmd - encode an HSI-LL command word.
+ * @cmd_type: HSI_LL_MSG_* opcode (top nibble of the encoded word)
+ * @channel: logical channel the command addresses
+ * @arg: opcode-specific parameter (size, role, echo params, rate, ...)
+ *
+ * Returns the 32-bit command PDU. Opcodes this protocol does not
+ * support (ECHO, INFO*, CONFIGURE, ALLOCATE/RELEASE_CH, INVALID)
+ * encode to 0 - which is also the valid encoding of HSI_LL_MSG_BREAK,
+ * so callers must not pass unsupported opcodes.
+ *
+ * Fix vs. original: a local "ret" error flag was set for unsupported
+ * opcodes but the function always returned "command"; the dead
+ * variable has been removed without changing behaviour.
+ */
+int protocol_create_cmd(int cmd_type, unsigned int channel, void *arg)
+{
+	unsigned int command = 0;
+
+	switch (cmd_type) {
+	case HSI_LL_MSG_BREAK:
+		command = 0;
+		break;
+
+	case HSI_LL_MSG_OPEN_CONN:
+	{
+		unsigned int size = *(unsigned int *)arg;
+		unsigned int lcr = 0;
+
+		command = ((HSI_LL_MSG_OPEN_CONN & 0x0000000F) << 28) |
+			  ((channel & 0x000000FF) << 24) |
+			  ((size & 0x0000FFFF) << 8);
+
+		/* LRC: XOR of the three data bytes, placed in bits 7..0 */
+		lcr = ((command & 0xFF000000) >> 24) ^
+		      ((command & 0x00FF0000) >> 16) ^
+		      ((command & 0x0000FF00) >> 8);
+
+		command = command | (lcr & 0x000000FF);
+	}
+		break;
+
+	case HSI_LL_MSG_CONN_READY:
+		command = ((HSI_LL_MSG_CONN_READY & 0x0000000F) << 28) |
+			  ((channel & 0x000000FF) << 24);
+		break;
+
+	case HSI_LL_MSG_CONN_CLOSED:
+		command = ((HSI_LL_MSG_CONN_CLOSED & 0x0000000F) << 28) |
+			  ((channel & 0x000000FF) << 24);
+		break;
+
+	case HSI_LL_MSG_CANCEL_CONN:
+	{
+		unsigned int role = *(unsigned int *)arg;
+
+		command = ((HSI_LL_MSG_CANCEL_CONN & 0x0000000F) << 28) |
+			  ((channel & 0x000000FF) << 24) |
+			  ((role & 0x000000FF) << 16);
+	}
+		break;
+
+	case HSI_LL_MSG_ACK:
+	{
+		unsigned int echo_params = *(unsigned int *)arg;
+
+		command = ((HSI_LL_MSG_ACK & 0x0000000F) << 28) |
+			  ((channel & 0x000000FF) << 24) |
+			  ((echo_params & 0x00FFFFFF));
+	}
+		break;
+
+	case HSI_LL_MSG_NAK:
+		command = ((HSI_LL_MSG_NAK & 0x0000000F) << 28) |
+			  ((channel & 0x000000FF) << 24);
+		break;
+
+	case HSI_LL_MSG_CONF_RATE:
+	{
+		unsigned int baud_rate = *(unsigned int *)arg;
+
+		command = ((HSI_LL_MSG_CONF_RATE & 0x0000000F) << 28) |
+			  ((channel & 0x000000FF) << 24) |
+			  ((baud_rate & 0x00FFFFFF));
+	}
+		break;
+
+	case HSI_LL_MSG_OPEN_CONN_OCTET:
+	{
+		unsigned int size = *(unsigned int *)arg;
+
+		command = ((HSI_LL_MSG_OPEN_CONN_OCTET & 0x0000000F) << 28) |
+			  ((channel & 0x000000FF) << 24) |
+			  ((size & 0x00FFFFFF));
+	}
+		break;
+
+	case HSI_LL_MSG_ECHO:
+	case HSI_LL_MSG_INFO_REQ:
+	case HSI_LL_MSG_INFO:
+	case HSI_LL_MSG_CONFIGURE:
+	case HSI_LL_MSG_ALLOCATE_CH:
+	case HSI_LL_MSG_RELEASE_CH:
+	case HSI_LL_MSG_INVALID:
+		/* Unsupported: falls through to "return 0" (== BREAK) */
+		break;
+	}
+	return command;
+}
+
+/*
+ * set_tx_config - update only TX mode and channel count on @ch.
+ *
+ * Reads the current TX context first so all other fields are kept,
+ * then writes the modified context back. Returns 0 on success or the
+ * hsi_ioctl() error code.
+ *
+ * Fix vs. original: the GET_TX ioctl result was ignored, so a failed
+ * read could push an uninitialized context back to the controller;
+ * it is now checked.
+ */
+int set_tx_config(struct if_hsi_channel *ch, u32 mode, u32 max_channels)
+{
+	struct hst_ctx tx_config;
+	int ret;
+
+	ret = hsi_ioctl(ch->dev, HSI_IOCTL_GET_TX, &tx_config);
+	if (ret < 0)
+		return ret;
+	tx_config.mode = mode;
+	tx_config.channels = max_channels;
+	return hsi_ioctl(ch->dev, HSI_IOCTL_SET_TX, &tx_config);
+}
+
+static int saved_cmd_queue = 0;
+static u32 cmd_saved[5];
+int hsi_protocol_send_command(u32 cmd, u32 channel, u32 param)
+{
+ struct if_hsi_channel *channel_zero;
+ u32 cmd_array[4] = {0x00000000, 0xAAAAAAAA, 0xBBBBBBBB, 0xCCCCCCCC}, ret = -1;
+
+ channel_zero = &hsi_protocol_iface.channels[0];
+ cmd_array[0] = protocol_create_cmd(cmd, channel, &param);
+ pr_debug("[%s] CMD = %08x\n",__func__, cmd_array[0]);
+ while (channel_zero->tx_state != HSI_LL_TX_STATE_IDLE) {
+ cmd_saved[saved_cmd_queue] = cmd_array[0];
+ saved_cmd_queue++;
+ pr_debug("(%s) cmd_saved : %x(%d)\n", __func__, cmd_array[0], saved_cmd_queue);
+
+ return 0;
+ }
+
+send_retry:
+
+ channel_zero->tx_state = HSI_LL_TX_STATE_TX;
+
+ // For es 2.1 ver.
+ ret = hsi_proto_write(0, cmd_array, 4);
+ if (ret < 0) {
+ pr_err("(%s) Command Write failed, CMD->%X\n", __func__, cmd_array[0]);
+ channel_zero->tx_state = HSI_LL_TX_STATE_IDLE;
+ return -1;
+ } else {
+ channel_zero->tx_state = HSI_LL_TX_STATE_IDLE;
+
+ pr_debug("[%s] CMD = %08x\n", __func__, cmd_array[0]);
+
+ hsi_cmd_history.tx_cmd[tx_cmd_history_p] = cmd_array[0];
+ hsi_cmd_history.tx_cmd_time[tx_cmd_history_p] = CURRENT_TIME;
+ tx_cmd_history_p++;
+ if (tx_cmd_history_p >= 50)
+ tx_cmd_history_p = 0;
+
+ if (saved_cmd_queue) {
+ saved_cmd_queue--;
+ cmd_array[0] = cmd_saved[saved_cmd_queue];
+
+ goto SEND_RETRY;
+ }
+
+ return 0;
+ }
+}
+
+/*
+ * rx_stm - RX state machine: act on a command decoded from channel 0.
+ * @cmd: HSI_LL_MSG_* opcode
+ * @ch: logical channel the command addresses
+ * @param: opcode-specific payload (e.g. transfer size for ACK)
+ *
+ * Runs in the context of the read thread (hsi_read_thrd).
+ *
+ * Fixes vs. original:
+ *  - the NAK history-scan loop used an unsigned "i" with "i >= 0" as
+ *    its condition, which is always true (infinite loop / index
+ *    underflow), and a failed scan then read tx_cmd[-1]; the index is
+ *    now signed and the miss case is handled explicitly;
+ *  - tx_buf was printed with "%x" instead of "%p" (format/argument
+ *    mismatch on a pointer).
+ */
+void rx_stm(u32 cmd, u32 ch, u32 param)
+{
+	struct if_hsi_channel *channel;
+	u32 size = 0, tmp_cmd = 0, ret;
+	int i;
+
+	channel = &hsi_protocol_iface.channels[ch];
+
+	switch (cmd) {
+	case HSI_LL_MSG_OPEN_CONN:
+		pr_err("ERROR... OPEN_CONN Not supported. Should use OPEN_CONN_OCTECT instead.\n");
+		break;
+
+	case HSI_LL_MSG_ECHO:
+		pr_err("ERROR... HSI_LL_MSG_ECHO not supported.\n");
+		break;
+
+	case HSI_LL_MSG_CONN_CLOSED:
+		switch (channel->tx_state) {
+		case HSI_LL_TX_STATE_WAIT_FOR_CONN_CLOSED:
+			channel->tx_state = HSI_LL_TX_STATE_IDLE;
+
+			/* ACWAKE ->LOW */
+			ret = hsi_ioctl(hsi_protocol_iface.channels[0].dev, HSI_IOCTL_ACWAKE_DOWN, NULL);
+			if (ret == 0)
+				pr_debug("ACWAKE pulled low in %s()\n", __func__);
+			else
+				pr_err("ACWAKE pulled low in %s() ERROR : %d\n", __func__, ret);
+
+			pr_debug("[%s] Received CONN_CLOSED. ch-> %d\n", __func__, ch);
+			break;
+
+		default:
+			pr_err("Wrong STATE for CONN_CLOSED\n");
+		}
+		break;
+
+	case HSI_LL_MSG_CANCEL_CONN:
+		pr_debug("Received CANCEL_CONN\n");
+		break;
+
+	case HSI_LL_MSG_ACK:
+		switch (channel->tx_state) {
+		case HSI_LL_TX_STATE_WAIT_FOR_ACK:
+		case HSI_LL_TX_STATE_SEND_OPEN_CONN:
+			channel->tx_state = HSI_LL_TX_STATE_TX;
+			size = param;
+
+			/* For es 2.1 ver. - round size up to a word multiple */
+			if (size % 4)
+				size += (4 - (size % 4));
+
+			pr_debug("Writing %d bytes data on channel %d, tx_buf = %p, in %s()\n", size, ch, channel->tx_buf, __func__);
+			ret = hsi_proto_write(ch, channel->tx_buf, size);
+			channel->tx_state = HSI_LL_TX_STATE_WAIT_FOR_CONN_CLOSED;
+			wake_up_interruptible(&ipc_write_wait);
+			channel->tx_nak_count = 0;
+			break;
+
+		case HSI_LL_TX_STATE_CLOSED:/* ACK as response to CANCEL_CONN */
+			if (channel->rx_state == HSI_LL_RX_STATE_WAIT_FOR_CANCEL_CONN_ACK)
+				channel->rx_state = HSI_LL_RX_STATE_IDLE;
+			break;
+
+		case HSI_LL_TX_STATE_WAIT_FOR_CONF_ACK: /* ACK as response to CONF_RATE */
+			/* TODO: SET CONF RATE */
+			pr_debug("ACK Received for CONF_RATE\n");
+			break;
+
+		default:
+			pr_err("ACK Received for Unknown state\n");
+		}
+		break;
+
+	case HSI_LL_MSG_NAK:
+		switch (channel->tx_state) {
+		case HSI_LL_TX_STATE_WAIT_FOR_ACK:
+			printk(KERN_INFO "(%s) NAK received. ch->%d\n", __func__, ch);
+			if (channel->tx_nak_count < 10) {
+				msleep(10);
+
+				/* Find the last OPEN_CONN_OCTET we sent on
+				 * this channel to recover its size field. */
+				tmp_cmd = ((HSI_LL_MSG_OPEN_CONN_OCTET & 0x0000000F) << 28) |
+					  ((ch & 0x000000FF) << 24);
+				for (i = 49; i >= 0; i--) {
+					if ((hsi_cmd_history.tx_cmd[i] & 0xFFF00000) == tmp_cmd)
+						break;
+				}
+				if (i < 0) {
+					pr_err("(%s) no OPEN_CONN_OCTET in history for ch->%d\n", __func__, ch);
+					break;
+				}
+				size = (hsi_cmd_history.tx_cmd[i] & 0x000FFFFF);
+
+				pr_debug("(%s) Re Send OPEN CONN ch->%d, size->%d, count->%d\n", __func__, ch, size, channel->tx_nak_count);
+
+				hsi_protocol_send_command(HSI_LL_MSG_OPEN_CONN_OCTET, ch, size);
+				channel->tx_nak_count++;
+			} else {
+				hsi_protocol_send_command(HSI_LL_MSG_BREAK, ch, size);
+				pr_debug("(%s) Sending MSG_BREAK. ch->%d\n", __func__, ch);
+				/* TODO: Reset all channels and inform IPC
+				 * write about the failure. */
+			}
+			break;
+
+		case HSI_LL_TX_STATE_WAIT_FOR_CONF_ACK: /* NAK as response to CONF_RATE */
+			channel->tx_state = HSI_LL_TX_STATE_IDLE;
+			break;
+
+		default:
+			pr_err("ERROR - Received NAK in invalid state. state->%d\n", channel->tx_state);
+		}
+		break;
+
+	case HSI_LL_MSG_CONF_RATE:
+		/* TODO: Set Conf Rate */
+		pr_debug("CONF_RATE Received\n");
+		break;
+
+	case HSI_LL_MSG_OPEN_CONN_OCTET:
+		switch (channel->rx_state) {
+		case HSI_LL_RX_STATE_IDLE:
+			pr_debug("OPEN_CONN_OCTET in %s(), ch-> %d\n", __func__, ch);
+			channel->rx_state = HSI_LL_RX_STATE_TO_ACK;
+			hsi_protocol_send_command(HSI_LL_MSG_ACK, ch, param);
+
+			channel->rx_count = param;
+			channel->rx_state = HSI_LL_RX_STATE_RX;
+			wake_up_interruptible(&ipc_read_wait);
+			break;
+
+		case HSI_LL_RX_STATE_BLOCKED:
+			/* TODO */
+			break;
+
+		default:
+			pr_err("OPEN_CONN_OCTET in invalid state, Current State -> %d\n", channel->rx_state);
+			pr_info("Sending NAK to channel-> %d\n", ch);
+			hsi_protocol_send_command(HSI_LL_MSG_NAK, ch, param);
+		}
+		break;
+
+	default:
+		pr_err("Invalid Command encountered in rx_state()\n");
+	}
+}
diff --git a/drivers/omap_hsi/hsi_protocol_if.c b/drivers/omap_hsi/hsi_protocol_if.c
new file mode 100644
index 0000000..ced5dae
--- /dev/null
+++ b/drivers/omap_hsi/hsi_protocol_if.c
@@ -0,0 +1,896 @@
+/*
+ * File - hsi_protocol_if.c
+ *
+ * Implements HSI protocol for Infineon Modem.
+ *
+ * Copyright (C) 2011 Samsung Electronics.
+ *
+ * Author: Rupesh Gujare <rupesh.g@samsung.com>
+ *
+ * This package is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
+ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
+ */
+
+#include <linux/errno.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/platform_device.h>
+#include <asm/mach-types.h>
+#include <linux/ioctl.h>
+#include <linux/delay.h>
+#include <linux/ktime.h>
+#include <linux/bitmap.h>
+#include <linux/poll.h>
+#include <linux/kthread.h>
+#include <linux/slab.h>
+#include <linux/proc_fs.h>
+
+#include <linux/hsi_driver_if.h>
+#include "hsi-protocol-if.h"
+
+//#define DEBUG 1
+//#define DEBUG_PHY_DATA 1
+
+/* channel->state flag bits */
+#define HSI_CHANNEL_STATE_UNAVAIL (1 << 0)
+#define HSI_CHANNEL_STATE_READING (1 << 1)
+#define HSI_CHANNEL_STATE_WRITING (1 << 2)
+
+
+/* Global interface state shared with hsi_protocol.c / hsi_protocol_cmd.c */
+struct if_hsi_iface hsi_protocol_iface;
+wait_queue_head_t ipc_read_wait, ipc_write_wait;
+
+
+static void if_hsi_protocol_port_event(struct hsi_device *dev, unsigned int event,
+				void *arg);
+static int __devinit hsi_protocol_probe(struct hsi_device *dev);
+static int __devexit hsi_protocol_remove(struct hsi_device *dev);
+
+/* Registration with the HSI bus: binds any controller's channels */
+static struct hsi_device_driver if_hsi_protocol_driver = {
+	.ctrl_mask = ANY_HSI_CONTROLLER,
+	.probe = hsi_protocol_probe,
+	.remove = __devexit_p(hsi_protocol_remove),
+	.driver = {
+		   .name = "hsi_protocol"
+		   },
+};
+
+/* Circular 50-entry TX/RX command trace; dumped via the proc handler */
+struct if_hsi_cmd hsi_cmd_history;
+int tx_cmd_history_p = 0;
+int rx_cmd_history_p = 0;
+
+/*
+ * if_hsi_read_on - arm a read on @ch and hand the buffer to the driver.
+ *
+ * Marks the channel READING under its lock (a second concurrent read
+ * is rejected with -EBUSY), records buffer and byte count, then calls
+ * hsi_read() with the size converted from bytes to 32-bit words.
+ * The READING flag is cleared by if_hsi_proto_read_done().
+ */
+static int if_hsi_read_on(int ch, u32 *data, unsigned int count)
+{
+	struct if_hsi_channel *channel;
+	int ret;
+
+	channel = &hsi_protocol_iface.channels[ch];
+	dev_dbg(&channel->dev->device, "%s, ch = %d\n", __func__, ch);
+
+	spin_lock(&channel->lock);
+	if (channel->state & HSI_CHANNEL_STATE_READING) {
+		pr_err("Read still pending on channel %d\n", ch);
+		spin_unlock(&channel->lock);
+		return -EBUSY;
+	}
+	channel->state |= HSI_CHANNEL_STATE_READING;
+	channel->rx_data = data;
+	channel->rx_count = count;
+	spin_unlock(&channel->lock);
+
+	ret = hsi_read(channel->dev, data, count / 4);
+	dev_dbg(&channel->dev->device, "%s, ch = %d, ret = %d\n", __func__, ch,
+		ret);
+
+	return ret;
+}
+
+/*
+ * Driver read-completion callback: clears the READING flag, builds an
+ * HSI_EV_IN event (@size arrives in 32-bit words, converted to bytes
+ * here) and forwards it to if_notify() to wake the sleeping reader.
+ */
+static void if_hsi_proto_read_done(struct hsi_device *dev, unsigned int size)
+{
+	struct if_hsi_channel *channel;
+	struct hsi_event ev;
+
+#ifdef DEBUG_PHY_DATA
+	u32 *tmp;
+	u32 i;
+#endif
+
+	//printk(KERN_INFO "if_hsi_proto_read_done() is called for ch-> %d\n", dev->n_ch);
+	channel = &hsi_protocol_iface.channels[dev->n_ch];
+	dev_dbg(&channel->dev->device, "%s, ch = %d\n", __func__, dev->n_ch);
+	spin_lock(&channel->lock);
+	channel->state &= ~HSI_CHANNEL_STATE_READING;
+	ev.event = HSI_EV_IN;
+	ev.data = channel->rx_data;
+	ev.count = 4 * size;
+	spin_unlock(&channel->lock);
+
+#ifdef DEBUG_PHY_DATA
+	//Check received data -> Commented as it adds delay which causes MSG_BREAK
+	tmp = channel->rx_data;
+	printk(KERN_INFO "[%s](%d)(%d) RX = ", __func__, dev->n_ch, ev.count);
+	for (i = 0; i < ((size > 5) ? 5 : size); i++) {
+		printk(KERN_INFO "%08x ", *tmp);
+		tmp++;
+	}
+	printk(KERN_INFO "\n");
+#endif
+
+	if_notify(dev->n_ch, &ev);
+}
+
+/* Submit an asynchronous read of @count bytes on channel @ch. */
+int if_hsi_read(int ch, u32 *data, unsigned int count)
+{
+	struct if_hsi_channel *channel = &hsi_protocol_iface.channels[ch];
+
+	dev_dbg(&channel->dev->device, "%s, ch = %d\n", __func__, ch);
+	return if_hsi_read_on(ch, data, count);
+}
+
+/* Forward a poll request for channel @ch to the controller driver. */
+int if_hsi_poll(int ch)
+{
+	struct if_hsi_channel *channel = &hsi_protocol_iface.channels[ch];
+
+	dev_dbg(&channel->dev->device, "%s, ch = %d\n", __func__, ch);
+	return hsi_poll(channel->dev);
+}
+
+/*
+ * if_hsi_write_on - arm a write on @ch and hand the buffer to the driver.
+ *
+ * Marks the channel WRITING under its lock (a second concurrent write
+ * is rejected with -EBUSY), records buffer and byte count, then calls
+ * hsi_write() with the size converted from bytes to 32-bit words.
+ * The WRITING flag is cleared by if_hsi_proto_write_done().
+ */
+static int if_hsi_write_on(int ch, u32 *address, unsigned int count)
+{
+	struct if_hsi_channel *channel;
+	int ret;
+
+	channel = &hsi_protocol_iface.channels[ch];
+
+	spin_lock(&channel->lock);
+	if (channel->state & HSI_CHANNEL_STATE_WRITING) {
+		pr_err("Write still pending on channel %d\n", ch);
+		printk(KERN_INFO "Write still pending on channel %d\n", ch);
+		spin_unlock(&channel->lock);
+		return -EBUSY;
+	}
+
+	channel->tx_data = address;
+	channel->tx_count = count;
+	channel->state |= HSI_CHANNEL_STATE_WRITING;
+	spin_unlock(&channel->lock);
+	dev_dbg(&channel->dev->device, "%s, ch = %d\n", __func__, ch);
+	ret = hsi_write(channel->dev, address, count / 4);
+	return ret;
+}
+
+
+/*
+ * Driver write-completion callback: clears the WRITING flag, builds an
+ * HSI_EV_OUT event (@size arrives in 32-bit words, converted to bytes
+ * here) and forwards it to if_notify() to wake the sleeping writer.
+ */
+static void if_hsi_proto_write_done(struct hsi_device *dev, unsigned int size)
+{
+	struct if_hsi_channel *channel;
+	struct hsi_event ev;
+
+#ifdef DEBUG_PHY_DATA
+	u32 *tmp;
+	u32 i;
+#endif
+
+	//printk(KERN_INFO "if_hsi_proto_write_done() is called for ch-> %d\n", dev->n_ch);
+	channel = &hsi_protocol_iface.channels[dev->n_ch];
+	dev_dbg(&channel->dev->device, "%s, ch = %d\n", __func__, dev->n_ch);
+
+	spin_lock(&channel->lock);
+	channel->state &= ~HSI_CHANNEL_STATE_WRITING;
+	ev.event = HSI_EV_OUT;
+	ev.data = channel->tx_data;
+	ev.count = 4 * size;
+	spin_unlock(&channel->lock);
+
+#ifdef DEBUG_PHY_DATA
+	//Check Outgoing data, Commented as it adds delay which causes MSG_BREAK
+	tmp = channel->tx_data;
+	printk(KERN_INFO "[%s](%d)(%d) TX = ", __func__, dev->n_ch, ev.count);
+	for (i = 0; i < ((size > 5) ? 5 : size); i++) {
+		printk(KERN_INFO "%08x ", *tmp);
+		tmp++;
+	}
+	printk(KERN_INFO "\n");
+#endif
+
+	if_notify(dev->n_ch, &ev);
+
+}
+
+/* Submit an asynchronous write of @count bytes on channel @ch. */
+int if_hsi_write(int ch, u32 *data, unsigned int count)
+{
+	struct if_hsi_channel *channel = &hsi_protocol_iface.channels[ch];
+
+	dev_dbg(&channel->dev->device, "%s, ch = %d\n", __func__, ch);
+	return if_hsi_write_on(ch, data, count);
+}
+
+/*
+ * Cancel a pending read on @ch, if any, and clear the READING flag.
+ * NOTE(review): the state is tested before the lock is taken, so this
+ * can race with a concurrent completion - looks benign (cancel of an
+ * already-finished read) but confirm.
+ */
+void if_hsi_cancel_read(int ch)
+{
+	struct if_hsi_channel *channel;
+	channel = &hsi_protocol_iface.channels[ch];
+	dev_dbg(&channel->dev->device, "%s, ch = %d\n", __func__, ch);
+	if (channel->state & HSI_CHANNEL_STATE_READING)
+		hsi_read_cancel(channel->dev);
+	spin_lock(&channel->lock);
+	channel->state &= ~HSI_CHANNEL_STATE_READING;
+	spin_unlock(&channel->lock);
+}
+
+/* Drive the ACWAKE line for @ch: nonzero @state raises it, zero lowers it. */
+void if_hsi_set_wakeline(int ch, unsigned int state)
+{
+	struct if_hsi_channel *channel = &hsi_protocol_iface.channels[ch];
+	unsigned int request = state ? HSI_IOCTL_ACWAKE_UP
+				     : HSI_IOCTL_ACWAKE_DOWN;
+
+	hsi_ioctl(channel->dev, request, NULL);
+}
+
+
+/*
+ * Port-level event callback from the controller driver.
+ *
+ * HWBREAK is broadcast (HSI_EV_EXCEP) to every opened channel;
+ * DATAAVAILABLE is forwarded (HSI_EV_AVAIL) to the single channel
+ * passed in @arg; CAWAKE transitions and errors are only logged.
+ */
+static void if_hsi_protocol_port_event(struct hsi_device *dev, unsigned int event,
+				void *arg)
+{
+	struct hsi_event ev;
+	int i;
+
+	ev.event = HSI_EV_EXCEP;
+	ev.data = (u32 *) 0;
+	ev.count = 0;
+
+
+	switch (event) {
+	case HSI_EVENT_BREAK_DETECTED:
+		pr_debug("%s, HWBREAK detected\n", __func__);
+		ev.data = (u32 *) HSI_HWBREAK;
+		for (i = 0; i < HSI_MAX_CHANNELS; i++) {
+			if (hsi_protocol_iface.channels[i].opened)
+				if_notify(i, &ev);
+		}
+		break;
+	case HSI_EVENT_HSR_DATAAVAILABLE:
+		i = (int)arg;
+		pr_debug("%s, HSI_EVENT_HSR_DATAAVAILABLE channel = %d\n",
+			 __func__, i);
+		ev.event = HSI_EV_AVAIL;
+		if (hsi_protocol_iface.channels[i].opened)
+			if_notify(i, &ev);
+		break;
+	case HSI_EVENT_CAWAKE_UP:
+		pr_debug("%s, CAWAKE up\n", __func__);
+		break;
+	case HSI_EVENT_CAWAKE_DOWN:
+		pr_debug("%s, CAWAKE down\n", __func__);
+		break;
+	case HSI_EVENT_ERROR:
+		pr_debug("%s, HSI ERROR occured\n", __func__);
+		break;
+	default:
+		pr_warning("%s, Unknown event(%d)\n", __func__, event);
+		break;
+	}
+}
+
+/*
+ * if_hsi_openchannel - open @channel with the controller driver.
+ *
+ * Validates state under the channel lock, drops the lock around the
+ * (possibly sleeping) hsi_open() call, then re-takes it to publish the
+ * opened/tx_state/rx_state transition. Returns 0 on success, -ENODEV
+ * if the channel is unavailable, -EBUSY if already open, or the
+ * hsi_open() error.
+ * NOTE(review): dev_dbg dereferences channel->dev before the
+ * !channel->dev guard below - confirm probe ordering makes that safe.
+ */
+int if_hsi_openchannel(struct if_hsi_channel *channel)
+{
+	int ret = 0;
+
+	dev_dbg(&channel->dev->device, "%s, ch = %d\n", __func__,
+		channel->channel_id);
+	spin_lock(&channel->lock);
+
+	if (channel->state == HSI_CHANNEL_STATE_UNAVAIL) {
+		ret = -ENODEV;
+		goto leave;
+	}
+
+	if (channel->opened) {
+		ret = -EBUSY;
+		goto leave;
+	}
+
+	if (!channel->dev) {
+		pr_err("Channel %d is not ready??\n", channel->channel_id);
+		ret = -ENODEV;
+		goto leave;
+	}
+	/* hsi_open() may sleep; it must run unlocked */
+	spin_unlock(&channel->lock);
+
+	ret = hsi_open(channel->dev);
+	spin_lock(&channel->lock);
+	if (ret < 0) {
+		pr_err("Could not open channel %d\n", channel->channel_id);
+		goto leave;
+	}
+
+	channel->opened = 1;
+	channel->tx_state = HSI_LL_TX_STATE_IDLE;
+	channel->rx_state = HSI_LL_RX_STATE_TO_CONN_READY;
+	printk(KERN_INFO "setting channel->opened=1 for channel %d\n", channel->dev->n_ch);
+leave:
+	spin_unlock(&channel->lock);
+	return ret;
+}
+
+/*
+ * if_hsi_closechannel - close @channel with the controller driver.
+ *
+ * Cancels any in-flight read/write first (dropping the channel lock
+ * around each cancel, which may sleep), then calls hsi_close() and
+ * marks both state machines CLOSED. Returns 0, or -ENODEV if the
+ * channel has no device bound.
+ */
+int if_hsi_closechannel(struct if_hsi_channel *channel)
+{
+	int ret = 0;
+
+	dev_dbg(&channel->dev->device, "%s, ch = %d\n", __func__,
+		channel->channel_id);
+	spin_lock(&channel->lock);
+
+	if (!channel->opened)
+		goto leave;
+
+	if (!channel->dev) {
+		pr_err("Channel %d is not ready??\n", channel->channel_id);
+		ret = -ENODEV;
+		goto leave;
+	}
+
+	/* Stop any pending read/write */
+	if (channel->state & HSI_CHANNEL_STATE_READING) {
+		channel->state &= ~HSI_CHANNEL_STATE_READING;
+		spin_unlock(&channel->lock);
+		hsi_read_cancel(channel->dev);
+		spin_lock(&channel->lock);
+	}
+	if (channel->state & HSI_CHANNEL_STATE_WRITING) {
+		channel->state &= ~HSI_CHANNEL_STATE_WRITING;
+
+		spin_unlock(&channel->lock);
+		hsi_write_cancel(channel->dev);
+	} else
+		spin_unlock(&channel->lock);
+
+	hsi_close(channel->dev);
+
+	spin_lock(&channel->lock);
+	channel->opened = 0;
+	channel->tx_state = HSI_LL_TX_STATE_CLOSED;
+	channel->rx_state = HSI_LL_RX_STATE_CLOSED;
+leave:
+	spin_unlock(&channel->lock);
+	return ret;
+}
+
+
+/*
+ * hsi_read_thrd - kernel thread that services control channel 0.
+ *
+ * Loops forever: blocks in hsi_proto_read() on channel 0 for one
+ * 4-byte command PDU, records it in the RX command history ring,
+ * decodes it with hsi_decode_cmd() and feeds it to the RX state
+ * machine (rx_stm). Never exits on its own.
+ */
+static int hsi_read_thrd(void *data)
+{
+	u32 cmd_data[4], cmd, channel, param = 0;
+	int ret;
+
+	printk(KERN_INFO "Inside read thread\n");
+	while (1) {
+		/*Call hsi_proto_read*/
+		/*Read 16 bytes due to Modem limitation*/
+		//hsi_proto_read(0, cmd_data, (4 * 4));
+
+		// For es 2.1 ver.
+		hsi_proto_read(0, cmd_data, 4);
+
+		/* Record in the 50-entry RX command history ring */
+		hsi_cmd_history.rx_cmd[rx_cmd_history_p] = cmd_data[0];
+		hsi_cmd_history.rx_cmd_time[rx_cmd_history_p] = CURRENT_TIME;
+		rx_cmd_history_p++;
+		if (rx_cmd_history_p >= 50)
+			rx_cmd_history_p = 0;
+
+		/*Decode Command*/
+		ret = hsi_decode_cmd(&cmd_data[0], &cmd, &channel, &param);
+		if (ret != 0) {
+			pr_err("Can not decode command\n");
+		} else {
+			printk(KERN_INFO "%s(),CMD Received-> %x, ch-> %d, param-> %d.\n", __func__, cmd, channel, param);
+			/*Rx State Machine*/
+			rx_stm(cmd, channel, param);
+		}
+	}
+	return 0;
+}
+
+
+/*
+ * hsi_start_protocol - open channels 0..5 and configure both paths.
+ *
+ * For each channel: open it, then program RX and TX contexts (mode 2,
+ * divisor 1, HSI_MAX_CHANNELS channels). Errors are logged but do not
+ * stop the loop; the return value is that of the LAST SET_TX ioctl.
+ * Finally forces the control channel (0) TX state machine to IDLE so
+ * commands can be sent.
+ * NOTE(review): GET_RX/GET_TX results are unchecked and a failed open
+ * still gets its contexts programmed - confirm intended.
+ */
+int hsi_start_protocol(void)
+{
+	struct hst_ctx tx_config;
+	struct hsr_ctx rx_config;
+	int i, ret = 0;
+
+	printk(KERN_INFO "In function %s()\n", __func__);
+	/*Open All channels */
+	for (i = 0; i <= 5; i++) {
+		ret = if_hsi_openchannel(&hsi_protocol_iface.channels[i]);
+		if (ret < 0)
+			pr_err("Can not Open channel->%d . Can not start HSI protocol\n", i);
+		else
+			printk(KERN_INFO "Channel->%d Open Successful\n", i);
+
+		/*Set Rx Config*/
+		hsi_ioctl(hsi_protocol_iface.channels[i].dev, HSI_IOCTL_GET_RX, &rx_config);
+		rx_config.mode = 2;
+		rx_config.divisor = 1;
+		rx_config.channels = HSI_MAX_CHANNELS;
+		ret = hsi_ioctl(hsi_protocol_iface.channels[i].dev, HSI_IOCTL_SET_RX, &rx_config);
+		if (ret == 0)
+			printk(KERN_INFO "SET_RX Successful for ch->%d\n", i);
+
+		/*Set Tx Config*/
+		hsi_ioctl(hsi_protocol_iface.channels[i].dev, HSI_IOCTL_GET_TX, &tx_config);
+		tx_config.mode = 2;
+		tx_config.divisor = 1;
+		tx_config.channels = HSI_MAX_CHANNELS;
+		ret = hsi_ioctl(hsi_protocol_iface.channels[i].dev, HSI_IOCTL_SET_TX, &tx_config);
+		if (ret == 0)
+			printk(KERN_INFO "SET_TX Successful for ch->%d\n", i);
+	}
+	/*Make channel-0 tx_state to IDLE*/
+	hsi_protocol_iface.channels[0].tx_state = HSI_LL_TX_STATE_IDLE;
+	return ret;
+}
+EXPORT_SYMBOL(hsi_start_protocol);
+
+/*
+ * Legacy procfs read_proc handler: dumps the 50-entry TX/RX command
+ * history rings (command word plus timestamp) into @page and fills in
+ * the *start/*eof single-shot read protocol.
+ * NOTE(review): output size is not checked against PAGE_SIZE; 50
+ * fixed-width lines should fit, but confirm.
+ */
+static int hsi_protocol_proc(char *page, char **start, off_t off, int count, int *eof, void *data)
+{
+	char *p = page;
+	int len, i;
+
+	p += sprintf(p, "======= HISTORY OF CMD =======\n");
+	p += sprintf(p, " tx_cmd_history_p : %d\n", tx_cmd_history_p);
+	p += sprintf(p, " rx_cmd_history_p : %d\n", rx_cmd_history_p);
+	for (i = 0; i < 50; i++) {
+		p += sprintf(p, " [%d] tx : 0x%08x(%lu.%09lu), rx : 0x%08x(%lu.%09lu)\n",
+			i, hsi_cmd_history.tx_cmd[i], (unsigned long)hsi_cmd_history.tx_cmd_time[i].tv_sec, (unsigned long)hsi_cmd_history.tx_cmd_time[i].tv_nsec,
+			hsi_cmd_history.rx_cmd[i], (unsigned long)hsi_cmd_history.rx_cmd_time[i].tv_sec, (unsigned long)hsi_cmd_history.rx_cmd_time[i].tv_nsec);
+	}
+	p += sprintf(p, "======= HISTORY OF CMD =======\n");
+
+	len = (p - page) - off;
+	if (len < 0)
+		len = 0;
+
+	*eof = (len <= count) ? 1 : 0;
+	*start = page + off;
+
+	return len;
+}
+
+int __devexit hsi_protocol_remove(struct hsi_device *dev)
+{
+ struct if_hsi_channel *channel;
+ unsigned long *address;
+ int port, ret;
+
+ //dev_dbg(&dev->device, "%s, port = %d, ch = %d\n", __func__, dev->n_p,
+ // dev->n_ch);
+
+ for (port = 0; port < HSI_MAX_PORTS; port++) {
+ if (if_hsi_protocol_driver.ch_mask[port])
+ break;
+ }
+
+ address = &if_hsi_protocol_driver.ch_mask[port];
+
+ spin_lock_bh(&hsi_protocol_iface.lock);
+ if (test_bit(dev->n_ch, address) && (dev->n_p == port)) {
+ hsi_set_read_cb(dev, NULL);
+ hsi_set_write_cb(dev, NULL);
+ hsi_set_port_event_cb(dev, NULL);
+ channel = &hsi_protocol_iface.channels[dev->n_ch];
+ channel->dev = NULL;
+ channel->state = HSI_CHANNEL_STATE_UNAVAIL;
+ ret = 0;
+ }
+ spin_unlock_bh(&hsi_protocol_iface.lock);
+
+ return ret;
+}
+
+int __devinit hsi_protocol_probe(struct hsi_device *dev)
+{
+ struct if_hsi_channel *channel;
+ unsigned long *address;
+ int port;
+
+ printk(KERN_INFO "Inside Function %s\n", __func__);
+ for (port = 0; port < HSI_MAX_PORTS; port++) {
+ if (if_hsi_protocol_driver.ch_mask[port])
+ break;
+ }
+
+ address = &if_hsi_protocol_driver.ch_mask[port];
+
+ spin_lock_bh(&hsi_protocol_iface.lock);
+ if (test_bit(dev->n_ch, address) && (dev->n_p == port)) {
+ printk(KERN_INFO "Regestering callback functions\n");
+ hsi_set_read_cb(dev, if_hsi_proto_read_done);
+ hsi_set_write_cb(dev, if_hsi_proto_write_done);
+ hsi_set_port_event_cb(dev, if_hsi_protocol_port_event);
+ channel = &hsi_protocol_iface.channels[dev->n_ch];
+ channel->dev = dev;
+ channel->state = 0;
+ channel->rx_state = HSI_LL_RX_STATE_CLOSED;
+ channel->tx_state = HSI_LL_TX_STATE_CLOSED;
+ channel->tx_count = 0;
+ channel->rx_count = 0;
+ channel->tx_nak_count = 0;
+ channel->rx_nak_count = 0;
+ channel->rx_buf = NULL;
+ channel->tx_buf = NULL;
+ hsi_protocol_iface.init_chan_map ^= (1 << dev->n_ch);
+ }
+ spin_unlock_bh(&hsi_protocol_iface.lock);
+
+ return 0;
+
+}
+
+
+int __init if_hsi_init(void)
+{
+ struct if_hsi_channel *channel;
+ int i, ret;
+ struct proc_dir_entry *dir;
+
+ for (i = 0; i < HSI_MAX_PORTS; i++)
+ if_hsi_protocol_driver.ch_mask[i] = 0;
+
+ for (i = 0; i < HSI_MAX_CHANNELS; i++) {
+ channel = &hsi_protocol_iface.channels[i];
+ channel->dev = NULL;
+ channel->opened = 0;
+ channel->state = HSI_CHANNEL_STATE_UNAVAIL;
+ channel->channel_id = i;
+ spin_lock_init(&channel->lock);
+ }
+
+ /*Initialize waitqueue for IPC read*/
+ init_waitqueue_head(&ipc_read_wait);
+ init_waitqueue_head(&ipc_write_wait);
+
+ /*Select All Channels of PORT-1.*/
+ if_hsi_protocol_driver.ch_mask[0] = CHANNEL_MASK;
+
+ ret = hsi_register_driver(&if_hsi_protocol_driver);
+ if (ret)
+ pr_err("Error while registering HSI driver %d", ret);
+
+ dir = create_proc_read_entry("driver/hsi_cmd", 0, 0, hsi_protocol_proc, NULL);
+ if (dir == NULL)
+ printk(KERN_INFO "create_proc_read_entry Fail.\n");
+ printk(KERN_INFO "create_proc_read_entry Done.\n");
+
+ return ret;
+}
+
+int __devexit if_hsi_exit(void)
+{
+ struct if_hsi_channel *channel;
+ unsigned long *address;
+ int i, port;
+
+ pr_debug("%s\n", __func__);
+
+ for (port = 0; port < HSI_MAX_PORTS; port++) {
+ if (if_hsi_protocol_driver.ch_mask[port])
+ break;
+ }
+
+ address = &if_hsi_protocol_driver.ch_mask[port];
+
+ for (i = 0; i < HSI_MAX_CHANNELS; i++) {
+ channel = &hsi_protocol_iface.channels[i];
+ if (channel->opened) {
+ if_hsi_set_wakeline(i, HSI_IOCTL_ACWAKE_DOWN);
+ if_hsi_closechannel(channel);
+ }
+ }
+
+ hsi_unregister_driver(&if_hsi_protocol_driver);
+ return 0;
+
+}
+
+u32 initialization = 0;
+
+/*Write data to channel*/
+int write_hsi(u32 ch, u32 *data, int length)
+{
+ int ret;
+ //u32 cmd[4] = {0x00000000, 0xAAAAAAAA, 0xBBBBBBBB, 0xCCCCCCCC};
+ struct if_hsi_channel *channel;
+ struct task_struct *read_thread;
+
+ channel = &hsi_protocol_iface.channels[ch];
+ channel->tx_buf = data;
+ channel->tx_count = 0;
+
+ //cmd[0] = protocol_create_cmd(HSI_LL_MSG_OPEN_CONN_OCTET, ch, (void *)&length);
+ //printk(KERN_INFO "data ptr is %x\n", data);
+
+ if (initialization == 0) {
+
+#if 0
+ /* ACWAKE ->HIGH */
+ ret = hsi_ioctl(hsi_protocol_iface.channels[0].dev, HSI_IOCTL_ACWAKE_UP, NULL);
+ if (ret == 0)
+ printk(KERN_INFO "ACWAKE pulled high in %s()\n", __func__);
+ else
+ printk(KERN_INFO "ACWAKE pulled high in %s() ERROR : %d\n", __func__, ret);
+#endif
+
+ /*Creating read thread*/
+ read_thread = kthread_run(hsi_read_thrd, NULL, "hsi_read_thread");
+
+ initialization++;
+ }
+ /*Wait till previous data transfer is over*/
+ while (channel->tx_state != HSI_LL_TX_STATE_IDLE) {
+ //printk(KERN_INFO "Wait 5ms previous data transfer isn't over %s()\n", __func__);
+
+ //msleep(5);
+
+ return -EAGAIN;
+ }
+
+#if 1
+ /* ACWAKE ->HIGH */
+ ret = hsi_ioctl(hsi_protocol_iface.channels[0].dev, HSI_IOCTL_ACWAKE_UP, NULL);
+ if (ret == 0)
+ printk(KERN_INFO "ACWAKE pulled high in %s()\n", __func__);
+ else
+ printk(KERN_INFO "ACWAKE pulled high in %s() ERROR : %d\n", __func__, ret);
+#endif
+
+ channel->tx_state = HSI_LL_TX_STATE_WAIT_FOR_ACK;
+
+ //send_cmd(cmd, channel, data)
+ //ret = hsi_proto_write(0, &cmd, 4*4);
+ //printk(KERN_INFO "Write returned %d\n", ret);
+ hsi_protocol_send_command(HSI_LL_MSG_OPEN_CONN_OCTET, ch, length);
+
+ wait_event_interruptible(ipc_write_wait, channel->tx_count != 0);
+
+ return channel->tx_count;
+
+
+}
+EXPORT_SYMBOL(write_hsi);
+
+
+int read_hsi(u8 *data, u32 ch, u32 *length)
+{
+ int ret, size, tmp, actual_length;
+ struct if_hsi_channel *channel;
+
+ channel = &hsi_protocol_iface.channels[ch];
+ channel->rx_state = HSI_LL_RX_STATE_IDLE;
+
+ //printk(KERN_INFO "In read_hsi() function, Sleeping ... channel-> %d\n", ch);
+ wait_event_interruptible(ipc_read_wait, (channel->rx_count != 0));
+ //printk(KERN_INFO "In read_hsi() function, Waking Up ... channel-> %d\n", ch);
+
+ actual_length = channel->rx_count;
+ size = channel->rx_count;
+
+#if 0
+ // TEMP: send/read by 16 byte unit for v.11A(CP)
+ if ((size > 16) && (size % 16))
+ size += (16 - (size % 16));
+ else if (size < 16)
+ size = 16;
+#endif
+
+ // For es 2.1 ver.
+ if (size % 4)
+ size += (4 - (size % 4));
+
+ ret = hsi_proto_read(ch, (u32 *)data, size);
+ if (ret < 0)
+ printk(KERN_INFO "Read in IPC failed, %s()\n", __func__);
+
+ //printk(KERN_INFO "%s() read returned %d, actual_length = %d, ch-> %d\n", __func__, ret, actual_length, ch);
+ //printk(KERN_INFO "%s() sending CONN_CLOSED.\n", __func__);
+ tmp = hsi_protocol_send_command(HSI_LL_MSG_CONN_CLOSED, ch, 0);
+ //printk(KERN_INFO "%s() Sending CONN_CLOSED Finished. ret = %d\n", __func__, tmp);
+
+ *length = actual_length;
+ channel->rx_count = 0;
+
+ //printk(KERN_INFO "%s() RETURNING TO IPC with ret = %d\n", __func__, ret);
+ return ret;
+
+}
+EXPORT_SYMBOL(read_hsi);
+
+
+//========================================================//
+// ++ Flashless Boot. ++ //
+//========================================================//
+int hsi_start_protocol_single(void)
+{
+ int ret = 0;
+
+ struct hst_ctx tx_config;
+ struct hsr_ctx rx_config;
+
+ /*Open channel 0 */
+ ret = if_hsi_openchannel(&hsi_protocol_iface.channels[0]);
+ if (ret < 0) {
+ pr_err("Can not Open channel 0. Can not start HSI protocol\n");
+ goto err;
+ } else
+ printk(KERN_INFO "if_hsi_openchannel() returned %d\n", ret);
+
+
+ /*Set Tx Config*/
+ hsi_ioctl(hsi_protocol_iface.channels[0].dev, HSI_IOCTL_GET_TX, &tx_config);
+ tx_config.mode = 2;
+ tx_config.channels = 1;
+ tx_config.divisor = 0;
+ ret = hsi_ioctl(hsi_protocol_iface.channels[0].dev, HSI_IOCTL_SET_TX, &tx_config);
+ if (ret < 0) {
+ printk(KERN_INFO "write_hsi_direct : SET_TX Fail : %d\n", ret);
+ return ret;
+ }
+
+ hsi_ioctl(hsi_protocol_iface.channels[0].dev, HSI_IOCTL_GET_RX, &rx_config);
+ rx_config.mode = 2;
+ rx_config.channels = 1;
+ rx_config.divisor = 0;
+ //rx_config.timeout = HZ / 2;
+ ret = hsi_ioctl(hsi_protocol_iface.channels[0].dev, HSI_IOCTL_SET_RX, &rx_config);
+ if (ret < 0) {
+ printk(KERN_INFO "write_hsi_direct : SET_RX Fail : %d\n", ret);
+ return ret;
+ }
+
+ /* ACWAKE ->HIGH */
+ ret = hsi_ioctl(hsi_protocol_iface.channels[0].dev, HSI_IOCTL_ACWAKE_UP, NULL);
+ if (ret == 0)
+ printk(KERN_INFO "ACWAKE pulled high in %s()\n", __func__);
+
+err:
+
+ return ret;
+}
+EXPORT_SYMBOL(hsi_start_protocol_single);
+
+int hsi_reconfigure_protocol(void)
+{
+ int ret = 0;
+
+ /* ACWAKE ->LOW */
+ ret = hsi_ioctl(hsi_protocol_iface.channels[0].dev, HSI_IOCTL_ACWAKE_DOWN, NULL);
+ if (ret == 0)
+ printk(KERN_INFO "ACWAKE pulled low in %s()\n", __func__);
+ else
+ printk(KERN_INFO "ACWAKE down fail!! %d\n", ret);
+
+
+ /*Clse channel 0 */
+ ret = if_hsi_closechannel(&hsi_protocol_iface.channels[0]);
+ if (ret < 0) {
+ pr_err("Can not Close channel 0. Can not Stop HSI protocol for flashless\n");
+ goto err;
+ }
+
+
+ printk(KERN_INFO "(%s)(%d) hsi_start_protocol Start.\n", __func__, __LINE__);
+ hsi_start_protocol();
+ printk(KERN_INFO "(%s)(%d) hsi_start_protocol Done.\n", __func__, __LINE__);
+
+err:
+
+ return ret;
+}
+EXPORT_SYMBOL(hsi_reconfigure_protocol);
+
+int write_hsi_direct(u32 *data, int length)
+{
+ int retval = 0;
+#if 0
+ struct hst_ctx tx_config;
+
+
+ printk(KERN_INFO "write_hsi_direct : len : %d\n", length);
+ hsi_ioctl(hsi_protocol_iface.channels[0].dev, HSI_IOCTL_GET_TX, &tx_config);
+ tx_config.mode = 2;
+ tx_config.channels = 1;
+ tx_config.divisor = 47;
+ retval = hsi_ioctl(hsi_protocol_iface.channels[0].dev, HSI_IOCTL_SET_TX, &tx_config);
+ if (retval < 0) {
+ printk(KERN_INFO "write_hsi_direct : SET_TX Fail : %d\n", retval);
+ return retval;
+ }
+ printk(KERN_INFO "write_hsi_direct : SET_TX Successful\n");
+
+ retval = hsi_ioctl(hsi_protocol_iface.channels[0].dev, HSI_IOCTL_ACWAKE_UP, NULL);
+ if (retval < 0) {
+ printk(KERN_INFO "write_hsi_direct : ACWAKE High Fail : %d\n", retval);
+ return retval;
+ }
+#endif
+
+#if 0
+ if ((length > 16) && (length % 4))
+ length += (4 - (length % 4));
+ else if (length < 16)
+ length = 16;
+#endif
+
+// printk(KERN_INFO "write_hsi_direct : new len : %d\n", length);
+
+ retval = hsi_proto_write(0, data, length);
+ if (retval < 0) {
+ printk(KERN_INFO "write_hsi_direct : hsi_proto_write Fail : %d\n", retval);
+ return retval;
+ }
+ //printk(KERN_INFO "write_hsi_direct : Write returned %d\n", retval);
+
+ return retval;
+}
+EXPORT_SYMBOL(write_hsi_direct);
+
+int read_hsi_direct(u32 *data, int length)
+{
+ int retval = 0;
+#if 0
+ struct hsr_ctx rx_config;
+
+
+ printk(KERN_INFO "read_hsi_direct : len : %d\n", length);
+ hsi_ioctl(hsi_protocol_iface.channels[0].dev, HSI_IOCTL_GET_RX, &rx_config);
+ rx_config.mode = 2;
+ rx_config.channels = 1;
+ rx_config.divisor = 47;
+ retval = hsi_ioctl(hsi_protocol_iface.channels[0].dev, HSI_IOCTL_SET_RX, &rx_config);
+ if (retval < 0) {
+ printk(KERN_INFO "read_hsi_direct : SET_RX Fail : %d\n", retval);
+ return retval;
+ }
+ printk(KERN_INFO "read_hsi_direct : SET_RX Successful\n");
+
+ retval = hsi_ioctl(hsi_protocol_iface.channels[0].dev, HSI_IOCTL_ACWAKE_UP, NULL);
+ if (retval < 0) {
+ printk(KERN_INFO "read_hsi_direct : ACWAKE High Fail : %d\n", retval);
+ return retval;
+ }
+ printk(KERN_INFO "read_hsi_direct : ACWAKE High\n");
+#endif
+
+#if 0
+ if ((length > 16) && (length % 4))
+ length += (4 - (length % 4));
+ else if (length < 16)
+ length = 16;
+#endif
+ //printk(KERN_INFO "read_hsi_direct : new len : %d\n", length);
+
+ retval = hsi_proto_read(0, data, length);
+ if (retval < 0) {
+ printk(KERN_INFO "read_hsi_direct : hsi_proto_read Fail : %d\n", retval);
+ return retval;
+ }
+ //printk(KERN_INFO "read_hsi_direct : Read returned %d\n", retval);
+
+ return retval;
+}
+EXPORT_SYMBOL(read_hsi_direct);
+
+//========================================================//
+// -- Flashless Boot. -- //
+//========================================================//
diff --git a/include/linux/hsi_char.h b/include/linux/hsi_char.h
new file mode 100644
index 0000000..cfa6580
--- /dev/null
+++ b/include/linux/hsi_char.h
@@ -0,0 +1,71 @@
+/*
+ * hsi_char.h
+ *
+ * HSI character driver public declaration header file.
+ *
+ * Copyright (C) 2009 Nokia Corporation. All rights reserved.
+ * Copyright (C) 2009 Texas Instruments, Inc.
+ *
+ * Author: Andras Domokos <andras.domokos@nokia.com>
+ * Author: Sebastien JAN <s-jan@ti.com>
+ *
+ * This package is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
+ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
+ */
+
#ifndef HSI_CHAR_H
#define HSI_CHAR_H

/* ioctl command encoding for the HSI character device, magic 'S'. */
#define HSI_CHAR_BASE		'S'
#define CS_IOW(num, dtype)	_IOW(HSI_CHAR_BASE, num, dtype)
#define CS_IOR(num, dtype)	_IOR(HSI_CHAR_BASE, num, dtype)
#define CS_IOWR(num, dtype)	_IOWR(HSI_CHAR_BASE, num, dtype)
#define CS_IO(num)		_IO(HSI_CHAR_BASE, num)

#define CS_SEND_BREAK		CS_IO(1)
#define CS_FLUSH_RX		CS_IO(2)
#define CS_FLUSH_TX		CS_IO(3)
#define CS_BOOTSTRAP		CS_IO(4)
#define CS_SET_ACWAKELINE	CS_IOW(5, unsigned int)
#define CS_GET_ACWAKELINE	CS_IOR(6, unsigned int)
#define CS_SET_RX		CS_IOW(7, struct hsi_rx_config)
/* NOTE(review): CS_GET_RX and CS_GET_TX are encoded with CS_IOW although
 * they copy data to userspace; a GET would conventionally be CS_IOR.
 * Changing them alters the ioctl ABI numbers, so this is flagged rather
 * than fixed — confirm against the character-driver implementation. */
#define CS_GET_RX		CS_IOW(8, struct hsi_rx_config)
#define CS_SET_TX		CS_IOW(9, struct hsi_tx_config)
#define CS_GET_TX		CS_IOW(10, struct hsi_tx_config)
#define CS_SW_RESET		CS_IO(11)
#define CS_GET_FIFO_OCCUPANCY	CS_IOR(12, size_t)

/* HSI link operating modes. */
#define HSI_MODE_SLEEP		0
#define HSI_MODE_STREAM		1
#define HSI_MODE_FRAME		2

/* TX arbitration policies. */
#define HSI_ARBMODE_RR		0
#define HSI_ARBMODE_PRIO	1

/* ACWAKE line states for CS_SET_ACWAKELINE / CS_GET_ACWAKELINE. */
#define WAKE_UP			1
#define WAKE_DOWN		0

/* Userspace view of the transmitter configuration (CS_SET_TX/CS_GET_TX). */
struct hsi_tx_config {
	__u32 mode;
	__u32 flow;
	__u32 frame_size;
	__u32 channels;
	__u32 divisor;
	__u32 arb_mode;
};

/* Userspace view of the receiver configuration (CS_SET_RX/CS_GET_RX). */
struct hsi_rx_config {
	__u32 mode;
	__u32 flow;
	__u32 frame_size;
	__u32 channels;
	__u32 divisor; /* not used for SSI */
	__u32 counters;
};

#endif /* HSI_CHAR_H */
diff --git a/include/linux/hsi_driver_if.h b/include/linux/hsi_driver_if.h
new file mode 100644
index 0000000..547b30e
--- /dev/null
+++ b/include/linux/hsi_driver_if.h
@@ -0,0 +1,181 @@
+/*
+ * hsi_driver_if.h
+ *
+ * Header for the HSI driver low level interface.
+ *
+ * Copyright (C) 2007-2008 Nokia Corporation. All rights reserved.
+ * Copyright (C) 2009 Texas Instruments, Inc.
+ *
+ * Author: Carlos Chinea <carlos.chinea@nokia.com>
+ * Author: Sebastien JAN <s-jan@ti.com>
+ *
+ * This package is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
+ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
+ */
+
+#ifndef __HSI_DRIVER_IF_H__
+#define __HSI_DRIVER_IF_H__
+
+#include <linux/platform_device.h>
+#include <linux/device.h>
+#include <linux/clk.h>
+#include <linux/notifier.h>
+
+/* The number of ports handled by the driver (MAX:2). Reducing this value
+ * optimizes the driver memory footprint.
+ */
+#define HSI_MAX_PORTS 1
+
+/* bit-field definition for allowed controller IDs and channels */
+#define ANY_HSI_CONTROLLER -1
+
+/* HSR special divisor values set to control the auto-divisor Rx mode */
+#define HSI_HSR_DIVISOR_AUTO 0x1000 /* Activate auto Rx */
+#define HSI_SSR_DIVISOR_USE_TIMEOUT 0x1001 /* De-activate auto-Rx (SSI) */
+
/* Port-level events delivered to client drivers through the callback
 * registered with hsi_set_port_event_cb(). */
enum {
	HSI_EVENT_BREAK_DETECTED = 0,
	HSI_EVENT_ERROR,
	HSI_EVENT_PRE_SPEED_CHANGE,
	HSI_EVENT_POST_SPEED_CHANGE,
	HSI_EVENT_CAWAKE_UP,
	HSI_EVENT_CAWAKE_DOWN,
	HSI_EVENT_HSR_DATAAVAILABLE,
};
+
+enum {
+ HSI_IOCTL_ACWAKE_DOWN = 0, /* Unset HST ACWAKE line for channel */
+ HSI_IOCTL_ACWAKE_UP, /* Set HSI wakeup line (acwake) for channel */
+ HSI_IOCTL_SEND_BREAK, /* Send a HW BREAK frame in FRAME mode */
+ HSI_IOCTL_GET_ACWAKE, /* Get HST CAWAKE line status */
+ HSI_IOCTL_FLUSH_RX, /* Force the HSR to idle state */
+ HSI_IOCTL_FLUSH_TX, /* Force the HST to idle state */
+ HSI_IOCTL_GET_CAWAKE, /* Get CAWAKE (HSR) line status */
+ HSI_IOCTL_SET_RX, /* Set HSR configuration */
+ HSI_IOCTL_GET_RX, /* Get HSR configuration */
+ HSI_IOCTL_SET_TX, /* Set HST configuration */
+ HSI_IOCTL_GET_TX, /* Get HST configuration */
+ HSI_IOCTL_SW_RESET, /* Force a HSI SW RESET */
+ HSI_IOCTL_GET_FIFO_OCCUPANCY, /* Get amount of words in RX FIFO */
+ HSI_IOCTL_SET_ACREADY_SAFEMODE,
+ HSI_IOCTL_SET_ACREADY_NORMAL,
+ HSI_IOCTL_SET_3WIRE_MODE,
+ HSI_IOCTL_SET_4WIRE_MODE,
+};
+
+/* Forward references */
+struct hsi_device;
+struct hsi_channel;
+
+/* DPS */
/**
 * struct hst_ctx - HSI transmitter (HST) configuration context
 * @mode: transfer mode (HSI_MODE_SLEEP/STREAM/FRAME)
 * @flow: flow-control setting
 * @frame_size: frame size in bits (assumed — confirm against the HST regs)
 * @divisor: TX clock divisor
 * @arb_mode: channel arbitration (HSI_ARBMODE_RR/PRIO)
 * @channels: number of enabled TX channels
 */
struct hst_ctx {
	u32 mode;
	u32 flow;
	u32 frame_size;
	u32 divisor;
	u32 arb_mode;
	u32 channels;
};

/**
 * struct hsr_ctx - HSI receiver (HSR) configuration context
 * @mode: transfer mode (HSI_MODE_SLEEP/STREAM/FRAME)
 * @flow: flow-control setting
 * @frame_size: frame size in bits (assumed — confirm against the HSR regs)
 * @divisor: RX divisor; HSI_HSR_DIVISOR_AUTO selects auto-Rx mode
 * @counters: RX timeout/counter register value
 * @channels: number of enabled RX channels
 */
struct hsr_ctx {
	u32 mode;
	u32 flow;
	u32 frame_size;
	u32 divisor;
	u32 counters;
	u32 channels;
};

/**
 * struct port_ctx - per-port register context (saved/restored across
 * low-power transitions)
 * @sys_mpu_enable: SYS MPU interrupt-enable register pair
 * @hst: transmitter context
 * @hsr: receiver context
 */
struct port_ctx {
	u32 sys_mpu_enable[2];
	struct hst_ctx hst;
	struct hsr_ctx hsr;
};
+
+/**
+ * struct ctrl_ctx - hsi controller regs context
+ * @sysconfig: keeps HSI_SYSCONFIG reg state
+ * @gdd_gcr: keeps DMA_GCR reg state
+ * @dll: keeps HSR_DLL state
+ * @pctx: array of port context
+ */
+struct ctrl_ctx {
+ u32 sysconfig;
+ u32 gdd_gcr;
+ u32 dll;
+ struct port_ctx *pctx;
+};
+/* END DPS */
+
+
+/**
+ * struct hsi_device - HSI device object (Virtual)
+ * @n_ctrl: associated HSI controller platform id number
+ * @n_p: port number
+ * @n_ch: channel number
+ * @ch: channel descriptor
+ * @device: associated device
+*/
+struct hsi_device {
+ int n_ctrl;
+ unsigned int n_p;
+ unsigned int n_ch;
+ struct hsi_channel *ch;
+ struct device device;
+};
+
+#define to_hsi_device(dev) container_of(dev, struct hsi_device, device)
+
/**
 * struct hsi_device_driver - HSI driver instance container
 * @ctrl_mask: bit-field indicating the supported HSI device ids
 * @ch_mask: bit-field indicating enabled channels for this port
 * @probe: probe callback (driver registering)
 * @remove: remove callback (driver un-registering)
 * @suspend: suspend callback
 * @resume: resume callback
 * @driver: associated device_driver object
 * @priv_data: driver-private pointer, opaque to the HSI core
*/
struct hsi_device_driver {
	unsigned long ctrl_mask;
	unsigned long ch_mask[HSI_MAX_PORTS];
	int (*probe) (struct hsi_device *dev);
	int (*remove) (struct hsi_device *dev);
	int (*suspend) (struct hsi_device *dev, pm_message_t mesg);
	int (*resume) (struct hsi_device *dev);
	struct device_driver driver;
	void *priv_data;

};
+
+#define to_hsi_device_driver(drv) container_of(drv, \
+ struct hsi_device_driver, \
+ driver)
+
+int hsi_register_driver(struct hsi_device_driver *driver);
+void hsi_unregister_driver(struct hsi_device_driver *driver);
+int hsi_open(struct hsi_device *dev);
+int hsi_write(struct hsi_device *dev, u32 * addr, unsigned int size);
+int hsi_write_cancel(struct hsi_device *dev);
+int hsi_read(struct hsi_device *dev, u32 * addr, unsigned int size);
+int hsi_read_cancel(struct hsi_device *dev);
+int hsi_poll(struct hsi_device *dev);
+int hsi_unpoll(struct hsi_device *dev);
+int hsi_ioctl(struct hsi_device *dev, unsigned int command, void *arg);
+void hsi_close(struct hsi_device *dev);
+void hsi_set_read_cb(struct hsi_device *dev,
+ void (*read_cb) (struct hsi_device *dev,
+ unsigned int size));
+void hsi_set_write_cb(struct hsi_device *dev,
+ void (*write_cb) (struct hsi_device *dev,
+ unsigned int size));
+void hsi_set_port_event_cb(struct hsi_device *dev,
+ void (*port_event_cb) (struct hsi_device *dev,
+ unsigned int event,
+ void *arg));
+#endif /* __HSI_DRIVER_IF_H__ */