author    Djamil Elaidi <d-elaidi@ti.com>       2011-07-07 11:35:56 -0700
committer Colin Cross <ccross@android.com>      2011-07-13 16:53:16 -0700
commit    ab74ce507a997ac9afb00934a76aa5e87027d66c (patch)
tree      d890b9946b3c23c571529bf5ba7b0ebe8bf819a5 /drivers/omap_hsi
parent    8abbc21d0dd311f6b687e9d63014049bcb2154ae (diff)
OMAP4: HSI: driver misc fixes
These fixes are ported from the 2.6.35 kernel. Commit references:

http://review.omapzoom.org/#change,13457 : OMAP3+: xSI: extended HSI channel range for hsi_char
http://review.omapzoom.org/#change,13659 : OMAP3+: xSI: added char driver support for GET_CAWAKE ioctl
http://review.omapzoom.org/#change,13576 : OMAP4: HSI: implement PM suspend/resume ops
http://review.omapzoom.org/#change,13741 : OMAP4: HSI: Abort driver registration when bus registration fails
http://review.omapzoom.org/#change,13940 : OMAP3+: xSI: kill tasklet on HSI module unload
http://review.omapzoom.org/#change,12793 : OMAP3+: xSI: unmap DMA region on R/W cancel

Change-Id: Ieb995c1fd4e4d895ea054b7200fd61c5752b50c7
Signed-off-by: Djamil Elaidi <d-elaidi@ti.com>
Signed-off-by: Vikram Pandita <vikram.pandita@ti.com>
Diffstat (limited to 'drivers/omap_hsi')
-rw-r--r--  drivers/omap_hsi/hsi-char.c        | 39
-rw-r--r--  drivers/omap_hsi/hsi-char.h        |  6
-rw-r--r--  drivers/omap_hsi/hsi-if.c          | 22
-rw-r--r--  drivers/omap_hsi/hsi-if.h          |  8
-rw-r--r--  drivers/omap_hsi/hsi_driver.c      | 98
-rw-r--r--  drivers/omap_hsi/hsi_driver.h      | 13
-rw-r--r--  drivers/omap_hsi/hsi_driver_dma.c  | 87
-rw-r--r--  drivers/omap_hsi/hsi_driver_fifo.c |  3
-rw-r--r--  drivers/omap_hsi/hsi_driver_if.c   | 24
-rw-r--r--  drivers/omap_hsi/hsi_driver_int.c  | 81
10 files changed, 303 insertions(+), 78 deletions(-)
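The hsi-char change below relies on module_param_array() reporting, through its third argument, how many comma-separated values were actually passed at load time, so that only the populated part of channels_map is walked. A minimal standalone sketch of that pattern (module and parameter names here are illustrative, not part of the patch):

#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/moduleparam.h>

#define EXAMPLE_MAX_CHANNELS 16

static unsigned int channels_map[EXAMPLE_MAX_CHANNELS];
static unsigned int num_channels; /* filled in by module_param_array() at load time */

module_param_array(channels_map, uint, &num_channels, 0);
MODULE_PARM_DESC(channels_map, "channels to expose, e.g. channels_map=1,2,3");

static int __init example_init(void)
{
        unsigned int i;

        /* Only iterate over the entries the user actually provided. */
        for (i = 0; i < num_channels; i++)
                pr_info("example: mapping channel %u\n", channels_map[i]);
        return 0;
}
module_init(example_init);

static void __exit example_exit(void)
{
}
module_exit(example_exit);

MODULE_LICENSE("GPL");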
diff --git a/drivers/omap_hsi/hsi-char.c b/drivers/omap_hsi/hsi-char.c
index 8e8fa75..871de30 100644
--- a/drivers/omap_hsi/hsi-char.c
+++ b/drivers/omap_hsi/hsi-char.c
@@ -44,16 +44,16 @@
#include "hsi-char.h"
-#define DRIVER_VERSION "0.2.0"
+#define DRIVER_VERSION "0.2.1"
#define HSI_CHAR_DEVICE_NAME "hsi_char"
static unsigned int port = 1;
module_param(port, uint, 1);
MODULE_PARM_DESC(port, "HSI port to be probed");
-static unsigned int channels_map[HSI_MAX_CHAR_DEVS] = { 1 };
-
-module_param_array(channels_map, uint, NULL, 0);
+static unsigned int num_channels;
+static unsigned int channels_map[HSI_MAX_CHAR_DEVS] = { 0 };
+module_param_array(channels_map, uint, &num_channels, 0);
MODULE_PARM_DESC(channels_map, "HSI channels to be probed");
dev_t hsi_char_dev;
@@ -368,10 +368,15 @@ static long hsi_char_ioctl(struct file *file,
if (copy_from_user(&state, (void __user *)arg, sizeof(state)))
ret = -EFAULT;
else
- if_hsi_set_wakeline(ch, state);
+ if_hsi_set_acwakeline(ch, state);
break;
case CS_GET_ACWAKELINE:
- if_hsi_get_wakeline(ch, &state);
+ if_hsi_get_acwakeline(ch, &state);
+ if (copy_to_user((void __user *)arg, &state, sizeof(state)))
+ ret = -EFAULT;
+ break;
+ case CS_GET_CAWAKELINE:
+ if_hsi_get_cawakeline(ch, &state);
if (copy_to_user((void __user *)arg, &state, sizeof(state)))
ret = -EFAULT;
break;
@@ -416,17 +421,24 @@ static long hsi_char_ioctl(struct file *file,
static int hsi_char_open(struct inode *inode, struct file *file)
{
int ret = 0, ch = iminor(inode);
+ int i;
- pr_debug("%s, ch = %d, channels_map[%d] = %d\n", __func__, ch, ch,
- channels_map[ch]);
+ for (i = 0; i < HSI_MAX_CHAR_DEVS; i++)
+ if ((channels_map[i] - 1) == ch)
+ break;
- if (!channels_map[ch])
+ if (i == HSI_MAX_CHAR_DEVS) {
+ pr_err("HSI char open: Channel %d not found\n", ch);
return -ENODEV;
+ }
+
+ pr_debug("HSI char open: opening channel %d\n", ch);
spin_lock_bh(&hsi_char_data[ch].lock);
if (hsi_char_data[ch].opened) {
spin_unlock_bh(&hsi_char_data[ch].lock);
+ pr_err("HSI char open: Channel %d already opened\n", ch);
return -EBUSY;
}
@@ -491,6 +503,7 @@ static int __init hsi_char_init(void)
int ret, i;
pr_info("HSI character device version " DRIVER_VERSION "\n");
+ pr_info("HSI char driver: %d channels mapped\n", num_channels);
for (i = 0; i < HSI_MAX_CHAR_DEVS; i++) {
init_waitqueue_head(&hsi_char_data[i].rx_wait);
@@ -504,7 +517,7 @@ static int __init hsi_char_init(void)
/*printk(KERN_DEBUG "%s, devname = %s\n", __func__, devname); */
- ret = if_hsi_init(port, channels_map);
+ ret = if_hsi_init(port, channels_map, num_channels);
if (ret)
return ret;
@@ -517,7 +530,11 @@ static int __init hsi_char_init(void)
}
cdev_init(&hsi_char_cdev, &hsi_char_fops);
- cdev_add(&hsi_char_cdev, hsi_char_dev, HSI_MAX_CHAR_DEVS);
+ ret = cdev_add(&hsi_char_cdev, hsi_char_dev, HSI_MAX_CHAR_DEVS);
+ if (ret < 0) {
+ pr_err("HSI character device: Failed to add char device\n");
+ return ret;
+ }
return 0;
}
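The new CS_GET_CAWAKELINE case lets userspace read the CAWAKE line state through the character device. A rough userspace sketch, assuming the ioctl code is available from a hsi-char UAPI header and that a device node exists for the channel (the header name and node path are assumptions; only the ioctl name comes from the patch):

#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include "hsi_char.h"                    /* assumed to define CS_GET_CAWAKELINE */

int main(void)
{
        unsigned int state = 0;
        int fd = open("/dev/hsi_char0", O_RDWR); /* node name is an assumption */

        if (fd < 0) {
                perror("open");
                return 1;
        }
        if (ioctl(fd, CS_GET_CAWAKELINE, &state) < 0)
                perror("CS_GET_CAWAKELINE");
        else
                printf("CAWAKE is %s\n", state ? "high" : "low");
        close(fd);
        return 0;
}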
diff --git a/drivers/omap_hsi/hsi-char.h b/drivers/omap_hsi/hsi-char.h
index cdc220b..c4b1c4c 100644
--- a/drivers/omap_hsi/hsi-char.h
+++ b/drivers/omap_hsi/hsi-char.h
@@ -24,7 +24,11 @@
#include "hsi-if.h"
/* how many char devices would be created at most */
-#define HSI_MAX_CHAR_DEVS 8
+#define HSI_MAX_CHAR_DEVS 16
+
+/* Max HSI channel id allowed to be handled as char device. */
+/* Current range [1, 16] */
+#define HSI_MAX_CHAR_DEV_ID 16
void if_hsi_notify(int ch, struct hsi_event *ev);
diff --git a/drivers/omap_hsi/hsi-if.c b/drivers/omap_hsi/hsi-if.c
index e7a74a6..5228b6a 100644
--- a/drivers/omap_hsi/hsi-if.c
+++ b/drivers/omap_hsi/hsi-if.c
@@ -240,14 +240,14 @@ void if_hsi_flush_tx(int ch)
hsi_ioctl(channel->dev, HSI_IOCTL_FLUSH_TX, NULL);
}
-void if_hsi_get_wakeline(int ch, unsigned int *state)
+void if_hsi_get_acwakeline(int ch, unsigned int *state)
{
struct if_hsi_channel *channel;
channel = &hsi_iface.channels[ch];
hsi_ioctl(channel->dev, HSI_IOCTL_GET_ACWAKE, state);
}
-void if_hsi_set_wakeline(int ch, unsigned int state)
+void if_hsi_set_acwakeline(int ch, unsigned int state)
{
struct if_hsi_channel *channel;
channel = &hsi_iface.channels[ch];
@@ -255,6 +255,13 @@ void if_hsi_set_wakeline(int ch, unsigned int state)
state ? HSI_IOCTL_ACWAKE_UP : HSI_IOCTL_ACWAKE_DOWN, NULL);
}
+void if_hsi_get_cawakeline(int ch, unsigned int *state)
+{
+ struct if_hsi_channel *channel;
+ channel = &hsi_iface.channels[ch];
+ hsi_ioctl(channel->dev, HSI_IOCTL_GET_CAWAKE, state);
+}
+
int if_hsi_set_rx(int ch, struct hsi_rx_config *cfg)
{
int ret;
@@ -482,7 +489,7 @@ static int __devinit if_hsi_probe(struct hsi_device *dev)
if (port == HSI_MAX_PORTS)
return -ENXIO;
- if (dev->n_ch >= HSI_MAX_CHAR_DEVS) {
+ if (dev->n_ch >= HSI_MAX_CHAR_DEV_ID) {
pr_err("HSI char driver cannot handle channel %d\n", dev->n_ch);
return -ENXIO;
}
@@ -581,7 +588,8 @@ static void if_hsi_port_event(struct hsi_device *dev, unsigned int event,
}
}
-int __init if_hsi_init(unsigned int port, unsigned int *channels_map)
+int __init if_hsi_init(unsigned int port, unsigned int *channels_map,
+ unsigned int num_channels)
{
struct if_hsi_channel *channel;
int i, ret = 0;
@@ -607,10 +615,10 @@ int __init if_hsi_init(unsigned int port, unsigned int *channels_map)
spin_lock_init(&channel->lock);
}
- for (i = 0; (i < HSI_MAX_CHAR_DEVS) && channels_map[i]; i++) {
+ for (i = 0; (i < num_channels) && channels_map[i]; i++) {
pr_debug("%s, port = %d, channels_map[i] = %d\n", __func__,
port, channels_map[i]);
- if ((channels_map[i] - 1) < HSI_MAX_CHAR_DEVS)
+ if ((channels_map[i] - 1) < HSI_MAX_CHAR_DEV_ID)
if_hsi_char_driver.ch_mask[port] |=
(1 << ((channels_map[i] - 1)));
else {
@@ -655,7 +663,7 @@ int __devexit if_hsi_exit(void)
for (i = 0; i < HSI_MAX_CHAR_DEVS; i++) {
channel = &hsi_iface.channels[i];
if (channel->opened) {
- if_hsi_set_wakeline(i, HSI_IOCTL_ACWAKE_DOWN);
+ if_hsi_set_acwakeline(i, HSI_IOCTL_ACWAKE_DOWN);
if_hsi_closechannel(channel);
}
}
diff --git a/drivers/omap_hsi/hsi-if.h b/drivers/omap_hsi/hsi-if.h
index 1ce8525..96afdd4 100644
--- a/drivers/omap_hsi/hsi-if.h
+++ b/drivers/omap_hsi/hsi-if.h
@@ -38,7 +38,8 @@ struct hsi_event {
unsigned int count;
};
-int if_hsi_init(unsigned int port, unsigned int *channels_map);
+int if_hsi_init(unsigned int port, unsigned int *channels_map,
+ unsigned int num_channels);
int if_hsi_exit(void);
int if_hsi_start(int ch);
@@ -48,8 +49,9 @@ void if_hsi_send_break(int ch);
void if_hsi_flush_rx(int ch);
void if_hsi_flush_tx(int ch);
void if_hsi_bootstrap(int ch);
-void if_hsi_set_wakeline(int ch, unsigned int state);
-void if_hsi_get_wakeline(int ch, unsigned int *state);
+void if_hsi_set_acwakeline(int ch, unsigned int state);
+void if_hsi_get_acwakeline(int ch, unsigned int *state);
+void if_hsi_get_cawakeline(int ch, unsigned int *state);
int if_hsi_set_rx(int ch, struct hsi_rx_config *cfg);
void if_hsi_get_rx(int ch, struct hsi_rx_config *cfg);
int if_hsi_set_tx(int ch, struct hsi_tx_config *cfg);
diff --git a/drivers/omap_hsi/hsi_driver.c b/drivers/omap_hsi/hsi_driver.c
index 1ae37e2..485e40a 100644
--- a/drivers/omap_hsi/hsi_driver.c
+++ b/drivers/omap_hsi/hsi_driver.c
@@ -117,6 +117,7 @@ void hsi_restore_ctx(struct hsi_dev *hsi_ctrl)
hsi_outl(p->hst.channels, base, HSI_HST_CHANNELS_REG(port));
hsi_outl(p->hst.arb_mode, base, HSI_HST_ARBMODE_REG(port));
+ /* HSR */
if (!hsi_driver_device_is_hsi(pdev))
hsi_outl(p->hsr.frame_size, base,
HSI_HSR_FRAMESIZE_REG(port));
@@ -130,11 +131,10 @@ void hsi_restore_ctx(struct hsi_dev *hsi_ctrl)
hsi_fifo_mapping(hsi_ctrl, HSI_FIFO_MAPPING_DEFAULT);
}
- /* Patch 110331 : Narasimha */
+ /* As a last step move HSR from MODE_VAL.SLEEP to the relevant mode. */
+ /* This will enable the ACREADY flow control mechanism. */
for (port = 1; port <= pdata->num_ports; port++) {
p = &pdata->ctx->pctx[port - 1];
-
- /* HSR */
hsi_outl(p->hsr.mode, base, HSI_HSR_MODE_REG(port));
}
}
@@ -202,8 +202,8 @@ static int __init reg_hsi_dev_ch(struct hsi_dev *hsi_ctrl, unsigned int p,
dev_set_name(&dev->device, "omap_hsi%d-p%u.c%u", dev->n_ctrl, p,
ch);
- pr_debug
- ("HSI DRIVER : reg_hsi_dev_ch, port %d, ch %d, hsi_ctrl->dev:0x%x,"
+ dev_dbg(hsi_ctrl->dev,
+ "reg_hsi_dev_ch, port %d, ch %d, hsi_ctrl->dev:0x%x,"
"&dev->device:0x%x\n",
p, ch, (unsigned int)hsi_ctrl->dev, (unsigned int)&dev->device);
@@ -262,12 +262,17 @@ void hsi_set_pm_default(struct hsi_dev *hsi_ctrl)
/* HSI_TODO : use the HWMOD API : omap_hwmod_set_slave_idlemode() */
}
+
void hsi_set_pm_force_hsi_on(struct hsi_dev *hsi_ctrl)
{
+ /* Force HSI to ON by never acknowledging a PRCM idle request */
+ /* SIdleAck and MStandby are never asserted */
hsi_outl((HSI_AUTOIDLE | HSI_SIDLEMODE_NO |
HSI_MIDLEMODE_NO),
hsi_ctrl->base, HSI_SYS_SYSCONFIG_REG);
hsi_outl(HSI_CLK_AUTOGATING_ON, hsi_ctrl->base, HSI_GDD_GCR_REG);
+
+ /* HSI_TODO : use the HWMOD API : omap_hwmod_set_slave_idlemode() */
}
int hsi_softreset(struct hsi_dev *hsi_ctrl)
@@ -633,6 +638,12 @@ void hsi_clocks_disable_channel(struct device *dev, u8 channel_number,
if (hsi_is_hst_controller_busy(hsi_ctrl))
dev_dbg(dev, "Disabling clocks with HST FSM not IDLE !\n");
+#ifdef K3_0_PORTING_HSI_MISSING_FEATURE
+ /* Allow Fclk to change */
+ if (dpll_cascading_blocker_release(dev) < 0)
+ dev_warn(dev, "Error releasing DPLL cascading constraint\n");
+#endif
+
#ifndef USE_PM_RUNTIME_FOR_HSI
hsi_runtime_suspend(dev);
omap_device_idle(pd);
@@ -673,6 +684,12 @@ int hsi_clocks_enable_channel(struct device *dev, u8 channel_number,
return -EEXIST;
}
+#ifdef K3_0_PORTING_HSI_MISSING_FEATURE
+ /* Prevent Fclk change */
+ if (dpll_cascading_blocker_hold(dev) < 0)
+ dev_warn(dev, "Error holding DPLL cascading constraint\n");
+#endif
+
#ifndef USE_PM_RUNTIME_FOR_HSI
omap_device_enable(pd);
hsi_runtime_resume(dev);
@@ -756,7 +773,6 @@ static void hsi_controller_exit(struct hsi_dev *hsi_ctrl)
hsi_ports_exit(hsi_ctrl, hsi_ctrl->max_p);
}
-
/* HSI Platform Device probing & hsi_device registration */
static int __init hsi_platform_device_probe(struct platform_device *pd)
{
@@ -771,7 +787,7 @@ static int __init hsi_platform_device_probe(struct platform_device *pd)
hsi_driver_device_is_hsi(pd) ? "HSI" : "SSI");
if (!pdata) {
- pr_err(LOG_NAME "No platform_data found on hsi device\n");
+ dev_err(&pd->dev, "No platform_data found on hsi device\n");
return -ENXIO;
}
@@ -830,10 +846,16 @@ static int __init hsi_platform_device_probe(struct platform_device *pd)
}
/* Allow HSI to wake up the platform */
- device_init_wakeup(hsi_ctrl->dev, 1);
-
- /* From here no need for HSI HW access */
- hsi_clocks_disable(hsi_ctrl->dev, __func__);
+ device_init_wakeup(hsi_ctrl->dev, true);
+
+#ifdef K3_0_PORTING_HSI_MISSING_FEATURE
+ /* Set the HSI FCLK to default. */
+ err = omap_device_set_rate(hsi_ctrl->dev, hsi_ctrl->dev,
+ pdata->default_hsi_fclk);
+ if (err)
+ dev_err(&pd->dev, "Cannot set HSI FClk to default value: %ld\n",
+ pdata->default_hsi_fclk);
+#endif
/* From here no need for HSI HW access */
hsi_clocks_disable(hsi_ctrl->dev, __func__);
@@ -878,18 +900,47 @@ static int __exit hsi_platform_device_remove(struct platform_device *pd)
#ifdef CONFIG_SUSPEND
static int hsi_suspend_noirq(struct device *dev)
{
+ struct hsi_platform_data *pdata = dev->platform_data;
+ struct platform_device *pd = to_platform_device(dev);
+ struct hsi_dev *hsi_ctrl = platform_get_drvdata(pd);
+
dev_dbg(dev, "%s\n", __func__);
- /* HSI_TODO : missing the SUSPEND feature */
+ /* If HSI is enabled, CAWAKE IO wakeup has been disabled and */
+ /* we don't want to re-enable it here. HSI interrupt shall be */
+ /* generated normally because HSI HW is ON. */
+ if (hsi_ctrl->clock_enabled) {
+ dev_info(dev, "Platform Suspend while HSI active\n");
+ return 0;
+ }
+
+ /* Perform HSI board specific action before platform suspend */
+ if (pdata->board_suspend)
+ pdata->board_suspend(0, device_may_wakeup(dev));
return 0;
}
static int hsi_resume_noirq(struct device *dev)
{
+ struct hsi_platform_data *pdata = dev->platform_data;
+
dev_dbg(dev, "%s\n", __func__);
- /* HSI_TODO : missing the SUSPEND feature */
+ /* This function shall not schedule the tasklet, because it is */
+ /* redundant with what is already done in the PRCM interrupt handler. */
+ /* HSI IO checking in PRCM int handler is done when waking up from : */
+ /* - Device OFF mode (wake up from suspend) */
+ /* - L3INIT in RET (Idle mode) */
+ /* hsi_resume_noirq is called only when system wakes up from suspend. */
+ /* So HSI IO checking in PRCM int handler and hsi_resume_noirq are */
+ /* redundant. We need to choose which one will schedule the tasklet */
+ /* Since HSI IO checking in PRCM int handler covers more cases, it is */
+ /* the winner. */
+
+ /* Perform (optional) HSI board specific action after platform wakeup */
+ if (pdata->board_resume)
+ pdata->board_resume(0);
return 0;
}
@@ -917,7 +968,9 @@ int hsi_runtime_resume(struct device *dev)
/* Restore context */
hsi_restore_ctx(hsi_ctrl);
- pdata->wakeup_disable(hsi_ctrl, 0);
+ /* When HSI is ON, no need for IO wakeup mechanism */
+ pdata->wakeup_disable(0);
+
/* HSI device is now fully operational and _must_ be able to */
/* complete I/O operations */
@@ -948,15 +1001,17 @@ int hsi_runtime_suspend(struct device *dev)
hsi_ctrl->clock_enabled = false;
+ /* Put HSR into SLEEP mode to force ACREADY to low while HSI is idle */
for (port = 1; port <= pdata->num_ports; port++) {
hsi_outl_and(HSI_HSR_MODE_MODE_VAL_SLEEP, hsi_ctrl->base,
HSI_HSR_MODE_REG(port));
}
+ /* HSI is going to INA/RET/OFF, it needs IO wakeup mechanism enabled */
if (device_may_wakeup(dev))
- pdata->wakeup_enable(hsi_ctrl, 0);
+ pdata->wakeup_enable(0);
else
- pdata->wakeup_disable(hsi_ctrl, 0);
+ pdata->wakeup_disable(0);
/* HSI is now ready to be put in low power state */
@@ -1044,10 +1099,15 @@ static int __init hsi_driver_init(void)
{
int err = 0;
- pr_info("HSI DRIVER Version " HSI_DRIVER_VERSION "\n");
+ pr_info(LOG_NAME "HSI DRIVER Version " HSI_DRIVER_VERSION "\n");
/* Register the (virtual) HSI bus */
- hsi_bus_init();
+ err = hsi_bus_init();
+ if (err < 0) {
+ pr_err(LOG_NAME "HSI bus_register err %d\n", err);
+ return err;
+ }
+
err = hsi_debug_init();
if (err < 0) {
pr_err(LOG_NAME "HSI Debugfs failed %d\n", err);
@@ -1075,7 +1135,7 @@ static void __exit hsi_driver_exit(void)
hsi_debug_exit();
hsi_bus_exit();
- pr_info("HSI DRIVER removed\n");
+ pr_info(LOG_NAME "HSI DRIVER removed\n");
}
module_init(hsi_driver_init);
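hsi_suspend_noirq()/hsi_resume_noirq() only take effect once they are hooked into the driver's dev_pm_ops, which this excerpt does not show. The usual wiring looks like the sketch below (only the two callback names come from the patch; the driver name and surrounding fields are assumptions):

#ifdef CONFIG_SUSPEND
static const struct dev_pm_ops hsi_driver_pm_ops = {
        /* Run with interrupts disabled, after all regular suspend callbacks
         * and before all regular resume callbacks. */
        .suspend_noirq = hsi_suspend_noirq,
        .resume_noirq  = hsi_resume_noirq,
};
#endif

static struct platform_driver hsi_pdriver = {
        .driver = {
                .name = "omap_hsi",              /* driver name is an assumption */
#ifdef CONFIG_SUSPEND
                .pm   = &hsi_driver_pm_ops,
#endif
        },
};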
diff --git a/drivers/omap_hsi/hsi_driver.h b/drivers/omap_hsi/hsi_driver.h
index d5ca79c..0991d98 100644
--- a/drivers/omap_hsi/hsi_driver.h
+++ b/drivers/omap_hsi/hsi_driver.h
@@ -204,14 +204,21 @@ struct hsi_dev { /* HSI_TODO: should be later renamed into hsi_controller*/
#endif
struct device *dev;
};
+
+/**
+ * struct hsi_platform_data - Board specific data
+*/
struct hsi_platform_data {
void (*set_min_bus_tput) (struct device *dev, u8 agent_id,
unsigned long r);
int (*device_enable) (struct platform_device *pdev);
int (*device_shutdown) (struct platform_device *pdev);
int (*device_idle) (struct platform_device *pdev);
- int (*wakeup_enable) (struct hsi_dev *hsi_ctrl, int hsi_port);
- int (*wakeup_disable) (struct hsi_dev *hsi_ctrl, int hsi_port);
+ int (*wakeup_enable) (int hsi_port);
+ int (*wakeup_disable) (int hsi_port);
+ int (*wakeup_is_from_hsi) (void);
+ int (*board_suspend)(int hsi_port, bool dev_may_wakeup);
+ int (*board_resume)(int hsi_port);
u8 num_ports;
struct ctrl_ctx *ctx;
u8 hsi_gdd_chan_count;
@@ -220,7 +227,7 @@ struct hsi_platform_data {
/* HSI Bus */
extern struct bus_type hsi_bus_type;
-void do_hsi_tasklet(unsigned long hsi_port);
+
int hsi_port_event_handler(struct hsi_port *p, unsigned int event, void *arg);
int hsi_bus_init(void);
void hsi_bus_exit(void);
diff --git a/drivers/omap_hsi/hsi_driver_dma.c b/drivers/omap_hsi/hsi_driver_dma.c
index f53dc95..ad819f5 100644
--- a/drivers/omap_hsi/hsi_driver_dma.c
+++ b/drivers/omap_hsi/hsi_driver_dma.c
@@ -53,6 +53,8 @@ bool hsi_is_dma_read_int_pending(struct hsi_dev *hsi_ctrl)
status_reg &= hsi_inl(base, HSI_SYS_GDD_MPU_IRQ_ENABLE_REG);
if (!status_reg)
return false;
+
+ /* Scan all enabled DMA channels */
for (gdd_lch = 0; gdd_lch < hsi_ctrl->gdd_chan_count; gdd_lch++) {
if (!(status_reg & HSI_GDD_LCH(gdd_lch)))
continue;
@@ -136,6 +138,10 @@ int hsi_driver_write_dma(struct hsi_channel *hsi_channel, u32 * data,
sync = hsi_sync_table[HSI_SYNC_WRITE][port - 1][channel];
src_addr = dma_map_single(hsi_ctrl->dev, data, size * 4, DMA_TO_DEVICE);
+ if (unlikely(dma_mapping_error(hsi_ctrl->dev, src_addr))) {
+ dev_err(hsi_ctrl->dev, "Failed to create DMA write mapping.\n");
+ return -ENOMEM;
+ }
tmp = HSI_SRC_SINGLE_ACCESS0 |
HSI_SRC_MEMORY_PORT |
@@ -169,6 +175,8 @@ int hsi_driver_write_dma(struct hsi_channel *hsi_channel, u32 * data,
hsi_outl(src_addr, base, HSI_GDD_CSSA_REG(lch));
hsi_outw(size, base, HSI_GDD_CEN_REG(lch));
+ /* TODO : Need to clean interrupt status here to avoid spurious int */
+
hsi_outl_or(HSI_GDD_LCH(lch), base, HSI_SYS_GDD_MPU_IRQ_ENABLE_REG);
hsi_outw_or(HSI_CCR_ENABLE, base, HSI_GDD_CCR_REG(lch));
@@ -213,7 +221,7 @@ int hsi_driver_read_dma(struct hsi_channel *hsi_channel, u32 * data,
/* When DMA is used for Rx, disable the Rx Interrupt.
* (else DATAAVAILLABLE event would get triggered on first
* received data word)
- * (By default, Rx interrupt is active for polling feature)
+ * (Rx interrupt might be active for polling feature)
*/
#if 0
if (omap_readl(0x4A05A810)) {
@@ -235,6 +243,10 @@ int hsi_driver_read_dma(struct hsi_channel *hsi_channel, u32 * data,
dest_addr = dma_map_single(hsi_ctrl->dev, data, count * 4,
DMA_FROM_DEVICE);
+ if (unlikely(dma_mapping_error(hsi_ctrl->dev, dest_addr))) {
+ dev_err(hsi_ctrl->dev, "Failed to create DMA read mapping.\n");
+ return -ENOMEM;
+ }
tmp = HSI_DST_SINGLE_ACCESS0 |
HSI_DST_MEMORY_PORT |
@@ -268,12 +280,28 @@ int hsi_driver_read_dma(struct hsi_channel *hsi_channel, u32 * data,
hsi_outl(dest_addr, base, HSI_GDD_CDSA_REG(lch));
hsi_outw(count, base, HSI_GDD_CEN_REG(lch));
+ /* TODO : Need to clean interrupt status here to avoid spurious int */
+
hsi_outl_or(HSI_GDD_LCH(lch), base, HSI_SYS_GDD_MPU_IRQ_ENABLE_REG);
hsi_outw_or(HSI_CCR_ENABLE, base, HSI_GDD_CCR_REG(lch));
return 0;
}
+/**
+ * hsi_driver_cancel_write_dma - Cancel an ongoing GDD [DMA] write for the
+ * specified hsi channel.
+ * @hsi_ch - pointer to the hsi_channel to cancel DMA write.
+ *
+ * hsi_controller lock must be held before calling this function.
+ *
+ * Return: -ENXIO : No DMA channel found for specified HSI channel
+ * -ECANCELED : DMA cancel success, data not transferred to TX FIFO
+ * 0 : DMA transfer is already over, data already transferred to TX FIFO
+ *
+ * Note: whatever the returned value, the write callback will not be called
+ * after write cancel.
+ */
int hsi_driver_cancel_write_dma(struct hsi_channel *hsi_ch)
{
int lch = hsi_ch->write_data.lch;
@@ -283,6 +311,9 @@ int hsi_driver_cancel_write_dma(struct hsi_channel *hsi_ch)
u16 ccr, gdd_csr;
long buff_offset;
u32 status_reg;
+ dma_addr_t dma_h;
+ size_t size;
+
dev_err(&hsi_ch->dev->device, "hsi_driver_cancel_write_dma( "
"channel %d\n", hsi_ch->channel_number);
@@ -300,12 +331,20 @@ int hsi_driver_cancel_write_dma(struct hsi_channel *hsi_ch)
status_reg = hsi_inl(hsi_ctrl->base, HSI_SYS_GDD_MPU_IRQ_STATUS_REG);
status_reg &= hsi_inl(hsi_ctrl->base, HSI_SYS_GDD_MPU_IRQ_ENABLE_REG);
hsi_outw_and(~HSI_CCR_ENABLE, hsi_ctrl->base, HSI_GDD_CCR_REG(lch));
+
+ /* Clear CSR register by reading it, as it is cleared automatically */
+ /* by HW after SW read. */
gdd_csr = hsi_inw(hsi_ctrl->base, HSI_GDD_CSR_REG(lch));
hsi_outl_and(~HSI_GDD_LCH(lch), hsi_ctrl->base,
HSI_SYS_GDD_MPU_IRQ_ENABLE_REG);
hsi_outl(HSI_GDD_LCH(lch), hsi_ctrl->base,
HSI_SYS_GDD_MPU_IRQ_STATUS_REG);
+ /* Unmap DMA region */
+ dma_h = hsi_inl(hsi_ctrl->base, HSI_GDD_CSSA_REG(lch));
+ size = hsi_inw(hsi_ctrl->base, HSI_GDD_CEN_REG(lch)) * 4;
+ dma_unmap_single(hsi_ctrl->dev, dma_h, size, DMA_TO_DEVICE);
+
buff_offset = hsi_hst_bufstate_f_reg(hsi_ctrl, port, channel);
if (buff_offset >= 0)
hsi_outl_and(~HSI_BUFSTATE_CHANNEL(channel), hsi_ctrl->base,
@@ -315,24 +354,42 @@ int hsi_driver_cancel_write_dma(struct hsi_channel *hsi_ch)
return status_reg & HSI_GDD_LCH(lch) ? 0 : -ECANCELED;
}
+/**
+ * hsi_driver_cancel_read_dma - Cancel an ongoing GDD [DMA] read for the
+ * specified hsi channel.
+ * @hsi_ch - pointer to the hsi_channel to cancel DMA read.
+ *
+ * hsi_controller lock must be held before calling this function.
+ *
+ * Return: -ENXIO : No DMA channel found for specified HSI channel
+ * -ECANCELED : DMA cancel success, data not available at expected
+ * address.
+ * 0 : DMA transfer is already over, data already available at
+ * expected address.
+ *
+ * Note: whatever the returned value, the read callback will not be called
+ * after cancel.
+ */
int hsi_driver_cancel_read_dma(struct hsi_channel *hsi_ch)
{
int lch = hsi_ch->read_data.lch;
struct hsi_dev *hsi_ctrl = hsi_ch->hsi_port->hsi_controller;
u16 ccr, gdd_csr;
u32 status_reg;
+ dma_addr_t dma_h;
+ size_t size;
+
dev_err(&hsi_ch->dev->device, "hsi_driver_cancel_read_dma "
"channel %d\n", hsi_ch->channel_number);
+ /* Re-enable interrupts for polling if needed */
+ if (hsi_ch->flags & HSI_CH_RX_POLL)
+ hsi_driver_enable_read_interrupt(hsi_ch, NULL);
+
if (lch < 0) {
dev_err(&hsi_ch->dev->device, "No DMA channel found for HSI "
"channel %d\n", hsi_ch->channel_number);
return -ENXIO;
}
- /* DMA transfer is over, re-enable default mode
- * (Interrupts for polling feature)
- */
- hsi_driver_enable_read_interrupt(hsi_ch, NULL);
ccr = hsi_inw(hsi_ctrl->base, HSI_GDD_CCR_REG(lch));
if (!(ccr & HSI_CCR_ENABLE)) {
@@ -344,12 +401,20 @@ int hsi_driver_cancel_read_dma(struct hsi_channel *hsi_ch)
status_reg = hsi_inl(hsi_ctrl->base, HSI_SYS_GDD_MPU_IRQ_STATUS_REG);
status_reg &= hsi_inl(hsi_ctrl->base, HSI_SYS_GDD_MPU_IRQ_ENABLE_REG);
hsi_outw_and(~HSI_CCR_ENABLE, hsi_ctrl->base, HSI_GDD_CCR_REG(lch));
+
+ /* Clear CSR register by reading it, as it is cleared automatically */
+ /* by HW after SW read */
gdd_csr = hsi_inw(hsi_ctrl->base, HSI_GDD_CSR_REG(lch));
hsi_outl_and(~HSI_GDD_LCH(lch), hsi_ctrl->base,
HSI_SYS_GDD_MPU_IRQ_ENABLE_REG);
hsi_outl(HSI_GDD_LCH(lch), hsi_ctrl->base,
HSI_SYS_GDD_MPU_IRQ_STATUS_REG);
+ /* Unmap DMA region - Access to the buffer is now safe */
+ dma_h = hsi_inl(hsi_ctrl->base, HSI_GDD_CDSA_REG(lch));
+ size = hsi_inw(hsi_ctrl->base, HSI_GDD_CEN_REG(lch)) * 4;
+ dma_unmap_single(hsi_ctrl->dev, dma_h, size, DMA_FROM_DEVICE);
+
hsi_reset_ch_read(hsi_ch);
return status_reg & HSI_GDD_LCH(lch) ? 0 : -ECANCELED;
}
@@ -422,6 +487,7 @@ static void do_hsi_gdd_lch(struct hsi_dev *hsi_ctrl, unsigned int gdd_lch)
hsi_outl_and(~HSI_GDD_LCH(gdd_lch), base,
HSI_SYS_GDD_MPU_IRQ_ENABLE_REG);
+ /* Warning : CSR register is cleared automatically by HW after SW read */
gdd_csr = hsi_inw(base, HSI_GDD_CSR_REG(gdd_lch));
if (!(gdd_csr & HSI_CSR_TOUT)) {
@@ -434,10 +500,6 @@ static void do_hsi_gdd_lch(struct hsi_dev *hsi_ctrl, unsigned int gdd_lch)
DMA_FROM_DEVICE);
ch = hsi_ctrl_get_ch(hsi_ctrl, port, channel);
hsi_reset_ch_read(ch);
- /* DMA transfer is over, re-enable default mode
- * (interrupts for polling feature)
- */
- hsi_driver_enable_read_interrupt(ch, NULL);
dev_dbg(hsi_ctrl->dev, "Calling ch %d read callback "
"(size %d).\n", channel, size/4);
@@ -465,6 +527,9 @@ static void do_hsi_gdd_lch(struct hsi_dev *hsi_ctrl, unsigned int gdd_lch)
fifo, fifo_words_avail,
HSI_HSR_FIFO_SIZE);
}
+ /* Re-enable interrupts for polling if needed */
+ if (ch->flags & HSI_CH_RX_POLL)
+ hsi_driver_enable_read_interrupt(ch, NULL);
} else { /* Write path */
dma_h = hsi_inl(base, HSI_GDD_CSSA_REG(gdd_lch));
size = hsi_inw(base, HSI_GDD_CEN_REG(gdd_lch)) * 4;
@@ -483,6 +548,7 @@ static void do_hsi_gdd_lch(struct hsi_dev *hsi_ctrl, unsigned int gdd_lch)
dev_err(hsi_ctrl->dev, "Time-out overflow Error on GDD transfer"
" on gdd channel %d\n", gdd_lch);
spin_unlock(&hsi_ctrl->lock);
+ /* TODO : need to perform a DMA soft reset */
hsi_port_event_handler(&hsi_ctrl->hsi_port[port - 1],
HSI_EVENT_ERROR, NULL);
spin_lock(&hsi_ctrl->lock);
@@ -511,6 +577,7 @@ static u32 hsi_process_dma_event(struct hsi_dev *hsi_ctrl)
}
}
+ /* Acknowledge interrupt for DMA channel */
hsi_outl(lch_served, base, HSI_SYS_GDD_MPU_IRQ_STATUS_REG);
@@ -571,6 +638,6 @@ int __init hsi_gdd_init(struct hsi_dev *hsi_ctrl, const char *irq_name)
void hsi_gdd_exit(struct hsi_dev *hsi_ctrl)
{
- tasklet_disable(&hsi_ctrl->hsi_gdd_tasklet);
+ tasklet_kill(&hsi_ctrl->hsi_gdd_tasklet);
free_irq(hsi_ctrl->gdd_irq, hsi_ctrl);
}
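The DMA changes above follow the standard streaming-DMA discipline: validate the handle with dma_mapping_error() right after dma_map_single(), and make every cancel path call dma_unmap_single() with the same size and direction before the CPU touches the buffer again. A condensed, driver-independent sketch of that pattern (function and parameter names are placeholders):

#include <linux/dma-mapping.h>

static int example_start_tx(struct device *dev, void *buf, size_t len,
                            dma_addr_t *handle)
{
        *handle = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
        if (dma_mapping_error(dev, *handle))
                return -ENOMEM;          /* never hand a bad handle to the HW */

        /* ... program the DMA engine with *handle and len ... */
        return 0;
}

static void example_cancel_tx(struct device *dev, dma_addr_t handle, size_t len)
{
        /* ... stop the DMA channel first ... */

        /* The CPU may only touch the buffer again after the unmap. */
        dma_unmap_single(dev, handle, len, DMA_TO_DEVICE);
}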
diff --git a/drivers/omap_hsi/hsi_driver_fifo.c b/drivers/omap_hsi/hsi_driver_fifo.c
index f6ebc18..aa33a1a 100644
--- a/drivers/omap_hsi/hsi_driver_fifo.c
+++ b/drivers/omap_hsi/hsi_driver_fifo.c
@@ -40,7 +40,8 @@ int hsi_fifo_get_id(struct hsi_dev *hsi_ctrl, unsigned int channel,
int fifo_index = 0;
int err = 0;
- if (unlikely(channel >= HSI_CHANNELS_MAX || port < 1 || port > 2)) {
+ if (unlikely((channel >= HSI_CHANNELS_MAX) || (port < 1) ||
+ (port > 2))) {
err = -EINVAL;
goto fifo_id_bk;
}
diff --git a/drivers/omap_hsi/hsi_driver_if.c b/drivers/omap_hsi/hsi_driver_if.c
index 1183b97..19012e5 100644
--- a/drivers/omap_hsi/hsi_driver_if.c
+++ b/drivers/omap_hsi/hsi_driver_if.c
@@ -289,6 +289,7 @@ int hsi_open(struct hsi_device *dev)
return -EBUSY;
}
+ /* Restart with flags cleaned up */
ch->flags = HSI_CH_OPEN;
hsi_driver_enable_interrupt(port, HSI_CAWAKEDETECTED | HSI_ERROROCCURED
@@ -322,7 +323,7 @@ int hsi_write(struct hsi_device *dev, u32 *addr, unsigned int size)
int err;
if (unlikely(!dev)) {
- pr_err("Null dev pointer in hsi_write\n");
+ pr_err(LOG_NAME "Null dev pointer in hsi_write\n");
return -EINVAL;
}
@@ -404,7 +405,7 @@ int hsi_read(struct hsi_device *dev, u32 *addr, unsigned int size)
int err;
if (unlikely(!dev)) {
- pr_err("Null dev pointer in hsi_read\n");
+ pr_err(LOG_NAME "Null dev pointer in hsi_read\n");
return -EINVAL;
}
@@ -486,7 +487,14 @@ int __hsi_write_cancel(struct hsi_channel *ch)
* hsi_write_cancel - Cancel pending write request.
* @dev - hsi device channel where to cancel the pending write.
*
- * write_done() callback will not be called after sucess of this function.
+ * write_done() callback will not be called after success of this function.
+ *
+ * Return: -ENXIO : No DMA channel found for specified HSI channel
+ * -ECANCELED : write cancel success, data not transferred to TX FIFO
+ * 0 : transfer is already over, data already transferred to TX FIFO
+ *
+ * Note: whatever the returned value, the write callback will not be called
+ * after write cancel.
*/
int hsi_write_cancel(struct hsi_device *dev)
{
@@ -534,7 +542,15 @@ int __hsi_read_cancel(struct hsi_channel *ch)
* hsi_read_cancel - Cancel pending read request.
* @dev - hsi device channel where to cancel the pending read.
*
- * read_done() callback will not be called after sucess of this function.
+ * read_done() callback will not be called after success of this function.
+ *
+ * Return: -ENXIO : No DMA channel found for specified HSI channel
+ * -ECANCELED : read cancel success, data not available at expected
+ * address.
+ * 0 : transfer is already over, data already available at expected
+ * address.
+ *
+ * Note: whatever the returned value, the read callback will not be called
+ * after cancel.
*/
int hsi_read_cancel(struct hsi_device *dev)
{
diff --git a/drivers/omap_hsi/hsi_driver_int.c b/drivers/omap_hsi/hsi_driver_int.c
index a70f1f2..ce67e5f 100644
--- a/drivers/omap_hsi/hsi_driver_int.c
+++ b/drivers/omap_hsi/hsi_driver_int.c
@@ -51,7 +51,6 @@ bool hsi_is_channel_busy(struct hsi_channel *ch)
/* Check if a HSI port is busy :
* - data transfer (Write) is ongoing for a given HSI channel
- * - ACWAKE is high
* - CAWAKE is high
* - Currently in HSI interrupt tasklet
* - Currently in HSI CAWAKE tasklet (for SSI)
@@ -177,6 +176,16 @@ int hsi_driver_enable_read_interrupt(struct hsi_channel *ch, u32 * data)
return 0;
}
+/**
+ * hsi_driver_cancel_write_interrupt - Cancel pending write interrupt.
+ * @dev - hsi device channel where to cancel the pending interrupt.
+ *
+ * Return: -ECANCELED : write cancel success, data not transferred to TX FIFO
+ * 0 : transfer is already over, data already transferred to TX FIFO
+ *
+ * Note: whatever the returned value, the write callback will not be called
+ * after write cancel.
+ */
int hsi_driver_cancel_write_interrupt(struct hsi_channel *ch)
{
struct hsi_port *p = ch->hsi_port;
@@ -190,7 +199,7 @@ int hsi_driver_cancel_write_interrupt(struct hsi_channel *ch)
HSI_SYS_MPU_ENABLE_CH_REG(port, p->n_irq, channel));
if (!(status_reg & HSI_HST_DATAACCEPT(channel))) {
- dev_warn(&ch->dev->device, "Write cancel on not "
+ dev_dbg(&ch->dev->device, "Write cancel on not "
"enabled channel %d ENABLE REG 0x%08X", channel,
status_reg);
}
@@ -207,6 +216,17 @@ int hsi_driver_cancel_write_interrupt(struct hsi_channel *ch)
return status_reg & HSI_HST_DATAACCEPT(channel) ? 0 : -ECANCELED;
}
+/**
+ * hsi_driver_cancel_read_interrupt - Cancel pending read interrupt.
+ * @dev - hsi device channel where to cancel the pending interrupt.
+ *
+ * Return: -ECANCELED : read cancel success, data not available at expected
+ * address.
+ * 0 : transfer is already over, data already available at expected
+ * address.
+ *
+ * Note: whatever the returned value, the read callback will not be called
+ * after cancel.
+ */
int hsi_driver_cancel_read_interrupt(struct hsi_channel *ch)
{
struct hsi_port *p = ch->hsi_port;
@@ -218,7 +238,7 @@ int hsi_driver_cancel_read_interrupt(struct hsi_channel *ch)
status_reg = hsi_inl(base,
HSI_SYS_MPU_ENABLE_CH_REG(port, p->n_irq, channel));
if (!(status_reg & HSI_HSR_DATAAVAILABLE(channel))) {
- dev_warn(&ch->dev->device, "Read cancel on not "
+ dev_dbg(&ch->dev->device, "Read cancel on not "
"enabled channel %d ENABLE REG 0x%08X", channel,
status_reg);
}
@@ -386,7 +406,19 @@ done:
}
}
-/* CAWAKE line management */
+/**
+ * hsi_do_cawake_process - CAWAKE line management
+ * @pport - HSI port to process
+ *
+ * This function handles the CAWAKE L/H transitions and calls the event
+ * callback accordingly.
+ *
+ * Returns 0 if the CAWAKE event was processed, -EAGAIN if CAWAKE event
+ * processing is delayed due to a pending DMA interrupt.
+ * If -EAGAIN is returned, pport->hsi_tasklet has to be re-scheduled once the
+ * DMA tasklet has been executed. This should be done automatically by the
+ * driver.
+ *
+*/
int hsi_do_cawake_process(struct hsi_port *pport)
{
struct hsi_dev *hsi_ctrl = pport->hsi_controller;
@@ -404,9 +436,8 @@ int hsi_do_cawake_process(struct hsi_port *pport)
/* Check CAWAKE line status */
if (cawake_status) {
-#if 0
- dev_info(hsi_ctrl->dev, "CAWAKE rising edge detected\n");
-#endif
+ dev_dbg(hsi_ctrl->dev, "CAWAKE rising edge detected\n");
+
/* Check for possible mismatch (race condition) */
if (unlikely(pport->cawake_status)) {
dev_warn(hsi_ctrl->dev,
@@ -424,18 +455,23 @@ int hsi_do_cawake_process(struct hsi_port *pport)
omap_writel(0x003F0700, 0x4A306400);
omap_writel(0x00000003, 0x4A004400);
}
+ /* Force HSI to ON_ACTIVE when CAWAKE is high */
hsi_set_pm_force_hsi_on(hsi_ctrl);
+ /* TODO: Use omap_pm_set_max_dev_wakeup_lat() to set latency */
+ /* constraint to prevent L3INIT to enter RET/OFF when CAWAKE */
+ /* is high */
+
spin_unlock(&hsi_ctrl->lock);
hsi_port_event_handler(pport, HSI_EVENT_CAWAKE_UP, NULL);
spin_lock(&hsi_ctrl->lock);
} else {
-#if 0
- dev_info(hsi_ctrl->dev, "CAWAKE falling edge detected\n");
-#endif
+ dev_dbg(hsi_ctrl->dev, "CAWAKE falling edge detected\n");
+
+ /* Check for pending DMA interrupt */
if (hsi_is_dma_read_int_pending(hsi_ctrl)) {
- dev_warn(hsi_ctrl->dev,
- "Pending DMA Read interrupt before CAWAKE->L, "
- "exiting Interrupt tasklet.\n");
+ dev_dbg(hsi_ctrl->dev, "Pending DMA Read interrupt "
+ "before CAWAKE->L, exiting "
+ "Interrupt tasklet.\n");
return -EAGAIN;
}
if (unlikely(!pport->cawake_status)) {
@@ -449,7 +485,12 @@ int hsi_do_cawake_process(struct hsi_port *pport)
}
pport->cawake_status = 0;
+ /* Allow HSI HW to enter IDLE when CAWAKE is low */
hsi_set_pm_default(hsi_ctrl);
+ /* TODO: Use omap_pm_set_max_dev_wakeup_lat() to release */
+ /* latency constraint to prevent L3INIT to enter RET/OFF when */
+ /* CAWAKE is low */
+
spin_unlock(&hsi_ctrl->lock);
hsi_port_event_handler(pport, HSI_EVENT_CAWAKE_DOWN, NULL);
spin_lock(&hsi_ctrl->lock);
@@ -537,8 +578,7 @@ static u32 hsi_driver_int_proc(struct hsi_port *pport,
channels_served |= HSI_ERROROCCURED;
}
-
- for (channel = start; channel < stop; channel++) {
+ for (channel = start; channel <= stop; channel++) {
if (status_reg & HSI_HST_DATAACCEPT(channel)) {
hsi_do_channel_tx(&pport->hsi_channel[channel]);
channels_served |= HSI_HST_DATAACCEPT(channel);
@@ -555,6 +595,8 @@ static u32 hsi_driver_int_proc(struct hsi_port *pport,
"Data overrun in real time mode !\n");
}
}
+
+ /* CAWAKE falling or rising edge detected */
if ((status_reg & HSI_CAWAKEDETECTED) || pport->cawake_off_event) {
if (hsi_do_cawake_process(pport) == -EAGAIN)
goto proc_done;
@@ -579,19 +621,20 @@ static u32 hsi_process_int_event(struct hsi_port *pport)
status_reg = hsi_driver_int_proc(pport,
HSI_SYS_MPU_STATUS_REG(port, irq),
HSI_SYS_MPU_ENABLE_REG(port, irq),
- 0, min(pport->max_ch, (u8) HSI_SSI_CHANNELS_MAX));
+ 0,
+ min(pport->max_ch, (u8) HSI_SSI_CHANNELS_MAX) - 1);
/* Process events for channels 8..15 */
if (pport->max_ch > HSI_SSI_CHANNELS_MAX)
status_reg |= hsi_driver_int_proc(pport,
HSI_SYS_MPU_U_STATUS_REG(port, irq),
HSI_SYS_MPU_U_ENABLE_REG(port, irq),
- HSI_SSI_CHANNELS_MAX, pport->max_ch);
+ HSI_SSI_CHANNELS_MAX, pport->max_ch - 1);
return status_reg;
}
-void do_hsi_tasklet(unsigned long hsi_port)
+static void do_hsi_tasklet(unsigned long hsi_port)
{
struct hsi_port *pport = (struct hsi_port *)hsi_port;
struct hsi_dev *hsi_ctrl = pport->hsi_controller;
@@ -669,6 +712,6 @@ int __init hsi_mpu_init(struct hsi_port *hsi_p, const char *irq_name)
void hsi_mpu_exit(struct hsi_port *hsi_p)
{
- tasklet_disable(&hsi_p->hsi_tasklet);
+ tasklet_kill(&hsi_p->hsi_tasklet);
free_irq(hsi_p->irq, hsi_p);
}
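The switch from tasklet_disable() to tasklet_kill() in both exit paths matters for module unload: tasklet_disable() only increments the disable count and can leave a scheduled tasklet pending, while tasklet_kill() waits for a scheduled or running tasklet to finish so its handler cannot run after the module is gone. A minimal sketch of the patched teardown shape (names are illustrative):

#include <linux/interrupt.h>

static void example_teardown(struct tasklet_struct *t, unsigned int irq,
                             void *dev_id)
{
        /* Wait for any scheduled or running tasklet to complete; unlike
         * tasklet_disable(), this guarantees the handler is not left pending. */
        tasklet_kill(t);
        free_irq(irq, dev_id);
}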