Diffstat (limited to 'arch/arm/plat-omap')
-rw-r--r-- arch/arm/plat-omap/Kconfig | 73
-rw-r--r-- arch/arm/plat-omap/Makefile | 6
-rw-r--r-- arch/arm/plat-omap/clock.c | 46
-rw-r--r-- arch/arm/plat-omap/common.c | 3
-rw-r--r-- arch/arm/plat-omap/counter_32k.c | 58
-rw-r--r-- arch/arm/plat-omap/cpu-omap.c | 171
-rw-r--r-- arch/arm/plat-omap/devices.c | 111
-rw-r--r-- arch/arm/plat-omap/dma.c | 18
-rw-r--r-- arch/arm/plat-omap/dmtimer.c | 927
-rw-r--r-- arch/arm/plat-omap/i2c.c | 14
-rw-r--r-- arch/arm/plat-omap/include/plat/clkdev_omap.h | 2
-rw-r--r-- arch/arm/plat-omap/include/plat/clock.h | 4
-rw-r--r-- arch/arm/plat-omap/include/plat/common.h | 2
-rw-r--r-- arch/arm/plat-omap/include/plat/cpu.h | 39
-rw-r--r-- arch/arm/plat-omap/include/plat/dmtimer.h | 97
-rw-r--r-- arch/arm/plat-omap/include/plat/dsp.h | 4
-rw-r--r-- arch/arm/plat-omap/include/plat/dsscomp.h | 25
-rw-r--r-- arch/arm/plat-omap/include/plat/gpio.h | 56
-rw-r--r-- arch/arm/plat-omap/include/plat/gpmc.h | 4
-rw-r--r-- arch/arm/plat-omap/include/plat/gpu.h | 39
-rw-r--r-- arch/arm/plat-omap/include/plat/io.h | 4
-rw-r--r-- arch/arm/plat-omap/include/plat/iommu.h | 13
-rw-r--r-- arch/arm/plat-omap/include/plat/iommu2.h | 1
-rw-r--r-- arch/arm/plat-omap/include/plat/irqs-44xx.h | 9
-rw-r--r-- arch/arm/plat-omap/include/plat/irqs.h | 12
-rw-r--r-- arch/arm/plat-omap/include/plat/mcasp.h | 52
-rw-r--r-- arch/arm/plat-omap/include/plat/mcbsp.h | 6
-rw-r--r-- arch/arm/plat-omap/include/plat/mcpdm.h | 28
-rw-r--r-- arch/arm/plat-omap/include/plat/mcspi.h | 2
-rw-r--r-- arch/arm/plat-omap/include/plat/mmc.h | 12
-rw-r--r-- arch/arm/plat-omap/include/plat/omap-pm.h | 50
-rw-r--r-- arch/arm/plat-omap/include/plat/omap-serial.h | 71
-rw-r--r-- arch/arm/plat-omap/include/plat/omap44xx.h | 7
-rw-r--r-- arch/arm/plat-omap/include/plat/omap_device.h | 2
-rw-r--r-- arch/arm/plat-omap/include/plat/omap_hsi.h | 494
-rw-r--r-- arch/arm/plat-omap/include/plat/omap_hwmod.h | 26
-rw-r--r-- arch/arm/plat-omap/include/plat/remoteproc.h | 151
-rw-r--r-- arch/arm/plat-omap/include/plat/rpmsg.h | 68
-rw-r--r-- arch/arm/plat-omap/include/plat/rpres.h | 57
-rw-r--r-- arch/arm/plat-omap/include/plat/serial.h | 22
-rw-r--r-- arch/arm/plat-omap/include/plat/sram.h | 1
-rw-r--r-- arch/arm/plat-omap/include/plat/temperature_sensor.h | 65
-rw-r--r-- arch/arm/plat-omap/include/plat/usb.h | 18
-rw-r--r-- arch/arm/plat-omap/iommu.c | 184
-rw-r--r-- arch/arm/plat-omap/iovmm.c | 4
-rw-r--r-- arch/arm/plat-omap/mailbox.c | 43
-rw-r--r-- arch/arm/plat-omap/mcbsp.c | 107
-rw-r--r-- arch/arm/plat-omap/omap-pm-helper.c | 345
-rw-r--r-- arch/arm/plat-omap/omap-pm-helper.h | 40
-rw-r--r-- arch/arm/plat-omap/omap-pm-interface.c | 251
-rw-r--r-- arch/arm/plat-omap/omap-pm-noop.c | 363
-rw-r--r-- arch/arm/plat-omap/omap_device.c | 10
-rw-r--r-- arch/arm/plat-omap/omap_rpmsg.c | 601
-rw-r--r-- arch/arm/plat-omap/rproc_user.c | 185
-rw-r--r-- arch/arm/plat-omap/sram.c | 67
55 files changed, 3868 insertions(+), 1202 deletions(-)
diff --git a/arch/arm/plat-omap/Kconfig b/arch/arm/plat-omap/Kconfig
index 49a4c75..ef3763c 100644
--- a/arch/arm/plat-omap/Kconfig
+++ b/arch/arm/plat-omap/Kconfig
@@ -38,6 +38,29 @@ config OMAP_DEBUG_LEDS
depends on OMAP_DEBUG_DEVICES
default y if LEDS_CLASS
+config OMAP_RPMSG
+ tristate "OMAP Virtio-based remote processor messaging support"
+ depends on ARCH_OMAP4
+ default y
+ select VIRTIO
+ select VIRTIO_RING
+ select OMAP_MBOX_FWK
+ help
+ Say Y if you want to enable OMAP virtio-based remote-processor
+ communication, currently only available on OMAP4. This is required
+ for offloading tasks to the remote on-chip M3s or the C64x+ DSP,
+ and is typically used by multimedia frameworks to offload
+ CPU-intensive and/or latency-sensitive tasks.
+
+config OMAP_RPMSG_RECOVERY
+ bool "OMAP RPMSG recovery"
+ default y
+ depends on OMAP_RPMSG
+ help
+ Say Y if you want RPMSG to reset the rpmsg channels after a fatal
+ error in the remote processor. This restarts all the channels and
+ the remote processor, resulting in a clean restart.
+
config OMAP_SMARTREFLEX
bool "SmartReflex support"
depends on (ARCH_OMAP3 || ARCH_OMAP4) && PM
@@ -69,6 +92,26 @@ config OMAP_SMARTREFLEX_CLASS3
Class 3 implementation of Smartreflex employs continuous hardware
voltage calibration.
+config OMAP_SMARTREFLEX_CLASS1P5
+ bool "Class 1.5 mode of Smartreflex Implementation"
+ depends on OMAP_SMARTREFLEX
+ help
+ Say Y to enable Class 1.5 implementation of Smartreflex.
+
+ Class 1.5 implementation of Smartreflex employs software-controlled
+ hardware voltage calibration.
+
+config OMAP_SR_CLASS1P5_RECALIBRATION_DELAY
+ int "Class 1.5 mode recalibration recalibration delay(ms)"
+ depends on OMAP_SMARTREFLEX_CLASS1P5
+ default 86400000
+ help
+ Setup the recalibration delay in milliseconds.
+
+ Use 0 to never recalibrate (operates in AVS Class 1 mode).
+ Defaults to the recommended recalibration interval of 24 hours.
+ If you do not understand this, use the default.
+
config OMAP_RESET_CLOCKS
bool "Reset unused clocks during boot"
depends on ARCH_OMAP
@@ -116,7 +159,7 @@ config OMAP_MCBSP
Buffered Serial Port.
config OMAP_MBOX_FWK
- tristate "Mailbox framework support"
+ bool "Mailbox framework support"
depends on ARCH_OMAP
help
Say Y here if you want to use OMAP Mailbox framework support for
@@ -132,10 +175,10 @@ config OMAP_MBOX_KFIFO_SIZE
module parameter).
config OMAP_IOMMU
- tristate
+ bool "IOMMU support for OMAP devices"
config OMAP_IOMMU_DEBUG
- tristate "Export OMAP IOMMU internals in DebugFS"
+ bool "Export OMAP IOMMU internals in DebugFS"
depends on OMAP_IOMMU && DEBUG_FS
help
Select this to see extensive information about
@@ -206,10 +249,29 @@ config OMAP_SERIAL_WAKE
to data on the serial RX line. This allows you to wake the
system from serial console.
+config OMAP_TEMP_SENSOR
+ bool "OMAP Temp Sensor Support"
+ depends on ARCH_OMAP4
+ default n
+ help
+ Say Y here if you want support for the temperature sensor on the
+ OMAP4460. This provides the temperature of the MPU subsystem.
+ Only one instance of the on-die temperature sensor is present.
+
+# this carveout should probably become generic and not omap specific
+config OMAP_REMOTEPROC_MEMPOOL_SIZE
+ hex "Physical carveout memory pool size (Byte)"
+ depends on OMAP_REMOTE_PROC
+ default 0x700000
+ help
+ Allocate the specified amount of memory at boot time so it can be
+ ioremapped safely.
+
choice
prompt "OMAP PM layer selection"
depends on ARCH_OMAP
- default OMAP_PM_NOOP
+ default OMAP_PM
config OMAP_PM_NONE
bool "No PM layer"
@@ -217,6 +279,9 @@ config OMAP_PM_NONE
config OMAP_PM_NOOP
bool "No-op/debug PM layer"
+config OMAP_PM
+ depends on PM
+ bool "OMAP PM layer implementation"
endchoice
endmenu
diff --git a/arch/arm/plat-omap/Makefile b/arch/arm/plat-omap/Makefile
index f0233e6..476d817 100644
--- a/arch/arm/plat-omap/Makefile
+++ b/arch/arm/plat-omap/Makefile
@@ -16,12 +16,13 @@ obj-$(CONFIG_ARCH_OMAP16XX) += ocpi.o
obj-$(CONFIG_ARCH_OMAP2) += omap_device.o
obj-$(CONFIG_ARCH_OMAP3) += omap_device.o
obj-$(CONFIG_ARCH_OMAP4) += omap_device.o
+obj-$(CONFIG_REMOTE_PROC) += rproc_user.o
obj-$(CONFIG_OMAP_MCBSP) += mcbsp.o
+obj-$(CONFIG_OMAP_RPMSG) += omap_rpmsg.o
obj-$(CONFIG_OMAP_IOMMU) += iommu.o iovmm.o
obj-$(CONFIG_OMAP_IOMMU_DEBUG) += iommu-debug.o
-obj-$(CONFIG_CPU_FREQ) += cpu-omap.o
obj-$(CONFIG_OMAP_DM_TIMER) += dmtimer.o
obj-$(CONFIG_OMAP_DEBUG_DEVICES) += debug-devices.o
obj-$(CONFIG_OMAP_DEBUG_LEDS) += debug-leds.o
@@ -31,4 +32,5 @@ obj-y += $(i2c-omap-m) $(i2c-omap-y)
# OMAP mailbox framework
obj-$(CONFIG_OMAP_MBOX_FWK) += mailbox.o
-obj-$(CONFIG_OMAP_PM_NOOP) += omap-pm-noop.o
+obj-$(CONFIG_OMAP_PM_NOOP) += omap-pm-interface.o
+obj-$(CONFIG_OMAP_PM) += omap-pm-interface.o omap-pm-helper.o
diff --git a/arch/arm/plat-omap/clock.c b/arch/arm/plat-omap/clock.c
index c9122dd..b327956 100644
--- a/arch/arm/plat-omap/clock.c
+++ b/arch/arm/plat-omap/clock.c
@@ -441,6 +441,8 @@ static int __init clk_disable_unused(void)
return 0;
pr_info("clock: disabling unused clocks to save power\n");
+
+ spin_lock_irqsave(&clockfw_lock, flags);
list_for_each_entry(ck, &clocks, node) {
if (ck->ops == &clkops_null)
continue;
@@ -448,10 +450,9 @@ static int __init clk_disable_unused(void)
if (ck->usecount > 0 || !ck->enable_reg)
continue;
- spin_lock_irqsave(&clockfw_lock, flags);
arch_clock->clk_disable_unused(ck);
- spin_unlock_irqrestore(&clockfw_lock, flags);
}
+ spin_unlock_irqrestore(&clockfw_lock, flags);
return 0;
}
@@ -475,8 +476,43 @@ int __init clk_init(struct clk_functions * custom_clocks)
/*
* debugfs support to trace clock tree hierarchy and attributes
*/
+
+#include <linux/debugfs.h>
+#include <linux/seq_file.h>
+
static struct dentry *clk_debugfs_root;
+static int clk_dbg_show_summary(struct seq_file *s, void *unused)
+{
+ struct clk *c;
+ struct clk *pa;
+
+ seq_printf(s, "%-30s %-30s %-10s %s\n",
+ "clock-name", "parent-name", "rate", "use-count");
+
+ mutex_lock(&clocks_mutex);
+ list_for_each_entry(c, &clocks, node) {
+ pa = c->parent;
+ seq_printf(s, "%-30s %-30s %-10lu %d\n",
+ c->name, pa ? pa->name : "none", c->rate, c->usecount);
+ }
+
+ mutex_unlock(&clocks_mutex);
+ return 0;
+}
+
+static int clk_dbg_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, clk_dbg_show_summary, inode->i_private);
+}
+
+static const struct file_operations debug_clock_fops = {
+ .open = clk_dbg_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
static int clk_debugfs_register_one(struct clk *c)
{
int err;
@@ -551,6 +587,12 @@ static int __init clk_debugfs_init(void)
if (err)
goto err_out;
}
+
+ d = debugfs_create_file("summary", S_IRUGO,
+ d, NULL, &debug_clock_fops);
+ if (!d)
+ return -ENOMEM;
+
return 0;
err_out:
debugfs_remove_recursive(clk_debugfs_root);
diff --git a/arch/arm/plat-omap/common.c b/arch/arm/plat-omap/common.c
index d9f10a3..0c8c8b5 100644
--- a/arch/arm/plat-omap/common.c
+++ b/arch/arm/plat-omap/common.c
@@ -20,7 +20,7 @@
#include <plat/board.h>
#include <plat/vram.h>
#include <plat/dsp.h>
-
+#include <plat/remoteproc.h>
#define NO_LENGTH_CHECK 0xffffffff
@@ -65,4 +65,5 @@ void __init omap_reserve(void)
omapfb_reserve_sdram_memblock();
omap_vram_reserve_sdram_memblock();
omap_dsp_reserve_sdram_memblock();
+ omap_ipu_reserve_sdram_memblock();
}
diff --git a/arch/arm/plat-omap/counter_32k.c b/arch/arm/plat-omap/counter_32k.c
index f7fed60..91b2ec0 100644
--- a/arch/arm/plat-omap/counter_32k.c
+++ b/arch/arm/plat-omap/counter_32k.c
@@ -18,6 +18,7 @@
#include <linux/err.h>
#include <linux/io.h>
#include <linux/sched.h>
+#include <linux/syscore_ops.h>
#include <asm/sched_clock.h>
@@ -120,9 +121,40 @@ static DEFINE_CLOCK_DATA(cd);
#define SC_MULT 4000000000u
#define SC_SHIFT 17
+static u32 sched_clock_cyc_offset;
+static u32 sched_clock_cyc_suspend;
+static bool sched_clock_suspended;
+
+static int sched_clock_suspend(void)
+{
+ sched_clock_suspended = true;
+ sched_clock_cyc_suspend = clocksource_32k.read(&clocksource_32k) -
+ sched_clock_cyc_offset;
+
+ return 0;
+}
+
+static void sched_clock_resume(void)
+{
+ sched_clock_cyc_offset = clocksource_32k.read(&clocksource_32k) -
+ sched_clock_cyc_suspend;
+ sched_clock_suspended = false;
+}
+
+static struct syscore_ops sched_clock_syscore_ops = {
+ .suspend = sched_clock_suspend,
+ .resume = sched_clock_resume,
+};
+
static inline unsigned long long notrace _omap_32k_sched_clock(void)
{
- u32 cyc = clocksource_32k.read(&clocksource_32k);
+ u32 cyc;
+ if (!sched_clock_suspended)
+ cyc = clocksource_32k.read(&clocksource_32k) -
+ sched_clock_cyc_offset;
+ else
+ cyc = sched_clock_cyc_suspend;
+
return cyc_to_fixed_sched_clock(&cd, cyc, (u32)~0, SC_MULT, SC_SHIFT);
}
@@ -140,7 +172,8 @@ unsigned long long notrace omap_32k_sched_clock(void)
static void notrace omap_update_sched_clock(void)
{
- u32 cyc = clocksource_32k.read(&clocksource_32k);
+ u32 cyc = clocksource_32k.read(&clocksource_32k) -
+ sched_clock_cyc_offset;
update_sched_clock(&cd, cyc, (u32)~0);
}
@@ -152,22 +185,27 @@ static void notrace omap_update_sched_clock(void)
* nsecs and adds to a monotonically increasing timespec.
*/
static struct timespec persistent_ts;
-static cycles_t cycles, last_cycles;
+static cycles_t cycles;
+static DEFINE_SPINLOCK(read_persistent_clock_lock);
void read_persistent_clock(struct timespec *ts)
{
unsigned long long nsecs;
- cycles_t delta;
- struct timespec *tsp = &persistent_ts;
+ cycles_t last_cycles;
+ unsigned long flags;
+
+ spin_lock_irqsave(&read_persistent_clock_lock, flags);
last_cycles = cycles;
cycles = clocksource_32k.read(&clocksource_32k);
- delta = cycles - last_cycles;
- nsecs = clocksource_cyc2ns(delta,
+ nsecs = clocksource_cyc2ns(cycles - last_cycles,
clocksource_32k.mult, clocksource_32k.shift);
- timespec_add_ns(tsp, nsecs);
- *ts = *tsp;
+ timespec_add_ns(&persistent_ts, nsecs);
+
+ *ts = persistent_ts;
+
+ spin_unlock_irqrestore(&read_persistent_clock_lock, flags);
}
int __init omap_init_clocksource_32k(void)
@@ -202,6 +240,8 @@ int __init omap_init_clocksource_32k(void)
init_fixed_sched_clock(&cd, omap_update_sched_clock, 32,
32768, SC_MULT, SC_SHIFT);
+
+ register_syscore_ops(&sched_clock_syscore_ops);
}
return 0;
}
diff --git a/arch/arm/plat-omap/cpu-omap.c b/arch/arm/plat-omap/cpu-omap.c
deleted file mode 100644
index da4f68d..0000000
--- a/arch/arm/plat-omap/cpu-omap.c
+++ /dev/null
@@ -1,171 +0,0 @@
-/*
- * linux/arch/arm/plat-omap/cpu-omap.c
- *
- * CPU frequency scaling for OMAP
- *
- * Copyright (C) 2005 Nokia Corporation
- * Written by Tony Lindgren <tony@atomide.com>
- *
- * Based on cpu-sa1110.c, Copyright (C) 2001 Russell King
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-#include <linux/types.h>
-#include <linux/kernel.h>
-#include <linux/sched.h>
-#include <linux/cpufreq.h>
-#include <linux/delay.h>
-#include <linux/init.h>
-#include <linux/err.h>
-#include <linux/clk.h>
-#include <linux/io.h>
-
-#include <mach/hardware.h>
-#include <plat/clock.h>
-#include <asm/system.h>
-
-#define VERY_HI_RATE 900000000
-
-static struct cpufreq_frequency_table *freq_table;
-
-#ifdef CONFIG_ARCH_OMAP1
-#define MPU_CLK "mpu"
-#else
-#define MPU_CLK "virt_prcm_set"
-#endif
-
-static struct clk *mpu_clk;
-
-/* TODO: Add support for SDRAM timing changes */
-
-static int omap_verify_speed(struct cpufreq_policy *policy)
-{
- if (freq_table)
- return cpufreq_frequency_table_verify(policy, freq_table);
-
- if (policy->cpu)
- return -EINVAL;
-
- cpufreq_verify_within_limits(policy, policy->cpuinfo.min_freq,
- policy->cpuinfo.max_freq);
-
- policy->min = clk_round_rate(mpu_clk, policy->min * 1000) / 1000;
- policy->max = clk_round_rate(mpu_clk, policy->max * 1000) / 1000;
- cpufreq_verify_within_limits(policy, policy->cpuinfo.min_freq,
- policy->cpuinfo.max_freq);
- return 0;
-}
-
-static unsigned int omap_getspeed(unsigned int cpu)
-{
- unsigned long rate;
-
- if (cpu)
- return 0;
-
- rate = clk_get_rate(mpu_clk) / 1000;
- return rate;
-}
-
-static int omap_target(struct cpufreq_policy *policy,
- unsigned int target_freq,
- unsigned int relation)
-{
- struct cpufreq_freqs freqs;
- int ret = 0;
-
- /* Ensure desired rate is within allowed range. Some govenors
- * (ondemand) will just pass target_freq=0 to get the minimum. */
- if (target_freq < policy->min)
- target_freq = policy->min;
- if (target_freq > policy->max)
- target_freq = policy->max;
-
- freqs.old = omap_getspeed(0);
- freqs.new = clk_round_rate(mpu_clk, target_freq * 1000) / 1000;
- freqs.cpu = 0;
-
- if (freqs.old == freqs.new)
- return ret;
-
- cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);
-#ifdef CONFIG_CPU_FREQ_DEBUG
- printk(KERN_DEBUG "cpufreq-omap: transition: %u --> %u\n",
- freqs.old, freqs.new);
-#endif
- ret = clk_set_rate(mpu_clk, freqs.new * 1000);
- cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
-
- return ret;
-}
-
-static int __cpuinit omap_cpu_init(struct cpufreq_policy *policy)
-{
- int result = 0;
-
- mpu_clk = clk_get(NULL, MPU_CLK);
- if (IS_ERR(mpu_clk))
- return PTR_ERR(mpu_clk);
-
- if (policy->cpu != 0)
- return -EINVAL;
-
- policy->cur = policy->min = policy->max = omap_getspeed(0);
-
- clk_init_cpufreq_table(&freq_table);
- if (freq_table) {
- result = cpufreq_frequency_table_cpuinfo(policy, freq_table);
- if (!result)
- cpufreq_frequency_table_get_attr(freq_table,
- policy->cpu);
- } else {
- policy->cpuinfo.min_freq = clk_round_rate(mpu_clk, 0) / 1000;
- policy->cpuinfo.max_freq = clk_round_rate(mpu_clk,
- VERY_HI_RATE) / 1000;
- }
-
- /* FIXME: what's the actual transition time? */
- policy->cpuinfo.transition_latency = 300 * 1000;
-
- return 0;
-}
-
-static int omap_cpu_exit(struct cpufreq_policy *policy)
-{
- clk_exit_cpufreq_table(&freq_table);
- clk_put(mpu_clk);
- return 0;
-}
-
-static struct freq_attr *omap_cpufreq_attr[] = {
- &cpufreq_freq_attr_scaling_available_freqs,
- NULL,
-};
-
-static struct cpufreq_driver omap_driver = {
- .flags = CPUFREQ_STICKY,
- .verify = omap_verify_speed,
- .target = omap_target,
- .get = omap_getspeed,
- .init = omap_cpu_init,
- .exit = omap_cpu_exit,
- .name = "omap",
- .attr = omap_cpufreq_attr,
-};
-
-static int __init omap_cpufreq_init(void)
-{
- return cpufreq_register_driver(&omap_driver);
-}
-
-arch_initcall(omap_cpufreq_init);
-
-/*
- * if ever we want to remove this, upon cleanup call:
- *
- * cpufreq_unregister_driver()
- * cpufreq_frequency_table_put_attr()
- */
-
diff --git a/arch/arm/plat-omap/devices.c b/arch/arm/plat-omap/devices.c
index ea28f98..d8add7e7 100644
--- a/arch/arm/plat-omap/devices.c
+++ b/arch/arm/plat-omap/devices.c
@@ -16,17 +16,21 @@
#include <linux/io.h>
#include <linux/slab.h>
#include <linux/memblock.h>
+#include <linux/err.h>
#include <mach/hardware.h>
#include <asm/mach-types.h>
#include <asm/mach/map.h>
+#include <plat/omap_hwmod.h>
+#include <plat/omap_device.h>
#include <plat/tc.h>
#include <plat/board.h>
#include <plat/mmc.h>
#include <mach/gpio.h>
#include <plat/menelaus.h>
#include <plat/mcbsp.h>
+#include <plat/remoteproc.h>
#include <plat/omap44xx.h>
/*-------------------------------------------------------------------------*/
@@ -74,41 +78,6 @@ void omap_mcbsp_register_board_cfg(struct resource *res, int res_count,
/*-------------------------------------------------------------------------*/
-#if defined(CONFIG_SND_OMAP_SOC_MCPDM) || \
- defined(CONFIG_SND_OMAP_SOC_MCPDM_MODULE)
-
-static struct resource mcpdm_resources[] = {
- {
- .name = "mcpdm_mem",
- .start = OMAP44XX_MCPDM_BASE,
- .end = OMAP44XX_MCPDM_BASE + SZ_4K,
- .flags = IORESOURCE_MEM,
- },
- {
- .name = "mcpdm_irq",
- .start = OMAP44XX_IRQ_MCPDM,
- .end = OMAP44XX_IRQ_MCPDM,
- .flags = IORESOURCE_IRQ,
- },
-};
-
-static struct platform_device omap_mcpdm_device = {
- .name = "omap-mcpdm",
- .id = -1,
- .num_resources = ARRAY_SIZE(mcpdm_resources),
- .resource = mcpdm_resources,
-};
-
-static void omap_init_mcpdm(void)
-{
- (void) platform_device_register(&omap_mcpdm_device);
-}
-#else
-static inline void omap_init_mcpdm(void) {}
-#endif
-
-/*-------------------------------------------------------------------------*/
-
#if defined(CONFIG_MMC_OMAP) || defined(CONFIG_MMC_OMAP_MODULE) || \
defined(CONFIG_MMC_OMAP_HS) || defined(CONFIG_MMC_OMAP_HS_MODULE)
@@ -190,8 +159,6 @@ static void omap_init_rng(void)
static inline void omap_init_rng(void) {}
#endif
-/*-------------------------------------------------------------------------*/
-
/* Numbering for the SPI-capable controllers when used for SPI:
* spi = 1
* uwire = 2
@@ -237,6 +204,7 @@ static inline void omap_init_uwire(void) {}
#if defined(CONFIG_TIDSPBRIDGE) || defined(CONFIG_TIDSPBRIDGE_MODULE)
static phys_addr_t omap_dsp_phys_mempool_base;
+static phys_addr_t omap_dsp_phys_mempool_size;
void __init omap_dsp_reserve_sdram_memblock(void)
{
@@ -256,6 +224,7 @@ void __init omap_dsp_reserve_sdram_memblock(void)
memblock_remove(paddr, size);
omap_dsp_phys_mempool_base = paddr;
+ omap_dsp_phys_mempool_size = size;
}
phys_addr_t omap_dsp_get_mempool_base(void)
@@ -263,6 +232,73 @@ phys_addr_t omap_dsp_get_mempool_base(void)
return omap_dsp_phys_mempool_base;
}
EXPORT_SYMBOL(omap_dsp_get_mempool_base);
+
+phys_addr_t omap_dsp_get_mempool_size(void)
+{
+ return omap_dsp_phys_mempool_size;
+}
+EXPORT_SYMBOL(omap_dsp_get_mempool_size);
+#endif
+
+#if defined(CONFIG_OMAP_REMOTE_PROC)
+static phys_addr_t omap_ipu_phys_mempool_base;
+static u32 omap_ipu_phys_mempool_size;
+static phys_addr_t omap_ipu_phys_st_mempool_base;
+static u32 omap_ipu_phys_st_mempool_size;
+
+void __init omap_ipu_reserve_sdram_memblock(void)
+{
+ /* Currently handles only the IPU; the DSP will be handled later. */
+ u32 size = CONFIG_OMAP_REMOTEPROC_MEMPOOL_SIZE;
+ phys_addr_t paddr;
+
+ if (!size)
+ return;
+
+ paddr = memblock_alloc(size, SZ_1M);
+ if (!paddr) {
+ pr_err("%s: failed to reserve %x bytes\n",
+ __func__, size);
+ return;
+ }
+ memblock_free(paddr, size);
+ memblock_remove(paddr, size);
+
+ omap_ipu_phys_mempool_base = paddr;
+ omap_ipu_phys_mempool_size = size;
+}
+
+void __init omap_ipu_set_static_mempool(u32 start, u32 size)
+{
+ omap_ipu_phys_st_mempool_base = start;
+ omap_ipu_phys_st_mempool_size = size;
+}
+
+phys_addr_t omap_ipu_get_mempool_base(enum omap_rproc_mempool_type type)
+{
+ switch (type) {
+ case OMAP_RPROC_MEMPOOL_STATIC:
+ return omap_ipu_phys_st_mempool_base;
+ case OMAP_RPROC_MEMPOOL_DYNAMIC:
+ return omap_ipu_phys_mempool_base;
+ default:
+ return 0;
+ }
+}
+EXPORT_SYMBOL(omap_ipu_get_mempool_base);
+
+u32 omap_ipu_get_mempool_size(enum omap_rproc_mempool_type type)
+{
+ switch (type) {
+ case OMAP_RPROC_MEMPOOL_STATIC:
+ return omap_ipu_phys_st_mempool_size;
+ case OMAP_RPROC_MEMPOOL_DYNAMIC:
+ return omap_ipu_phys_mempool_size;
+ default:
+ return 0;
+ }
+}
+EXPORT_SYMBOL(omap_ipu_get_mempool_size);
#endif
/*
@@ -291,7 +327,6 @@ static int __init omap_init_devices(void)
* in alphabetical order so they're easier to sort through.
*/
omap_init_rng();
- omap_init_mcpdm();
omap_init_uwire();
return 0;
}
diff --git a/arch/arm/plat-omap/dma.c b/arch/arm/plat-omap/dma.c
index c22217c..3ec7ec5 100644
--- a/arch/arm/plat-omap/dma.c
+++ b/arch/arm/plat-omap/dma.c
@@ -1024,12 +1024,26 @@ EXPORT_SYMBOL(omap_set_dma_callback);
*/
dma_addr_t omap_get_dma_src_pos(int lch)
{
+ u32 cdac;
dma_addr_t offset = 0;
if (cpu_is_omap15xx())
offset = p->dma_read(CPC, lch);
- else
- offset = p->dma_read(CSAC, lch);
+ else {
+ /*
+ * CDAC != 0 indicates that the DMA transfer on the channel has
+ * been started already.
+ * If CDAC == 0, we cannot trust the CSAC value since it has
+ * not been updated and can contain a random number.
+ * Return the start address in case the DMA has not yet started;
+ * this is valid since the DMA has not yet progressed.
+ */
+ cdac = p->dma_read(CDAC, lch);
+ if (likely(cdac))
+ offset = p->dma_read(CSAC, lch);
+ else
+ offset = p->dma_read(CSSA, lch);
+ }
if (IS_DMA_ERRATA(DMA_ERRATA_3_3) && offset == 0)
offset = p->dma_read(CSAC, lch);
diff --git a/arch/arm/plat-omap/dmtimer.c b/arch/arm/plat-omap/dmtimer.c
index ee9f6eb..4278e3c 100644
--- a/arch/arm/plat-omap/dmtimer.c
+++ b/arch/arm/plat-omap/dmtimer.c
@@ -3,6 +3,12 @@
*
* OMAP Dual-Mode Timers
*
+ * Copyright (C) 2010 Texas Instruments Incorporated - http://www.ti.com/
+ * Tarun Kanti DebBarma <tarun.kanti@ti.com>
+ * Thara Gopinath <thara@ti.com>
+ *
+ * dmtimer adaptation to platform_driver.
+ *
* Copyright (C) 2005 Nokia Corporation
* OMAP2 support by Juha Yrjola
* API improvements and OMAP2 clock framework support by Timo Teras
@@ -29,17 +35,17 @@
* 675 Mass Ave, Cambridge, MA 02139, USA.
*/
-#include <linux/init.h>
-#include <linux/spinlock.h>
-#include <linux/errno.h>
-#include <linux/list.h>
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/io.h>
-#include <linux/module.h>
-#include <mach/hardware.h>
+#include <linux/slab.h>
+#include <linux/pm_runtime.h>
+#include <linux/err.h>
+#include <linux/platform_device.h>
+
#include <plat/dmtimer.h>
-#include <mach/irqs.h>
+#include <plat/common.h>
+#include <plat/omap-pm.h>
/* register offsets */
#define _OMAP_TIMER_ID_OFFSET 0x00
@@ -150,168 +156,144 @@
#define OMAP_TIMER_TICK_INT_MASK_COUNT_REG \
(_OMAP_TIMER_TICK_INT_MASK_COUNT_OFFSET | (WP_TOWR << WPSHIFT))
-struct omap_dm_timer {
- unsigned long phys_base;
- int irq;
-#ifdef CONFIG_ARCH_OMAP2PLUS
- struct clk *iclk, *fclk;
-#endif
- void __iomem *io_base;
- unsigned reserved:1;
- unsigned enabled:1;
- unsigned posted:1;
-};
+/*
+ * OMAP4 IP revision has different register offsets
+ * for interrupt registers and functional registers.
+ */
+#define VERSION2_TIMER_WAKEUP_EN_REG_OFFSET 0x14
+#define VERSION2_TIMER_STAT_REG_OFFSET 0x10
-static int dm_timer_count;
-
-#ifdef CONFIG_ARCH_OMAP1
-static struct omap_dm_timer omap1_dm_timers[] = {
- { .phys_base = 0xfffb1400, .irq = INT_1610_GPTIMER1 },
- { .phys_base = 0xfffb1c00, .irq = INT_1610_GPTIMER2 },
- { .phys_base = 0xfffb2400, .irq = INT_1610_GPTIMER3 },
- { .phys_base = 0xfffb2c00, .irq = INT_1610_GPTIMER4 },
- { .phys_base = 0xfffb3400, .irq = INT_1610_GPTIMER5 },
- { .phys_base = 0xfffb3c00, .irq = INT_1610_GPTIMER6 },
- { .phys_base = 0xfffb7400, .irq = INT_1610_GPTIMER7 },
- { .phys_base = 0xfffbd400, .irq = INT_1610_GPTIMER8 },
-};
+#define MAX_WRITE_PEND_WAIT 10000 /* 10ms timeout delay */
-static const int omap1_dm_timer_count = ARRAY_SIZE(omap1_dm_timers);
+static LIST_HEAD(omap_timer_list);
+static DEFINE_MUTEX(dm_timer_mutex);
-#else
-#define omap1_dm_timers NULL
-#define omap1_dm_timer_count 0
-#endif /* CONFIG_ARCH_OMAP1 */
-
-#ifdef CONFIG_ARCH_OMAP2
-static struct omap_dm_timer omap2_dm_timers[] = {
- { .phys_base = 0x48028000, .irq = INT_24XX_GPTIMER1 },
- { .phys_base = 0x4802a000, .irq = INT_24XX_GPTIMER2 },
- { .phys_base = 0x48078000, .irq = INT_24XX_GPTIMER3 },
- { .phys_base = 0x4807a000, .irq = INT_24XX_GPTIMER4 },
- { .phys_base = 0x4807c000, .irq = INT_24XX_GPTIMER5 },
- { .phys_base = 0x4807e000, .irq = INT_24XX_GPTIMER6 },
- { .phys_base = 0x48080000, .irq = INT_24XX_GPTIMER7 },
- { .phys_base = 0x48082000, .irq = INT_24XX_GPTIMER8 },
- { .phys_base = 0x48084000, .irq = INT_24XX_GPTIMER9 },
- { .phys_base = 0x48086000, .irq = INT_24XX_GPTIMER10 },
- { .phys_base = 0x48088000, .irq = INT_24XX_GPTIMER11 },
- { .phys_base = 0x4808a000, .irq = INT_24XX_GPTIMER12 },
-};
+/**
+ * omap_dm_timer_read_reg - read timer registers in posted and non-posted mode
+ * @timer: timer pointer on which the read operation is performed
+ * @reg: lowest byte holds the register offset
+ *
+ * The posted mode bit is encoded in reg. Note that in posted mode the write
+ * pending bit must be checked; otherwise a read of a non-completed write
+ * will produce an error.
+ */
+static inline u32 omap_dm_timer_read_reg(struct omap_dm_timer *timer, u32 reg)
+{
+ int i = 0;
-static const char *omap2_dm_source_names[] __initdata = {
- "sys_ck",
- "func_32k_ck",
- "alt_ck",
- NULL
-};
+ if (reg >= OMAP_TIMER_WAKEUP_EN_REG)
+ reg += timer->func_offset;
+ else if (reg >= OMAP_TIMER_STAT_REG)
+ reg += timer->intr_offset;
-static struct clk *omap2_dm_source_clocks[3];
-static const int omap2_dm_timer_count = ARRAY_SIZE(omap2_dm_timers);
+ if (timer->posted) {
+ omap_test_timeout(!(readl(timer->io_base +
+ ((OMAP_TIMER_WRITE_PEND_REG +
+ timer->func_offset) & 0xff)) & (reg >> WPSHIFT)),
+ MAX_WRITE_PEND_WAIT, i);
-#else
-#define omap2_dm_timers NULL
-#define omap2_dm_timer_count 0
-#define omap2_dm_source_names NULL
-#define omap2_dm_source_clocks NULL
-#endif /* CONFIG_ARCH_OMAP2 */
-
-#ifdef CONFIG_ARCH_OMAP3
-static struct omap_dm_timer omap3_dm_timers[] = {
- { .phys_base = 0x48318000, .irq = INT_24XX_GPTIMER1 },
- { .phys_base = 0x49032000, .irq = INT_24XX_GPTIMER2 },
- { .phys_base = 0x49034000, .irq = INT_24XX_GPTIMER3 },
- { .phys_base = 0x49036000, .irq = INT_24XX_GPTIMER4 },
- { .phys_base = 0x49038000, .irq = INT_24XX_GPTIMER5 },
- { .phys_base = 0x4903A000, .irq = INT_24XX_GPTIMER6 },
- { .phys_base = 0x4903C000, .irq = INT_24XX_GPTIMER7 },
- { .phys_base = 0x4903E000, .irq = INT_24XX_GPTIMER8 },
- { .phys_base = 0x49040000, .irq = INT_24XX_GPTIMER9 },
- { .phys_base = 0x48086000, .irq = INT_24XX_GPTIMER10 },
- { .phys_base = 0x48088000, .irq = INT_24XX_GPTIMER11 },
- { .phys_base = 0x48304000, .irq = INT_34XX_GPT12_IRQ },
-};
+ if (WARN_ON_ONCE(i == MAX_WRITE_PEND_WAIT))
+ dev_err(&timer->pdev->dev, "read timeout.\n");
+ }
-static const char *omap3_dm_source_names[] __initdata = {
- "sys_ck",
- "omap_32k_fck",
- NULL
-};
+ return readl(timer->io_base + (reg & 0xff));
+}
-static struct clk *omap3_dm_source_clocks[2];
-static const int omap3_dm_timer_count = ARRAY_SIZE(omap3_dm_timers);
+/**
+ * omap_dm_timer_write_reg - write timer registers in posted and non-posted mode
+ * @timer: timer pointer on which the write operation is performed
+ * @reg: lowest byte holds the register offset
+ * @value: data to write into the register
+ *
+ * The posted mode bit is encoded in reg. Note that in posted mode the write
+ * pending bit must be checked. Otherwise a write on a register which has a
+ * pending write will be lost.
+ */
+static void omap_dm_timer_write_reg(struct omap_dm_timer *timer, u32 reg,
+ u32 value)
+{
+ int i = 0;
-#else
-#define omap3_dm_timers NULL
-#define omap3_dm_timer_count 0
-#define omap3_dm_source_names NULL
-#define omap3_dm_source_clocks NULL
-#endif /* CONFIG_ARCH_OMAP3 */
-
-#ifdef CONFIG_ARCH_OMAP4
-static struct omap_dm_timer omap4_dm_timers[] = {
- { .phys_base = 0x4a318000, .irq = OMAP44XX_IRQ_GPT1 },
- { .phys_base = 0x48032000, .irq = OMAP44XX_IRQ_GPT2 },
- { .phys_base = 0x48034000, .irq = OMAP44XX_IRQ_GPT3 },
- { .phys_base = 0x48036000, .irq = OMAP44XX_IRQ_GPT4 },
- { .phys_base = 0x40138000, .irq = OMAP44XX_IRQ_GPT5 },
- { .phys_base = 0x4013a000, .irq = OMAP44XX_IRQ_GPT6 },
- { .phys_base = 0x4013a000, .irq = OMAP44XX_IRQ_GPT7 },
- { .phys_base = 0x4013e000, .irq = OMAP44XX_IRQ_GPT8 },
- { .phys_base = 0x4803e000, .irq = OMAP44XX_IRQ_GPT9 },
- { .phys_base = 0x48086000, .irq = OMAP44XX_IRQ_GPT10 },
- { .phys_base = 0x48088000, .irq = OMAP44XX_IRQ_GPT11 },
- { .phys_base = 0x4a320000, .irq = OMAP44XX_IRQ_GPT12 },
-};
-static const char *omap4_dm_source_names[] __initdata = {
- "sys_clkin_ck",
- "sys_32k_ck",
- NULL
-};
-static struct clk *omap4_dm_source_clocks[2];
-static const int omap4_dm_timer_count = ARRAY_SIZE(omap4_dm_timers);
+ if (reg >= OMAP_TIMER_WAKEUP_EN_REG)
+ reg += timer->func_offset;
+ else if (reg >= OMAP_TIMER_STAT_REG)
+ reg += timer->intr_offset;
-#else
-#define omap4_dm_timers NULL
-#define omap4_dm_timer_count 0
-#define omap4_dm_source_names NULL
-#define omap4_dm_source_clocks NULL
-#endif /* CONFIG_ARCH_OMAP4 */
+ if (timer->posted) {
+ omap_test_timeout(!(readl(timer->io_base +
+ ((OMAP_TIMER_WRITE_PEND_REG +
+ timer->func_offset) & 0xff)) & (reg >> WPSHIFT)),
+ MAX_WRITE_PEND_WAIT, i);
-static struct omap_dm_timer *dm_timers;
-static const char **dm_source_names;
-static struct clk **dm_source_clocks;
+ if (WARN_ON(i == MAX_WRITE_PEND_WAIT))
+ dev_err(&timer->pdev->dev, "write timeout.\n");
+ }
-static spinlock_t dm_timer_lock;
+ writel(value, timer->io_base + (reg & 0xff));
+}
-/*
- * Reads timer registers in posted and non-posted mode. The posted mode bit
- * is encoded in reg. Note that in posted mode write pending bit must be
- * checked. Otherwise a read of a non completed write will produce an error.
- */
-static inline u32 omap_dm_timer_read_reg(struct omap_dm_timer *timer, u32 reg)
+static void omap_timer_save_context(struct omap_dm_timer *timer)
{
- if (timer->posted)
- while (readl(timer->io_base + (OMAP_TIMER_WRITE_PEND_REG & 0xff))
- & (reg >> WPSHIFT))
- cpu_relax();
- return readl(timer->io_base + (reg & 0xff));
+ timer->context.tiocp_cfg =
+ omap_dm_timer_read_reg(timer, OMAP_TIMER_OCP_CFG_REG);
+ timer->context.tistat =
+ omap_dm_timer_read_reg(timer, OMAP_TIMER_SYS_STAT_REG);
+ timer->context.tisr =
+ omap_dm_timer_read_reg(timer, OMAP_TIMER_STAT_REG);
+ timer->context.tier =
+ omap_dm_timer_read_reg(timer, OMAP_TIMER_INT_EN_REG);
+ timer->context.twer =
+ omap_dm_timer_read_reg(timer, OMAP_TIMER_WAKEUP_EN_REG);
+ timer->context.tclr =
+ omap_dm_timer_read_reg(timer, OMAP_TIMER_CTRL_REG);
+ timer->context.tcrr =
+ omap_dm_timer_read_reg(timer, OMAP_TIMER_COUNTER_REG);
+ timer->context.tldr =
+ omap_dm_timer_read_reg(timer, OMAP_TIMER_LOAD_REG);
+ timer->context.tmar =
+ omap_dm_timer_read_reg(timer, OMAP_TIMER_MATCH_REG);
+ timer->context.tsicr =
+ omap_dm_timer_read_reg(timer, OMAP_TIMER_IF_CTRL_REG);
}
-/*
- * Writes timer registers in posted and non-posted mode. The posted mode bit
- * is encoded in reg. Note that in posted mode the write pending bit must be
- * checked. Otherwise a write on a register which has a pending write will be
- * lost.
- */
-static void omap_dm_timer_write_reg(struct omap_dm_timer *timer, u32 reg,
- u32 value)
+static void omap_timer_restore_context(struct omap_dm_timer *timer)
{
- if (timer->posted)
- while (readl(timer->io_base + (OMAP_TIMER_WRITE_PEND_REG & 0xff))
- & (reg >> WPSHIFT))
- cpu_relax();
- writel(value, timer->io_base + (reg & 0xff));
+ omap_dm_timer_write_reg(timer, OMAP_TIMER_OCP_CFG_REG,
+ timer->context.tiocp_cfg);
+ omap_dm_timer_write_reg(timer, OMAP_TIMER_SYS_STAT_REG,
+ timer->context.tistat);
+ omap_dm_timer_write_reg(timer, OMAP_TIMER_STAT_REG,
+ timer->context.tisr);
+ omap_dm_timer_write_reg(timer, OMAP_TIMER_INT_EN_REG,
+ timer->context.tier);
+ omap_dm_timer_write_reg(timer, OMAP_TIMER_WAKEUP_EN_REG,
+ timer->context.twer);
+ omap_dm_timer_write_reg(timer, OMAP_TIMER_CTRL_REG,
+ timer->context.tclr);
+ omap_dm_timer_write_reg(timer, OMAP_TIMER_COUNTER_REG,
+ timer->context.tcrr);
+ omap_dm_timer_write_reg(timer, OMAP_TIMER_LOAD_REG,
+ timer->context.tldr);
+ omap_dm_timer_write_reg(timer, OMAP_TIMER_MATCH_REG,
+ timer->context.tmar);
+ omap_dm_timer_write_reg(timer, OMAP_TIMER_IF_CTRL_REG,
+ timer->context.tsicr);
+}
+
+static void __timer_enable(struct omap_dm_timer *timer)
+{
+ if (!timer->enabled) {
+ pm_runtime_get_sync(&timer->pdev->dev);
+ timer->enabled = 1;
+ }
+}
+
+static void __timer_disable(struct omap_dm_timer *timer)
+{
+ if (timer->enabled) {
+ pm_runtime_put_sync_suspend(&timer->pdev->dev);
+ timer->enabled = 0;
+ }
}
static void omap_dm_timer_wait_for_reset(struct omap_dm_timer *timer)
@@ -332,58 +314,89 @@ static void omap_dm_timer_reset(struct omap_dm_timer *timer)
{
u32 l;
- if (!cpu_class_is_omap2() || timer != &dm_timers[0]) {
+ if (!timer->is_early_init)
+ __timer_enable(timer);
+
+ if (timer->pdev->id != 1) {
omap_dm_timer_write_reg(timer, OMAP_TIMER_IF_CTRL_REG, 0x06);
omap_dm_timer_wait_for_reset(timer);
}
- omap_dm_timer_set_source(timer, OMAP_TIMER_SRC_32_KHZ);
l = omap_dm_timer_read_reg(timer, OMAP_TIMER_OCP_CFG_REG);
l |= 0x02 << 3; /* Set to smart-idle mode */
l |= 0x2 << 8; /* Set clock activity to perserve f-clock on idle */
+ omap_dm_timer_write_reg(timer, OMAP_TIMER_OCP_CFG_REG, l);
- /* Enable autoidle on OMAP2 / OMAP3 */
- if (cpu_is_omap24xx() || cpu_is_omap34xx())
- l |= 0x1 << 0;
+ if (!timer->is_early_init)
+ __timer_disable(timer);
+}
- /*
- * Enable wake-up on OMAP2 CPUs.
- */
- if (cpu_class_is_omap2())
- l |= 1 << 2;
- omap_dm_timer_write_reg(timer, OMAP_TIMER_OCP_CFG_REG, l);
+static int omap_dm_timer_prepare(struct omap_dm_timer *timer)
+{
+ int ret;
+
+ timer->fclk = clk_get(&timer->pdev->dev, "fck");
+ if (WARN_ON_ONCE(IS_ERR_OR_NULL(timer->fclk))) {
+ timer->fclk = NULL;
+ dev_err(&timer->pdev->dev, ": No fclk handle.\n");
+ return -EINVAL;
+ }
+
+ if (unlikely(timer->is_early_init)) {
+ ret = clk_enable(timer->fclk);
+ if (ret) {
+ clk_put(timer->fclk);
+ return -EINVAL;
+ }
+ goto end;
+ }
+
+ if (timer->needs_manual_reset)
+ omap_dm_timer_reset(timer);
+
+ omap_dm_timer_set_source(timer, OMAP_TIMER_SRC_32_KHZ);
+
+end:
+ if (!timer->is_early_init)
+ __timer_enable(timer);
/* Match hardware reset default of posted mode */
omap_dm_timer_write_reg(timer, OMAP_TIMER_IF_CTRL_REG,
OMAP_TIMER_CTRL_POSTED);
- timer->posted = 1;
-}
-static void omap_dm_timer_prepare(struct omap_dm_timer *timer)
-{
- omap_dm_timer_enable(timer);
- omap_dm_timer_reset(timer);
+ if (!timer->is_early_init)
+ __timer_disable(timer);
+
+ timer->posted = 1;
+ return 0;
}
struct omap_dm_timer *omap_dm_timer_request(void)
{
- struct omap_dm_timer *timer = NULL;
- unsigned long flags;
- int i;
+ struct omap_dm_timer *timer = NULL, *t;
+ int ret;
- spin_lock_irqsave(&dm_timer_lock, flags);
- for (i = 0; i < dm_timer_count; i++) {
- if (dm_timers[i].reserved)
+ mutex_lock(&dm_timer_mutex);
+ list_for_each_entry(t, &omap_timer_list, node) {
+ if (t->reserved)
continue;
- timer = &dm_timers[i];
+ timer = t;
timer->reserved = 1;
+ timer->enabled = 0;
break;
}
- spin_unlock_irqrestore(&dm_timer_lock, flags);
+ mutex_unlock(&dm_timer_mutex);
- if (timer != NULL)
- omap_dm_timer_prepare(timer);
+ if (!timer) {
+ pr_debug("%s: free timer not available.\n", __func__);
+ return NULL;
+ }
+ ret = omap_dm_timer_prepare(timer);
+ if (ret) {
+ timer->reserved = 0;
+ return NULL;
+ }
return timer;
}
@@ -391,74 +404,88 @@ EXPORT_SYMBOL_GPL(omap_dm_timer_request);
struct omap_dm_timer *omap_dm_timer_request_specific(int id)
{
- struct omap_dm_timer *timer;
- unsigned long flags;
+ struct omap_dm_timer *timer = NULL, *t;
+ int ret;
+
+ mutex_lock(&dm_timer_mutex);
+ list_for_each_entry(t, &omap_timer_list, node) {
+ if (t->pdev->id == id && !t->reserved) {
+ timer = t;
+ timer->reserved = 1;
+ timer->enabled = 0;
+ break;
+ }
+ }
+ mutex_unlock(&dm_timer_mutex);
- spin_lock_irqsave(&dm_timer_lock, flags);
- if (id <= 0 || id > dm_timer_count || dm_timers[id-1].reserved) {
- spin_unlock_irqrestore(&dm_timer_lock, flags);
- printk("BUG: warning at %s:%d/%s(): unable to get timer %d\n",
- __FILE__, __LINE__, __func__, id);
- dump_stack();
+ if (!timer) {
+ pr_debug("%s: timer%d not available.\n", __func__, id);
+ return NULL;
+ }
+ ret = omap_dm_timer_prepare(timer);
+ if (ret) {
+ timer->reserved = 0;
return NULL;
}
-
- timer = &dm_timers[id-1];
- timer->reserved = 1;
- spin_unlock_irqrestore(&dm_timer_lock, flags);
-
- omap_dm_timer_prepare(timer);
return timer;
}
EXPORT_SYMBOL_GPL(omap_dm_timer_request_specific);
-void omap_dm_timer_free(struct omap_dm_timer *timer)
+int omap_dm_timer_free(struct omap_dm_timer *timer)
{
- omap_dm_timer_enable(timer);
- omap_dm_timer_reset(timer);
- omap_dm_timer_disable(timer);
+ unsigned long flags;
+ if (!timer)
+ return -EINVAL;
+
+ spin_lock_irqsave(&timer->lock, flags);
+ if (!timer->reserved) {
+ spin_unlock_irqrestore(&timer->lock, flags);
+ return -EINVAL;
+ }
+
+ __timer_disable(timer);
+ clk_put(timer->fclk);
- WARN_ON(!timer->reserved);
timer->reserved = 0;
+ timer->context_saved = false;
+
+ spin_unlock_irqrestore(&timer->lock, flags);
+ return 0;
}
EXPORT_SYMBOL_GPL(omap_dm_timer_free);
-void omap_dm_timer_enable(struct omap_dm_timer *timer)
+int omap_dm_timer_enable(struct omap_dm_timer *timer)
{
- if (timer->enabled)
- return;
-
-#ifdef CONFIG_ARCH_OMAP2PLUS
- if (cpu_class_is_omap2()) {
- clk_enable(timer->fclk);
- clk_enable(timer->iclk);
- }
-#endif
+ unsigned long flags;
+ if (!timer)
+ return -EINVAL;
- timer->enabled = 1;
+ spin_lock_irqsave(&timer->lock, flags);
+ __timer_enable(timer);
+ spin_unlock_irqrestore(&timer->lock, flags);
+ return 0;
}
EXPORT_SYMBOL_GPL(omap_dm_timer_enable);
-void omap_dm_timer_disable(struct omap_dm_timer *timer)
+int omap_dm_timer_disable(struct omap_dm_timer *timer)
{
- if (!timer->enabled)
- return;
-
-#ifdef CONFIG_ARCH_OMAP2PLUS
- if (cpu_class_is_omap2()) {
- clk_disable(timer->iclk);
- clk_disable(timer->fclk);
- }
-#endif
+ unsigned long flags;
+ if (!timer)
+ return -EINVAL;
- timer->enabled = 0;
+ spin_lock_irqsave(&timer->lock, flags);
+ __timer_disable(timer);
+ spin_unlock_irqrestore(&timer->lock, flags);
+ return 0;
}
EXPORT_SYMBOL_GPL(omap_dm_timer_disable);
int omap_dm_timer_get_irq(struct omap_dm_timer *timer)
{
- return timer->irq;
+ if (timer)
+ return timer->irq;
+ return -EINVAL;
}
EXPORT_SYMBOL_GPL(omap_dm_timer_get_irq);
@@ -470,24 +497,29 @@ EXPORT_SYMBOL_GPL(omap_dm_timer_get_irq);
*/
__u32 omap_dm_timer_modify_idlect_mask(__u32 inputmask)
{
- int i;
+ int i = 0;
+ struct omap_dm_timer *timer = NULL;
/* If ARMXOR cannot be idled this function call is unnecessary */
if (!(inputmask & (1 << 1)))
return inputmask;
/* If any active timer is using ARMXOR return modified mask */
- for (i = 0; i < dm_timer_count; i++) {
+ mutex_lock(&dm_timer_mutex);
+ list_for_each_entry(timer, &omap_timer_list, node) {
+
u32 l;
- l = omap_dm_timer_read_reg(&dm_timers[i], OMAP_TIMER_CTRL_REG);
+ l = omap_dm_timer_read_reg(timer, OMAP_TIMER_CTRL_REG);
if (l & OMAP_TIMER_CTRL_ST) {
if (((omap_readl(MOD_CONF_CTRL_1) >> (i * 2)) & 0x03) == 0)
inputmask &= ~(1 << 1);
else
inputmask &= ~(1 << 2);
}
+ i++;
}
+ mutex_unlock(&dm_timer_mutex);
return inputmask;
}
@@ -497,7 +529,9 @@ EXPORT_SYMBOL_GPL(omap_dm_timer_modify_idlect_mask);
struct clk *omap_dm_timer_get_fclk(struct omap_dm_timer *timer)
{
- return timer->fclk;
+ if (timer)
+ return timer->fclk;
+ return NULL;
}
EXPORT_SYMBOL_GPL(omap_dm_timer_get_fclk);
@@ -511,75 +545,116 @@ EXPORT_SYMBOL_GPL(omap_dm_timer_modify_idlect_mask);
#endif
-void omap_dm_timer_trigger(struct omap_dm_timer *timer)
+int omap_dm_timer_trigger(struct omap_dm_timer *timer)
{
- omap_dm_timer_write_reg(timer, OMAP_TIMER_TRIGGER_REG, 0);
+ unsigned long flags;
+
+ if (!timer)
+ return -EINVAL;
+
+ spin_lock_irqsave(&timer->lock, flags);
+ if (timer->enabled) {
+ omap_dm_timer_write_reg(timer, OMAP_TIMER_TRIGGER_REG, 0);
+ spin_unlock_irqrestore(&timer->lock, flags);
+ return 0;
+ }
+ spin_unlock_irqrestore(&timer->lock, flags);
+ return -EINVAL;
}
EXPORT_SYMBOL_GPL(omap_dm_timer_trigger);
-void omap_dm_timer_start(struct omap_dm_timer *timer)
+int omap_dm_timer_start(struct omap_dm_timer *timer)
{
u32 l;
+ unsigned long flags;
+
+ if (!timer)
+ return -EINVAL;
+
+ spin_lock_irqsave(&timer->lock, flags);
+ if (timer->loses_context) {
+ __timer_enable(timer);
+ if (omap_pm_was_context_lost(&timer->pdev->dev) &&
+ timer->context_saved) {
+ omap_timer_restore_context(timer);
+ timer->context_saved = false;
+ }
+ }
l = omap_dm_timer_read_reg(timer, OMAP_TIMER_CTRL_REG);
if (!(l & OMAP_TIMER_CTRL_ST)) {
l |= OMAP_TIMER_CTRL_ST;
omap_dm_timer_write_reg(timer, OMAP_TIMER_CTRL_REG, l);
}
+ spin_unlock_irqrestore(&timer->lock, flags);
+ return 0;
}
EXPORT_SYMBOL_GPL(omap_dm_timer_start);
-void omap_dm_timer_stop(struct omap_dm_timer *timer)
+int omap_dm_timer_stop(struct omap_dm_timer *timer)
{
u32 l;
+ struct dmtimer_platform_data *pdata;
+ unsigned long flags;
+
+ if (!timer)
+ return -EINVAL;
+
+ spin_lock_irqsave(&timer->lock, flags);
+ if (!timer->enabled) {
+ spin_unlock_irqrestore(&timer->lock, flags);
+ return -EINVAL;
+ }
+ pdata = timer->pdev->dev.platform_data;
l = omap_dm_timer_read_reg(timer, OMAP_TIMER_CTRL_REG);
if (l & OMAP_TIMER_CTRL_ST) {
l &= ~0x1;
omap_dm_timer_write_reg(timer, OMAP_TIMER_CTRL_REG, l);
-#ifdef CONFIG_ARCH_OMAP2PLUS
- /* Readback to make sure write has completed */
- omap_dm_timer_read_reg(timer, OMAP_TIMER_CTRL_REG);
- /*
- * Wait for functional clock period x 3.5 to make sure that
- * timer is stopped
- */
- udelay(3500000 / clk_get_rate(timer->fclk) + 1);
-#endif
+
+ if (!pdata->needs_manual_reset) {
+ /* Readback to make sure write has completed */
+ omap_dm_timer_read_reg(timer, OMAP_TIMER_CTRL_REG);
+ /*
+ * Wait for functional clock period x 3.5 to make
+ * sure that timer is stopped
+ */
+ udelay(3500000 / clk_get_rate(timer->fclk) + 1);
+ }
}
/* Ack possibly pending interrupt */
omap_dm_timer_write_reg(timer, OMAP_TIMER_STAT_REG,
OMAP_TIMER_INT_OVERFLOW);
-}
-EXPORT_SYMBOL_GPL(omap_dm_timer_stop);
-
-#ifdef CONFIG_ARCH_OMAP1
-
-int omap_dm_timer_set_source(struct omap_dm_timer *timer, int source)
-{
- int n = (timer - dm_timers) << 1;
- u32 l;
-
- l = omap_readl(MOD_CONF_CTRL_1) & ~(0x03 << n);
- l |= source << n;
- omap_writel(l, MOD_CONF_CTRL_1);
+ if (timer->loses_context) {
+ omap_timer_save_context(timer);
+ timer->context_saved = true;
+ __timer_disable(timer);
+ }
+ spin_unlock_irqrestore(&timer->lock, flags);
return 0;
}
-EXPORT_SYMBOL_GPL(omap_dm_timer_set_source);
-
-#else
+EXPORT_SYMBOL_GPL(omap_dm_timer_stop);
int omap_dm_timer_set_source(struct omap_dm_timer *timer, int source)
{
- int ret = -EINVAL;
+ int ret;
+ struct dmtimer_platform_data *pdata;
+ unsigned long flags;
+
+ if (!timer)
+ return -EINVAL;
if (source < 0 || source >= 3)
return -EINVAL;
- clk_disable(timer->fclk);
- ret = clk_set_parent(timer->fclk, dm_source_clocks[source]);
- clk_enable(timer->fclk);
+ spin_lock_irqsave(&timer->lock, flags);
+ pdata = timer->pdev->dev.platform_data;
+ __timer_disable(timer);
+ spin_unlock_irqrestore(&timer->lock, flags);
+
+ /* change the timer clock source */
+ ret = pdata->set_timer_src(timer->pdev, source);
/*
* When the functional clock disappears, too quick writes seem
@@ -591,13 +666,17 @@ int omap_dm_timer_set_source(struct omap_dm_timer *timer, int source)
}
EXPORT_SYMBOL_GPL(omap_dm_timer_set_source);
-#endif
-
-void omap_dm_timer_set_load(struct omap_dm_timer *timer, int autoreload,
+int omap_dm_timer_set_load(struct omap_dm_timer *timer, int autoreload,
unsigned int load)
{
u32 l;
+ unsigned long flags;
+ if (!timer)
+ return -EINVAL;
+
+ spin_lock_irqsave(&timer->lock, flags);
+ __timer_enable(timer);
l = omap_dm_timer_read_reg(timer, OMAP_TIMER_CTRL_REG);
if (autoreload)
l |= OMAP_TIMER_CTRL_AR;
@@ -607,14 +686,30 @@ void omap_dm_timer_set_load(struct omap_dm_timer *timer, int autoreload,
omap_dm_timer_write_reg(timer, OMAP_TIMER_LOAD_REG, load);
omap_dm_timer_write_reg(timer, OMAP_TIMER_TRIGGER_REG, 0);
+ __timer_disable(timer);
+ spin_unlock_irqrestore(&timer->lock, flags);
+ return 0;
}
EXPORT_SYMBOL_GPL(omap_dm_timer_set_load);
-/* Optimized set_load which removes costly spin wait in timer_start */
-void omap_dm_timer_set_load_start(struct omap_dm_timer *timer, int autoreload,
+int omap_dm_timer_set_load_start(struct omap_dm_timer *timer, int autoreload,
unsigned int load)
{
u32 l;
+ unsigned long flags;
+
+ if (!timer)
+ return -EINVAL;
+
+ spin_lock_irqsave(&timer->lock, flags);
+ if (timer->loses_context) {
+ __timer_enable(timer);
+ if (omap_pm_was_context_lost(&timer->pdev->dev) &&
+ timer->context_saved) {
+ omap_timer_restore_context(timer);
+ timer->context_saved = false;
+ }
+ }
l = omap_dm_timer_read_reg(timer, OMAP_TIMER_CTRL_REG);
if (autoreload) {
@@ -627,14 +722,22 @@ void omap_dm_timer_set_load_start(struct omap_dm_timer *timer, int autoreload,
omap_dm_timer_write_reg(timer, OMAP_TIMER_COUNTER_REG, load);
omap_dm_timer_write_reg(timer, OMAP_TIMER_CTRL_REG, l);
+ spin_unlock_irqrestore(&timer->lock, flags);
+ return 0;
}
EXPORT_SYMBOL_GPL(omap_dm_timer_set_load_start);
-void omap_dm_timer_set_match(struct omap_dm_timer *timer, int enable,
+int omap_dm_timer_set_match(struct omap_dm_timer *timer, int enable,
unsigned int match)
{
u32 l;
+ unsigned long flags;
+
+ if (!timer)
+ return -EINVAL;
+ spin_lock_irqsave(&timer->lock, flags);
+ __timer_enable(timer);
l = omap_dm_timer_read_reg(timer, OMAP_TIMER_CTRL_REG);
if (enable)
l |= OMAP_TIMER_CTRL_CE;
@@ -642,14 +745,23 @@ void omap_dm_timer_set_match(struct omap_dm_timer *timer, int enable,
l &= ~OMAP_TIMER_CTRL_CE;
omap_dm_timer_write_reg(timer, OMAP_TIMER_CTRL_REG, l);
omap_dm_timer_write_reg(timer, OMAP_TIMER_MATCH_REG, match);
+ __timer_disable(timer);
+ spin_unlock_irqrestore(&timer->lock, flags);
+ return 0;
}
EXPORT_SYMBOL_GPL(omap_dm_timer_set_match);
-void omap_dm_timer_set_pwm(struct omap_dm_timer *timer, int def_on,
+int omap_dm_timer_set_pwm(struct omap_dm_timer *timer, int def_on,
int toggle, int trigger)
{
u32 l;
+ unsigned long flags;
+
+ if (!timer)
+ return -EINVAL;
+ spin_lock_irqsave(&timer->lock, flags);
+ __timer_enable(timer);
l = omap_dm_timer_read_reg(timer, OMAP_TIMER_CTRL_REG);
l &= ~(OMAP_TIMER_CTRL_GPOCFG | OMAP_TIMER_CTRL_SCPWM |
OMAP_TIMER_CTRL_PT | (0x03 << 10));
@@ -659,13 +771,22 @@ void omap_dm_timer_set_pwm(struct omap_dm_timer *timer, int def_on,
l |= OMAP_TIMER_CTRL_PT;
l |= trigger << 10;
omap_dm_timer_write_reg(timer, OMAP_TIMER_CTRL_REG, l);
+ __timer_disable(timer);
+ spin_unlock_irqrestore(&timer->lock, flags);
+ return 0;
}
EXPORT_SYMBOL_GPL(omap_dm_timer_set_pwm);
-void omap_dm_timer_set_prescaler(struct omap_dm_timer *timer, int prescaler)
+int omap_dm_timer_set_prescaler(struct omap_dm_timer *timer, int prescaler)
{
u32 l;
+ unsigned long flags;
+
+ if (!timer)
+ return -EINVAL;
+ spin_lock_irqsave(&timer->lock, flags);
+ __timer_enable(timer);
l = omap_dm_timer_read_reg(timer, OMAP_TIMER_CTRL_REG);
l &= ~(OMAP_TIMER_CTRL_PRE | (0x07 << 2));
if (prescaler >= 0x00 && prescaler <= 0x07) {
@@ -673,58 +794,115 @@ void omap_dm_timer_set_prescaler(struct omap_dm_timer *timer, int prescaler)
l |= prescaler << 2;
}
omap_dm_timer_write_reg(timer, OMAP_TIMER_CTRL_REG, l);
+ __timer_disable(timer);
+ spin_unlock_irqrestore(&timer->lock, flags);
+ return 0;
}
EXPORT_SYMBOL_GPL(omap_dm_timer_set_prescaler);
-void omap_dm_timer_set_int_enable(struct omap_dm_timer *timer,
+int omap_dm_timer_set_int_enable(struct omap_dm_timer *timer,
unsigned int value)
{
+ unsigned long flags;
+ if (!timer)
+ return -EINVAL;
+
+ spin_lock_irqsave(&timer->lock, flags);
+ if (!timer->is_early_init)
+ __timer_enable(timer);
+
omap_dm_timer_write_reg(timer, OMAP_TIMER_INT_EN_REG, value);
omap_dm_timer_write_reg(timer, OMAP_TIMER_WAKEUP_EN_REG, value);
+
+ if (!timer->is_early_init)
+ __timer_disable(timer);
+ spin_unlock_irqrestore(&timer->lock, flags);
+ return 0;
}
EXPORT_SYMBOL_GPL(omap_dm_timer_set_int_enable);
unsigned int omap_dm_timer_read_status(struct omap_dm_timer *timer)
{
- unsigned int l;
+ unsigned long flags;
+ unsigned int ret;
- l = omap_dm_timer_read_reg(timer, OMAP_TIMER_STAT_REG);
+ if (WARN_ON(!timer))
+ return -EINVAL;
- return l;
+ spin_lock_irqsave(&timer->lock, flags);
+ if (timer->is_early_init || timer->enabled) {
+ ret = omap_dm_timer_read_reg(timer, OMAP_TIMER_STAT_REG);
+ spin_unlock_irqrestore(&timer->lock, flags);
+ return ret;
+ }
+ spin_unlock_irqrestore(&timer->lock, flags);
+ WARN_ON(!timer->enabled);
+ return -EINVAL;
}
EXPORT_SYMBOL_GPL(omap_dm_timer_read_status);
-void omap_dm_timer_write_status(struct omap_dm_timer *timer, unsigned int value)
+int omap_dm_timer_write_status(struct omap_dm_timer *timer, unsigned int value)
{
- omap_dm_timer_write_reg(timer, OMAP_TIMER_STAT_REG, value);
+ unsigned long flags;
+
+ if (!timer)
+ return -EINVAL;
+
+ spin_lock_irqsave(&timer->lock, flags);
+ if (timer->is_early_init || timer->enabled) {
+ omap_dm_timer_write_reg(timer, OMAP_TIMER_STAT_REG, value);
+ spin_unlock_irqrestore(&timer->lock, flags);
+ return 0;
+ }
+ spin_unlock_irqrestore(&timer->lock, flags);
+ return -EINVAL;
}
EXPORT_SYMBOL_GPL(omap_dm_timer_write_status);
unsigned int omap_dm_timer_read_counter(struct omap_dm_timer *timer)
{
- unsigned int l;
+ unsigned long flags;
+ unsigned int ret;
- l = omap_dm_timer_read_reg(timer, OMAP_TIMER_COUNTER_REG);
+ if (WARN_ON(!timer))
+ return -EINVAL;
+
+ spin_lock_irqsave(&timer->lock, flags);
+ if (timer->is_early_init || timer->enabled) {
+ ret = omap_dm_timer_read_reg(timer, OMAP_TIMER_COUNTER_REG);
+ spin_unlock_irqrestore(&timer->lock, flags);
+ return ret;
+ }
- return l;
+ spin_unlock_irqrestore(&timer->lock, flags);
+ WARN_ON(!timer->enabled);
+ return -EINVAL;
}
EXPORT_SYMBOL_GPL(omap_dm_timer_read_counter);
-void omap_dm_timer_write_counter(struct omap_dm_timer *timer, unsigned int value)
+int omap_dm_timer_write_counter(struct omap_dm_timer *timer, unsigned int value)
{
- omap_dm_timer_write_reg(timer, OMAP_TIMER_COUNTER_REG, value);
+ unsigned long flags;
+
+ if (!timer)
+ return -EINVAL;
+
+ spin_lock_irqsave(&timer->lock, flags);
+ if (timer->is_early_init || timer->enabled) {
+ omap_dm_timer_write_reg(timer, OMAP_TIMER_COUNTER_REG, value);
+ spin_unlock_irqrestore(&timer->lock, flags);
+ return 0;
+ }
+ spin_unlock_irqrestore(&timer->lock, flags);
+ return -EINVAL;
}
EXPORT_SYMBOL_GPL(omap_dm_timer_write_counter);
int omap_dm_timers_active(void)
{
- int i;
-
- for (i = 0; i < dm_timer_count; i++) {
- struct omap_dm_timer *timer;
-
- timer = &dm_timers[i];
+ struct omap_dm_timer *timer;
+ list_for_each_entry(timer, &omap_timer_list, node) {
if (!timer->enabled)
continue;
@@ -737,61 +915,146 @@ int omap_dm_timers_active(void)
}
EXPORT_SYMBOL_GPL(omap_dm_timers_active);
-int __init omap_dm_timer_init(void)
+/**
+ * omap_dm_timer_probe - probe function called for every registered device
+ * @pdev: pointer to current timer platform device
+ *
+ * Called by the driver framework at the end of device registration for all
+ * timer devices.
+ */
+static int __devinit omap_dm_timer_probe(struct platform_device *pdev)
{
+ int ret;
struct omap_dm_timer *timer;
- int i, map_size = SZ_8K; /* Module 4KB + L4 4KB except on omap1 */
+ struct resource *mem, *irq, *ioarea;
+ struct dmtimer_platform_data *pdata = pdev->dev.platform_data;
- if (!(cpu_is_omap16xx() || cpu_class_is_omap2()))
+ if (!pdata) {
+ dev_err(&pdev->dev, "%s: no platform data.\n", __func__);
return -ENODEV;
+ }
- spin_lock_init(&dm_timer_lock);
-
- if (cpu_class_is_omap1()) {
- dm_timers = omap1_dm_timers;
- dm_timer_count = omap1_dm_timer_count;
- map_size = SZ_2K;
- } else if (cpu_is_omap24xx()) {
- dm_timers = omap2_dm_timers;
- dm_timer_count = omap2_dm_timer_count;
- dm_source_names = omap2_dm_source_names;
- dm_source_clocks = omap2_dm_source_clocks;
- } else if (cpu_is_omap34xx()) {
- dm_timers = omap3_dm_timers;
- dm_timer_count = omap3_dm_timer_count;
- dm_source_names = omap3_dm_source_names;
- dm_source_clocks = omap3_dm_source_clocks;
- } else if (cpu_is_omap44xx()) {
- dm_timers = omap4_dm_timers;
- dm_timer_count = omap4_dm_timer_count;
- dm_source_names = omap4_dm_source_names;
- dm_source_clocks = omap4_dm_source_clocks;
- }
-
- if (cpu_class_is_omap2())
- for (i = 0; dm_source_names[i] != NULL; i++)
- dm_source_clocks[i] = clk_get(NULL, dm_source_names[i]);
-
- if (cpu_is_omap243x())
- dm_timers[0].phys_base = 0x49018000;
-
- for (i = 0; i < dm_timer_count; i++) {
- timer = &dm_timers[i];
-
- /* Static mapping, never released */
- timer->io_base = ioremap(timer->phys_base, map_size);
- BUG_ON(!timer->io_base);
-
-#ifdef CONFIG_ARCH_OMAP2PLUS
- if (cpu_class_is_omap2()) {
- char clk_name[16];
- sprintf(clk_name, "gpt%d_ick", i + 1);
- timer->iclk = clk_get(NULL, clk_name);
- sprintf(clk_name, "gpt%d_fck", i + 1);
- timer->fclk = clk_get(NULL, clk_name);
- }
-#endif
+ irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
+ if (unlikely(!irq)) {
+ dev_err(&pdev->dev, "%s: no IRQ resource.\n", __func__);
+ return -ENODEV;
+ }
+
+ mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (unlikely(!mem)) {
+ dev_err(&pdev->dev, "%s: no memory resource.\n", __func__);
+ return -ENODEV;
+ }
+
+ ioarea = request_mem_region(mem->start, resource_size(mem),
+ pdev->name);
+ if (!ioarea) {
+ dev_err(&pdev->dev, "%s: region already claimed.\n", __func__);
+ return -EBUSY;
+ }
+
+ timer = kzalloc(sizeof(struct omap_dm_timer), GFP_KERNEL);
+ if (!timer) {
+ dev_err(&pdev->dev, "%s: no memory for omap_dm_timer.\n",
+ __func__);
+ ret = -ENOMEM;
+ goto err_release_ioregion;
+ }
+
+ timer->io_base = ioremap(mem->start, resource_size(mem));
+ if (!timer->io_base) {
+ dev_err(&pdev->dev, "%s: ioremap failed.\n", __func__);
+ ret = -ENOMEM;
+ goto err_free_mem;
+ }
+
+ if (pdata->timer_ip_type == OMAP_TIMER_IP_VERSION_2) {
+ timer->func_offset = VERSION2_TIMER_WAKEUP_EN_REG_OFFSET;
+ timer->intr_offset = VERSION2_TIMER_STAT_REG_OFFSET;
+ }
+
+ timer->irq = irq->start;
+ timer->pdev = pdev;
+ timer->is_early_init = pdata->is_early_init;
+ timer->needs_manual_reset = pdata->needs_manual_reset;
+ timer->loses_context = pdata->loses_context;
+
+ spin_lock_init(&timer->lock);
+ /* Skip pm_runtime_enable during early boot and for OMAP1 */
+ if (!pdata->is_early_init && !pdata->needs_manual_reset) {
+ pm_runtime_enable(&pdev->dev);
+ pm_runtime_irq_safe(&pdev->dev);
}
+ /* add the timer element to the list */
+ mutex_lock(&dm_timer_mutex);
+ list_add_tail(&timer->node, &omap_timer_list);
+ mutex_unlock(&dm_timer_mutex);
+
+ dev_dbg(&pdev->dev, "Device Probed.\n");
+
return 0;
+
+err_free_mem:
+ kfree(timer);
+
+err_release_ioregion:
+ release_mem_region(mem->start, resource_size(mem));
+
+ return ret;
+}
+
+/**
+ * omap_dm_timer_remove - cleanup a registered timer device
+ * @pdev: pointer to current timer platform device
+ *
+ * Called by the driver framework whenever a timer device is unregistered.
+ * In addition to freeing platform resources it also deletes the timer
+ * entry from the local list.
+ */
+static int __devexit omap_dm_timer_remove(struct platform_device *pdev)
+{
+ struct omap_dm_timer *timer;
+ int ret = -EINVAL;
+
+ mutex_lock(&dm_timer_mutex);
+ list_for_each_entry(timer, &omap_timer_list, node) {
+ if (timer->pdev->id == pdev->id) {
+ list_del(&timer->node);
+ kfree(timer);
+ ret = 0;
+ break;
+ }
+ }
+ mutex_unlock(&dm_timer_mutex);
+ pm_runtime_disable(&pdev->dev);
+
+ return ret;
+}
+
+static struct platform_driver omap_dm_timer_driver = {
+ .probe = omap_dm_timer_probe,
+ .remove = omap_dm_timer_remove,
+ .driver = {
+ .name = "omap_timer",
+ },
+};
+
+static int __init omap_dm_timer_driver_init(void)
+{
+ return platform_driver_register(&omap_dm_timer_driver);
+}
+
+static void __exit omap_dm_timer_driver_exit(void)
+{
+ platform_driver_unregister(&omap_dm_timer_driver);
}
+
+early_platform_init("earlytimer", &omap_dm_timer_driver);
+module_init(omap_dm_timer_driver_init);
+module_exit(omap_dm_timer_driver_exit);
+
+MODULE_DESCRIPTION("OMAP Dual-Mode Timer Driver");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:" DRIVER_NAME);
+MODULE_AUTHOR("Texas Instruments Inc");
diff --git a/arch/arm/plat-omap/i2c.c b/arch/arm/plat-omap/i2c.c
index 3341ca4..f8dc82f 100644
--- a/arch/arm/plat-omap/i2c.c
+++ b/arch/arm/plat-omap/i2c.c
@@ -113,16 +113,6 @@ static inline int omap1_i2c_add_bus(int bus_id)
#ifdef CONFIG_ARCH_OMAP2PLUS
-/*
- * XXX This function is a temporary compatibility wrapper - only
- * needed until the I2C driver can be converted to call
- * omap_pm_set_max_dev_wakeup_lat() and handle a return code.
- */
-static void omap_pm_set_max_mpu_wakeup_lat_compat(struct device *dev, long t)
-{
- omap_pm_set_max_mpu_wakeup_lat(dev, t);
-}
-
static struct omap_device_pm_latency omap_i2c_latency[] = {
[0] = {
.deactivate_func = omap_device_idle_hwmods,
@@ -158,8 +148,8 @@ static inline int omap2_i2c_add_bus(int bus_id)
* completes.
* Only omap3 has support for constraints
*/
- if (cpu_is_omap34xx())
- pdata->set_mpu_wkup_lat = omap_pm_set_max_mpu_wakeup_lat_compat;
+ if (cpu_is_omap34xx() || cpu_is_omap44xx())
+ pdata->needs_wakeup_latency = true;
od = omap_device_build(name, bus_id, oh, pdata,
sizeof(struct omap_i2c_bus_platform_data),
omap_i2c_latency, ARRAY_SIZE(omap_i2c_latency), 0);
diff --git a/arch/arm/plat-omap/include/plat/clkdev_omap.h b/arch/arm/plat-omap/include/plat/clkdev_omap.h
index f1899a3..324446b 100644
--- a/arch/arm/plat-omap/include/plat/clkdev_omap.h
+++ b/arch/arm/plat-omap/include/plat/clkdev_omap.h
@@ -39,11 +39,13 @@ struct omap_clk {
#define CK_36XX (1 << 10) /* 36xx/37xx-specific clocks */
#define CK_443X (1 << 11)
#define CK_TI816X (1 << 12)
+#define CK_446X (1 << 13)
#define CK_34XX (CK_3430ES1 | CK_3430ES2PLUS)
#define CK_AM35XX (CK_3505 | CK_3517) /* all Sitara AM35xx */
#define CK_3XXX (CK_34XX | CK_AM35XX | CK_36XX)
+#define CK_44XX (CK_443X | CK_446X)
#endif
diff --git a/arch/arm/plat-omap/include/plat/clock.h b/arch/arm/plat-omap/include/plat/clock.h
index 006e599..12a9ced 100644
--- a/arch/arm/plat-omap/include/plat/clock.h
+++ b/arch/arm/plat-omap/include/plat/clock.h
@@ -56,12 +56,14 @@ struct clkops {
#define RATE_IN_3430ES1 (1 << 2) /* 3430ES1 rates only */
#define RATE_IN_3430ES2PLUS (1 << 3) /* 3430 ES >= 2 rates only */
#define RATE_IN_36XX (1 << 4)
-#define RATE_IN_4430 (1 << 5)
+#define RATE_IN_443X (1 << 5)
#define RATE_IN_TI816X (1 << 6)
+#define RATE_IN_446X (1 << 7)
#define RATE_IN_24XX (RATE_IN_242X | RATE_IN_243X)
#define RATE_IN_34XX (RATE_IN_3430ES1 | RATE_IN_3430ES2PLUS)
#define RATE_IN_3XXX (RATE_IN_34XX | RATE_IN_36XX)
+#define RATE_IN_44XX (RATE_IN_443X | RATE_IN_446X)
/* RATE_IN_3430ES2PLUS_36XX includes 34xx/35xx with ES >=2, and all 36xx/37xx */
#define RATE_IN_3430ES2PLUS_36XX (RATE_IN_3430ES2PLUS | RATE_IN_36XX)
diff --git a/arch/arm/plat-omap/include/plat/common.h b/arch/arm/plat-omap/include/plat/common.h
index 5288130..c8a65ba 100644
--- a/arch/arm/plat-omap/include/plat/common.h
+++ b/arch/arm/plat-omap/include/plat/common.h
@@ -53,6 +53,7 @@ struct omap_globals {
unsigned long sms; /* SDRAM Memory Scheduler */
unsigned long ctrl; /* System Control Module */
unsigned long ctrl_pad; /* PAD Control Module */
+ unsigned long ctrl_wk_pad; /* PAD Control WakeUp Module */
unsigned long prm; /* Power and Reset Management */
unsigned long cm; /* Clock Management */
unsigned long cm2;
@@ -96,5 +97,6 @@ extern struct device *omap2_get_mpuss_device(void);
extern struct device *omap2_get_iva_device(void);
extern struct device *omap2_get_l3_device(void);
extern struct device *omap4_get_dsp_device(void);
+extern struct device *omap4_get_fdif_device(void);
#endif /* __ARCH_ARM_MACH_OMAP_COMMON_H */
diff --git a/arch/arm/plat-omap/include/plat/cpu.h b/arch/arm/plat-omap/include/plat/cpu.h
index 8198bb6..6c9fc01 100644
--- a/arch/arm/plat-omap/include/plat/cpu.h
+++ b/arch/arm/plat-omap/include/plat/cpu.h
@@ -45,7 +45,7 @@
int omap_type(void);
struct omap_chip_id {
- u16 oc;
+ u32 oc;
u8 type;
};
@@ -88,6 +88,7 @@ unsigned int omap_rev(void);
* cpu_is_omap243x(): True for OMAP2430
* cpu_is_omap343x(): True for OMAP3430
* cpu_is_omap443x(): True for OMAP4430
+ * cpu_is_omap446x(): True for OMAP4460
*/
#define GET_OMAP_CLASS (omap_rev() & 0xff)
@@ -123,6 +124,7 @@ IS_OMAP_SUBCLASS(243x, 0x243)
IS_OMAP_SUBCLASS(343x, 0x343)
IS_OMAP_SUBCLASS(363x, 0x363)
IS_OMAP_SUBCLASS(443x, 0x443)
+IS_OMAP_SUBCLASS(446x, 0x446)
IS_TI_SUBCLASS(816x, 0x816)
@@ -137,6 +139,7 @@ IS_TI_SUBCLASS(816x, 0x816)
#define cpu_is_ti816x() 0
#define cpu_is_omap44xx() 0
#define cpu_is_omap443x() 0
+#define cpu_is_omap446x() 0
#if defined(MULTI_OMAP1)
# if defined(CONFIG_ARCH_OMAP730)
@@ -361,8 +364,10 @@ IS_OMAP_TYPE(3517, 0x3517)
# if defined(CONFIG_ARCH_OMAP4)
# undef cpu_is_omap44xx
# undef cpu_is_omap443x
+# undef cpu_is_omap446x
# define cpu_is_omap44xx() is_omap44xx()
# define cpu_is_omap443x() is_omap443x()
+# define cpu_is_omap446x() is_omap446x()
# endif
/* Macros to detect if we have OMAP1 or OMAP2 */
@@ -410,6 +415,10 @@ IS_OMAP_TYPE(3517, 0x3517)
#define OMAP4430_REV_ES2_1 (OMAP443X_CLASS | (0x21 << 8))
#define OMAP4430_REV_ES2_2 (OMAP443X_CLASS | (0x22 << 8))
+#define OMAP446X_CLASS 0x44600044
+#define OMAP4460_REV_ES1_0 (OMAP446X_CLASS | (0x10 << 8))
+#define OMAP4460_REV_ES1_1 (OMAP446X_CLASS | (0x11 << 8))
+
/*
* omap_chip bits
*
@@ -439,14 +448,21 @@ IS_OMAP_TYPE(3517, 0x3517)
#define CHIP_IS_OMAP4430ES2_1 (1 << 12)
#define CHIP_IS_OMAP4430ES2_2 (1 << 13)
#define CHIP_IS_TI816X (1 << 14)
+#define CHIP_IS_OMAP4460ES1_0 (1 << 15)
+#define CHIP_IS_OMAP4460ES1_1 (1 << 16)
#define CHIP_IS_OMAP24XX (CHIP_IS_OMAP2420 | CHIP_IS_OMAP2430)
-#define CHIP_IS_OMAP4430 (CHIP_IS_OMAP4430ES1 | \
+#define CHIP_IS_OMAP443X (CHIP_IS_OMAP4430ES1 | \
CHIP_IS_OMAP4430ES2 | \
CHIP_IS_OMAP4430ES2_1 | \
CHIP_IS_OMAP4430ES2_2)
+#define CHIP_IS_OMAP446X (CHIP_IS_OMAP4460ES1_0 | \
+ CHIP_IS_OMAP4460ES1_1)
+
+#define CHIP_IS_OMAP44XX (CHIP_IS_OMAP443X | CHIP_IS_OMAP446X)
+
/*
* "GE" here represents "greater than or equal to" in terms of ES
* levels. So CHIP_GE_OMAP3430ES2 is intended to match all OMAP3430
@@ -494,4 +510,23 @@ OMAP3_HAS_FEATURE(192mhz_clk, 192MHZ_CLK)
OMAP3_HAS_FEATURE(io_wakeup, IO_WAKEUP)
OMAP3_HAS_FEATURE(sdrc, SDRC)
+/*
+ * Runtime detection of OMAP4 features
+ */
+extern u32 omap4_features;
+
+#define OMAP4_HAS_MPU_1GHZ BIT(0)
+#define OMAP4_HAS_MPU_1_2GHZ BIT(1)
+#define OMAP4_HAS_MPU_1_5GHZ BIT(2)
+
+#define OMAP4_HAS_FEATURE(feat, flag) \
+static inline unsigned int omap4_has_ ##feat(void) \
+{ \
+ return omap4_features & OMAP4_HAS_ ##flag; \
+} \
+
+OMAP4_HAS_FEATURE(mpu_1ghz, MPU_1GHZ)
+OMAP4_HAS_FEATURE(mpu_1_2ghz, MPU_1_2GHZ)
+OMAP4_HAS_FEATURE(mpu_1_5ghz, MPU_1_5GHZ)
+
#endif
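The OMAP4_HAS_FEATURE() macro above generates inline predicates (omap4_has_mpu_1ghz() and friends) that test bits in omap4_features at runtime. A minimal sketch of how platform code might consult them when picking a maximum MPU rate; the helper name and rate values are illustrative assumptions, only the omap4_has_* predicates come from the header:

/* Sketch only: helper and rates are assumptions, not from the patch. */
static unsigned long omap4_max_mpu_rate(void)
{
	if (omap4_has_mpu_1_5ghz())
		return 1500000000UL;
	if (omap4_has_mpu_1_2ghz())
		return 1200000000UL;
	if (omap4_has_mpu_1ghz())
		return 1000000000UL;
	return 800000000UL;	/* conservative default (assumption) */
}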
diff --git a/arch/arm/plat-omap/include/plat/dmtimer.h b/arch/arm/plat-omap/include/plat/dmtimer.h
index d6c70d2..aaa676f 100644
--- a/arch/arm/plat-omap/include/plat/dmtimer.h
+++ b/arch/arm/plat-omap/include/plat/dmtimer.h
@@ -35,6 +35,7 @@
#ifndef __ASM_ARCH_DMTIMER_H
#define __ASM_ARCH_DMTIMER_H
+#include <linux/spinlock_types.h>
/* clock sources */
#define OMAP_TIMER_SRC_SYS_CLK 0x00
#define OMAP_TIMER_SRC_32_KHZ 0x01
@@ -55,41 +56,103 @@
* in OMAP4 can be distinguished.
*/
#define OMAP_TIMER_IP_VERSION_1 0x1
-struct omap_dm_timer;
+#define OMAP_TIMER_IP_VERSION_2 0x2
+
+struct omap_secure_timer_dev_attr {
+ bool is_secure_timer;
+};
+
+struct timer_regs {
+ u32 tidr;
+ u32 tiocp_cfg;
+ u32 tistat;
+ u32 tisr;
+ u32 tier;
+ u32 twer;
+ u32 tclr;
+ u32 tcrr;
+ u32 tldr;
+ u32 ttrg;
+ u32 twps;
+ u32 tmar;
+ u32 tcar1;
+ u32 tsicr;
+ u32 tcar2;
+ u32 tpir;
+ u32 tnir;
+ u32 tcvr;
+ u32 tocr;
+ u32 towr;
+};
+
+struct omap_dm_timer {
+ int irq;
+ struct clk *fclk;
+ void __iomem *io_base;
+ unsigned reserved:1;
+ unsigned enabled:1;
+ unsigned posted:1;
+ unsigned is_early_init:1;
+ unsigned needs_manual_reset:1;
+ spinlock_t lock;
+ u8 func_offset;
+ u8 intr_offset;
+ bool loses_context;
+ bool context_saved;
+ u32 ctx_loss_count;
+ struct timer_regs context;
+ struct platform_device *pdev;
+ struct list_head node;
+
+};
+
extern struct omap_dm_timer *gptimer_wakeup;
extern struct sys_timer omap_timer;
struct clk;
-int omap_dm_timer_init(void);
+struct dmtimer_platform_data {
+ int (*set_timer_src)(struct platform_device *pdev, int source);
+ int timer_ip_type;
+ u32 is_early_init:1;
+ u32 needs_manual_reset:1;
+ bool loses_context;
+
+};
struct omap_dm_timer *omap_dm_timer_request(void);
struct omap_dm_timer *omap_dm_timer_request_specific(int timer_id);
-void omap_dm_timer_free(struct omap_dm_timer *timer);
-void omap_dm_timer_enable(struct omap_dm_timer *timer);
-void omap_dm_timer_disable(struct omap_dm_timer *timer);
+int omap_dm_timer_free(struct omap_dm_timer *timer);
+int omap_dm_timer_enable(struct omap_dm_timer *timer);
+int omap_dm_timer_disable(struct omap_dm_timer *timer);
int omap_dm_timer_get_irq(struct omap_dm_timer *timer);
u32 omap_dm_timer_modify_idlect_mask(u32 inputmask);
struct clk *omap_dm_timer_get_fclk(struct omap_dm_timer *timer);
-void omap_dm_timer_trigger(struct omap_dm_timer *timer);
-void omap_dm_timer_start(struct omap_dm_timer *timer);
-void omap_dm_timer_stop(struct omap_dm_timer *timer);
+int omap_dm_timer_trigger(struct omap_dm_timer *timer);
+int omap_dm_timer_start(struct omap_dm_timer *timer);
+int omap_dm_timer_stop(struct omap_dm_timer *timer);
int omap_dm_timer_set_source(struct omap_dm_timer *timer, int source);
-void omap_dm_timer_set_load(struct omap_dm_timer *timer, int autoreload, unsigned int value);
-void omap_dm_timer_set_load_start(struct omap_dm_timer *timer, int autoreload, unsigned int value);
-void omap_dm_timer_set_match(struct omap_dm_timer *timer, int enable, unsigned int match);
-void omap_dm_timer_set_pwm(struct omap_dm_timer *timer, int def_on, int toggle, int trigger);
-void omap_dm_timer_set_prescaler(struct omap_dm_timer *timer, int prescaler);
-
-void omap_dm_timer_set_int_enable(struct omap_dm_timer *timer, unsigned int value);
+int omap_dm_timer_set_load(struct omap_dm_timer *timer, int autoreload,
+ unsigned int value);
+int omap_dm_timer_set_load_start(struct omap_dm_timer *timer,
+ int autoreload, unsigned int value);
+int omap_dm_timer_set_match(struct omap_dm_timer *timer, int enable,
+ unsigned int match);
+int omap_dm_timer_set_pwm(struct omap_dm_timer *timer, int def_on,
+ int toggle, int trigger);
+int omap_dm_timer_set_prescaler(struct omap_dm_timer *timer, int prescaler);
+
+int omap_dm_timer_set_int_enable(struct omap_dm_timer *timer,
+ unsigned int value);
unsigned int omap_dm_timer_read_status(struct omap_dm_timer *timer);
-void omap_dm_timer_write_status(struct omap_dm_timer *timer, unsigned int value);
+int omap_dm_timer_write_status(struct omap_dm_timer *timer, unsigned int value);
unsigned int omap_dm_timer_read_counter(struct omap_dm_timer *timer);
-void omap_dm_timer_write_counter(struct omap_dm_timer *timer, unsigned int value);
+int omap_dm_timer_write_counter(struct omap_dm_timer *timer,
+ unsigned int value);
int omap_dm_timers_active(void);
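Most of the dmtimer calls above now return an int instead of void, so callers are expected to check for failure (for example, when a timer has not been probed yet or has already been freed). A hedged usage sketch built only from the prototypes in this header; the error-handling policy is illustrative:

#include <plat/dmtimer.h>

static struct omap_dm_timer *ex_timer;

static int example_timer_setup(void)
{
	int ret;

	ex_timer = omap_dm_timer_request();
	if (!ex_timer)
		return -EBUSY;

	ret = omap_dm_timer_set_source(ex_timer, OMAP_TIMER_SRC_32_KHZ);
	if (!ret)
		ret = omap_dm_timer_set_load_start(ex_timer, 1, 0xffffff00);
	if (ret)
		omap_dm_timer_free(ex_timer);

	return ret;
}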
diff --git a/arch/arm/plat-omap/include/plat/dsp.h b/arch/arm/plat-omap/include/plat/dsp.h
index 9c604b3..14f1228 100644
--- a/arch/arm/plat-omap/include/plat/dsp.h
+++ b/arch/arm/plat-omap/include/plat/dsp.h
@@ -24,8 +24,12 @@ struct omap_dsp_platform_data {
#if defined(CONFIG_TIDSPBRIDGE) || defined(CONFIG_TIDSPBRIDGE_MODULE)
extern void omap_dsp_reserve_sdram_memblock(void);
+phys_addr_t omap_dsp_get_mempool_size(void);
+phys_addr_t omap_dsp_get_mempool_base(void);
#else
static inline void omap_dsp_reserve_sdram_memblock(void) { }
+static inline phys_addr_t omap_dsp_get_mempool_size(void) { return 0; }
+static inline phys_addr_t omap_dsp_get_mempool_base(void) { return 0; }
#endif
#endif
diff --git a/arch/arm/plat-omap/include/plat/dsscomp.h b/arch/arm/plat-omap/include/plat/dsscomp.h
new file mode 100644
index 0000000..d41b73a
--- /dev/null
+++ b/arch/arm/plat-omap/include/plat/dsscomp.h
@@ -0,0 +1,25 @@
+#ifndef _ARCH_ARM_PLAT_OMAP_DSSCOMP_H
+#define _ARCH_ARM_PLAT_OMAP_DSSCOMP_H
+
+#include <video/omapdss.h>
+
+/* queuing operations */
+typedef struct dsscomp_data *dsscomp_t; /* handle */
+
+dsscomp_t dsscomp_new(struct omap_overlay_manager *mgr);
+u32 dsscomp_get_ovls(dsscomp_t comp);
+int dsscomp_set_ovl(dsscomp_t comp, struct dss2_ovl_info *ovl);
+int dsscomp_get_ovl(dsscomp_t comp, u32 ix, struct dss2_ovl_info *ovl);
+int dsscomp_set_mgr(dsscomp_t comp, struct dss2_mgr_info *mgr);
+int dsscomp_get_mgr(dsscomp_t comp, struct dss2_mgr_info *mgr);
+int dsscomp_setup(dsscomp_t comp, enum dsscomp_setup_mode mode,
+ struct dss2_rect_t win);
+int dsscomp_delayed_apply(dsscomp_t comp);
+void dsscomp_drop(dsscomp_t c);
+
+struct tiler_pa_info;
+int dsscomp_gralloc_queue(struct dsscomp_setup_dispc_data *d,
+ struct tiler_pa_info **pas,
+ bool early_callback,
+ void (*cb_fn)(void *, int), void *cb_arg);
+#endif
diff --git a/arch/arm/plat-omap/include/plat/gpio.h b/arch/arm/plat-omap/include/plat/gpio.h
index ec97e00..90eae5c 100644
--- a/arch/arm/plat-omap/include/plat/gpio.h
+++ b/arch/arm/plat-omap/include/plat/gpio.h
@@ -162,35 +162,61 @@
IH_MPUIO_BASE + ((nr) & 0x0f) : \
IH_GPIO_BASE + (nr))
-#define METHOD_MPUIO 0
-#define METHOD_GPIO_1510 1
-#define METHOD_GPIO_1610 2
-#define METHOD_GPIO_7XX 3
-#define METHOD_GPIO_24XX 5
-#define METHOD_GPIO_44XX 6
-
struct omap_gpio_dev_attr {
int bank_width; /* GPIO bank width */
bool dbck_flag; /* dbck required or not - True for OMAP3&4 */
};
+struct omap_gpio_reg_offs {
+ u16 revision;
+ u16 direction;
+ u16 datain;
+ u16 dataout;
+ u16 set_dataout;
+ u16 clr_dataout;
+ u16 irqstatus;
+ u16 irqstatus2;
+ u16 irqenable;
+ u16 irqenable2;
+ u16 set_irqenable;
+ u16 clr_irqenable;
+ u16 debounce;
+ u16 debounce_en;
+ u16 ctrl;
+ u16 wkup_status;
+ u16 wkup_clear;
+ u16 wkup_set;
+ u16 leveldetect0;
+ u16 leveldetect1;
+ u16 risingdetect;
+ u16 fallingdetect;
+ u16 irqctrl;
+ u16 edgectrl1;
+ u16 edgectrl2;
+ /* Not applicable for OMAP2+ as hwmod layer takes care of sysconfig */
+ u16 sysconfig;
+ u16 pinctrl;
+
+ bool irqenable_inv;
+};
+
struct omap_gpio_platform_data {
u16 virtual_irq_start;
- int bank_type;
int bank_width; /* GPIO bank width */
int bank_stride; /* Only needed for omap1 MPUIO */
+ bool suspend_support; /* whether the bank supports suspend/resume */
bool dbck_flag; /* dbck required or not - True for OMAP3&4 */
-};
+ bool loses_context; /* whether the bank would ever lose context */
+ bool is_mpuio; /* whether the bank is of type MPUIO */
+ u32 non_wakeup_gpios;
-/* TODO: Analyze removing gpio_bank_count usage from driver code */
-extern int gpio_bank_count;
+ struct omap_gpio_reg_offs *regs;
+};
-extern void omap2_gpio_prepare_for_idle(int off_mode);
-extern void omap2_gpio_resume_after_idle(void);
+extern int omap2_gpio_prepare_for_idle(int off_mode, bool suspend);
+extern void omap2_gpio_resume_after_idle(int off_mode);
extern void omap_set_gpio_debounce(int gpio, int enable);
extern void omap_set_gpio_debounce_time(int gpio, int enable);
-extern void omap_gpio_save_context(void);
-extern void omap_gpio_restore_context(void);
/*-------------------------------------------------------------------------*/
/* Wrappers for "new style" GPIO calls, using the new infrastructure
diff --git a/arch/arm/plat-omap/include/plat/gpmc.h b/arch/arm/plat-omap/include/plat/gpmc.h
index 1527929..d668790 100644
--- a/arch/arm/plat-omap/include/plat/gpmc.h
+++ b/arch/arm/plat-omap/include/plat/gpmc.h
@@ -148,8 +148,8 @@ extern int gpmc_cs_reserved(int cs);
extern int gpmc_prefetch_enable(int cs, int fifo_th, int dma_mode,
unsigned int u32_count, int is_write);
extern int gpmc_prefetch_reset(int cs);
-extern void omap3_gpmc_save_context(void);
-extern void omap3_gpmc_restore_context(void);
+extern void omap_gpmc_save_context(void);
+extern void omap_gpmc_restore_context(void);
extern int gpmc_read_status(int cmd);
extern int gpmc_cs_configure(int cs, int cmd, int wval);
extern int gpmc_nand_read(int cs, int cmd);
diff --git a/arch/arm/plat-omap/include/plat/gpu.h b/arch/arm/plat-omap/include/plat/gpu.h
new file mode 100644
index 0000000..0a6313b
--- /dev/null
+++ b/arch/arm/plat-omap/include/plat/gpu.h
@@ -0,0 +1,39 @@
+/*
+ * arch/arm/plat-omap/include/plat/gpu.h
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#ifndef OMAP_GPU_H
+#define OMAP_GPU_H
+
+#include <plat/omap-pm.h>
+#include <linux/platform_device.h>
+
+struct gpu_platform_data {
+
+ /* Number of overdrive frequencies */
+ unsigned int ovfreqs;
+
+ void (*set_min_bus_tput)(struct device *dev, u8 agent_id,
+ unsigned long r);
+ int (*device_scale) (struct device *req_dev, struct device *target_dev,
+ unsigned long rate);
+ int (*device_enable) (struct platform_device *pdev);
+ int (*device_shutdown) (struct platform_device *pdev);
+ int (*device_idle) (struct platform_device *pdev);
+};
+
+#endif
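gpu_platform_data is a set of callbacks that lets the GPU driver reach OMAP PM and omap_device services without including their headers directly. A board-level sketch of how it might be filled in, assuming omap_device_enable/idle/shutdown (plat/omap_device.h) and omap_pm_set_min_bus_tput() are available; the small wrapper exists only because the callback returns void while omap_pm_set_min_bus_tput() returns int:

/* Sketch: this wiring is an assumption, not taken from a real board file. */
static void gpu_set_min_bus_tput(struct device *dev, u8 agent_id,
				 unsigned long r)
{
	omap_pm_set_min_bus_tput(dev, agent_id, r);
}

static struct gpu_platform_data omap_gpu_pdata = {
	.set_min_bus_tput = gpu_set_min_bus_tput,
	.device_enable	  = omap_device_enable,
	.device_idle	  = omap_device_idle,
	.device_shutdown  = omap_device_shutdown,
};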
diff --git a/arch/arm/plat-omap/include/plat/io.h b/arch/arm/plat-omap/include/plat/io.h
index d72ec85..a2f7d31 100644
--- a/arch/arm/plat-omap/include/plat/io.h
+++ b/arch/arm/plat-omap/include/plat/io.h
@@ -228,12 +228,12 @@
#define OMAP44XX_EMIF2_PHYS OMAP44XX_EMIF2_BASE
/* 0x4d000000 --> 0xfd200000 */
-#define OMAP44XX_EMIF2_VIRT (OMAP44XX_EMIF2_PHYS + OMAP4_L3_PER_IO_OFFSET)
+#define OMAP44XX_EMIF2_VIRT (OMAP44XX_EMIF1_VIRT + SZ_1M)
#define OMAP44XX_EMIF2_SIZE SZ_1M
#define OMAP44XX_DMM_PHYS OMAP44XX_DMM_BASE
/* 0x4e000000 --> 0xfd300000 */
-#define OMAP44XX_DMM_VIRT (OMAP44XX_DMM_PHYS + OMAP4_L3_PER_IO_OFFSET)
+#define OMAP44XX_DMM_VIRT (OMAP44XX_EMIF2_VIRT + SZ_1M)
#define OMAP44XX_DMM_SIZE SZ_1M
/*
* ----------------------------------------------------------------------------
diff --git a/arch/arm/plat-omap/include/plat/iommu.h b/arch/arm/plat-omap/include/plat/iommu.h
index 174f1b9..ed33ddf 100644
--- a/arch/arm/plat-omap/include/plat/iommu.h
+++ b/arch/arm/plat-omap/include/plat/iommu.h
@@ -13,6 +13,8 @@
#ifndef __MACH_IOMMU_H
#define __MACH_IOMMU_H
+#include <linux/pm_qos_params.h>
+
struct iotlb_entry {
u32 da;
u32 pa;
@@ -28,7 +30,6 @@ struct iotlb_entry {
struct iommu {
const char *name;
struct module *owner;
- struct clk *clk;
void __iomem *regbase;
struct device *dev;
void *isr_priv;
@@ -53,6 +54,10 @@ struct iommu {
 void *ctx; /* iommu context: registers saved area */
u32 da_start;
u32 da_end;
+ struct platform_device *pdev;
+ struct pm_qos_request_list *qos_request;
+ void *secure_ttb;
+ bool secure_mode;
};
struct cr_regs {
@@ -104,10 +109,12 @@ struct iommu_functions {
struct iommu_platform_data {
const char *name;
- const char *clk_name;
+ const char *oh_name;
const int nr_tlb_entries;
u32 da_start;
u32 da_end;
+ int irq;
+ void __iomem *io_base;
};
/* IOMMU errors */
@@ -174,6 +181,8 @@ extern int iommu_set_isr(const char *name,
void *priv),
void *isr_priv);
+extern int iommu_set_secure(const char *name, bool enable, void *data);
+
extern void iommu_save_ctx(struct iommu *obj);
extern void iommu_restore_ctx(struct iommu *obj);
diff --git a/arch/arm/plat-omap/include/plat/iommu2.h b/arch/arm/plat-omap/include/plat/iommu2.h
index 10ad05f..45b2e36 100644
--- a/arch/arm/plat-omap/include/plat/iommu2.h
+++ b/arch/arm/plat-omap/include/plat/iommu2.h
@@ -36,6 +36,7 @@
#define MMU_READ_CAM 0x68
#define MMU_READ_RAM 0x6c
#define MMU_EMU_FAULT_AD 0x70
+#define MMU_GP_REG 0x88
#define MMU_REG_SIZE 256
diff --git a/arch/arm/plat-omap/include/plat/irqs-44xx.h b/arch/arm/plat-omap/include/plat/irqs-44xx.h
index 518322c..78839f1 100644
--- a/arch/arm/plat-omap/include/plat/irqs-44xx.h
+++ b/arch/arm/plat-omap/include/plat/irqs-44xx.h
@@ -141,4 +141,13 @@
#define OMAP44XX_IRQ_KBD_CTL (120 + OMAP44XX_IRQ_GIC_START)
#define OMAP44XX_IRQ_UNIPRO1 (124 + OMAP44XX_IRQ_GIC_START)
+/*
+ * GIC interrupts 54, 55, 60, 105, 106, 121, 122, 123, 125, and 127 are tied
+ * low, and can be repurposed as SW triggered IRQs
+ */
+#define OMAP44XX_IRQ_FIQ_DEBUGGER (54 + OMAP44XX_IRQ_GIC_START)
+#define OMAP44XX_IRQ_THERMAL_PROXY (55 + OMAP44XX_IRQ_GIC_START)
+#define OMAP44XX_IRQ_CPUIDLE_POKE0 (60 + OMAP44XX_IRQ_GIC_START)
+#define OMAP44XX_IRQ_CPUIDLE_POKE1 (105 + OMAP44XX_IRQ_GIC_START)
+
#endif
diff --git a/arch/arm/plat-omap/include/plat/irqs.h b/arch/arm/plat-omap/include/plat/irqs.h
index 5a25098..2cfba51 100644
--- a/arch/arm/plat-omap/include/plat/irqs.h
+++ b/arch/arm/plat-omap/include/plat/irqs.h
@@ -407,11 +407,19 @@
#endif
#define TWL6030_IRQ_END (TWL6030_IRQ_BASE + TWL6030_BASE_NR_IRQS)
+#define TWL6040_CODEC_IRQ_BASE TWL6030_IRQ_END
+#ifdef CONFIG_TWL6040_CODEC
+#define TWL6040_CODEC_NR_IRQS 6
+#else
+#define TWL6040_CODEC_NR_IRQS 0
+#endif
+#define TWL6040_CODEC_IRQ_END (TWL6040_CODEC_IRQ_BASE + TWL6040_CODEC_NR_IRQS)
+
/* Total number of interrupts depends on the enabled blocks above */
-#if (TWL4030_GPIO_IRQ_END > TWL6030_IRQ_END)
+#if (TWL4030_GPIO_IRQ_END > TWL6040_CODEC_IRQ_END)
#define TWL_IRQ_END TWL4030_GPIO_IRQ_END
#else
-#define TWL_IRQ_END TWL6030_IRQ_END
+#define TWL_IRQ_END TWL6040_CODEC_IRQ_END
#endif
/* GPMC related */
diff --git a/arch/arm/plat-omap/include/plat/mcasp.h b/arch/arm/plat-omap/include/plat/mcasp.h
new file mode 100644
index 0000000..aead0a0
--- /dev/null
+++ b/arch/arm/plat-omap/include/plat/mcasp.h
@@ -0,0 +1,52 @@
+/*
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+ * 02110-1301 USA
+ *
+ */
+
+#ifndef __OMAP_PLAT_MCASP_H__
+#define __OMAP_PLAT_MCASP_H__
+
+#include <linux/platform_device.h>
+
+/* The SPDIF bit clock is derived from the McASP functional clock.
+ * The McASP has two programmable clock dividers (aclkxdiv and
+ * ahclkxdiv) that are configured via the registers MCASP_ACLKXCTL
+ * and MCASP_AHCLKXCTL. For SPDIF the bit clock frequency should be
+ * 128 * sample rate freq. Therefore...
+ *
+ * McASP functional clock = aclkxdiv * ahclkxdiv * 128 * sample rate
+ *
+ * For each sample rate supported the user must define the aclkxdiv
+ * and ahclkxdiv values that are passed to the McASP driver via the
+ * following structure. The McASP functional clock frequency can also be
+ * configured, and it is passed to the McASP driver via the
+ * omap_mcasp_platform_data structure below.
+ */
+struct omap_mcasp_configs {
+ unsigned int sampling_rate;
+ u16 aclkxdiv;
+ u16 ahclkxdiv;
+};
+
+struct omap_mcasp_platform_data {
+ unsigned long mcasp_fclk_rate;
+ struct omap_mcasp_configs *mcasp_configs;
+ unsigned int num_configs;
+};
+
+void omap_init_mcasp(struct omap_mcasp_platform_data *pdata);
+
+#endif
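Working the formula in the comment above backwards, a board that only needs 48 kHz SPDIF could describe it as follows; the divider values and the 24.576 MHz functional clock are illustrative assumptions, not TRM data (2 * 2 * 128 * 48000 = 24,576,000):

/* Sketch only: dividers and fclk rate are assumptions. */
static struct omap_mcasp_configs board_mcasp_configs[] = {
	{
		.sampling_rate	= 48000,
		.aclkxdiv	= 2,
		.ahclkxdiv	= 2,
	},
};

static struct omap_mcasp_platform_data board_mcasp_pdata = {
	.mcasp_fclk_rate = 24576000,
	.mcasp_configs	 = board_mcasp_configs,
	.num_configs	 = ARRAY_SIZE(board_mcasp_configs),
};

Board init code would then hand this to the driver with omap_init_mcasp(&board_mcasp_pdata).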
diff --git a/arch/arm/plat-omap/include/plat/mcbsp.h b/arch/arm/plat-omap/include/plat/mcbsp.h
index f8f690a..ddada0b 100644
--- a/arch/arm/plat-omap/include/plat/mcbsp.h
+++ b/arch/arm/plat-omap/include/plat/mcbsp.h
@@ -403,6 +403,8 @@ struct omap_mcbsp_platform_data {
#endif
u16 buffer_size;
unsigned int mcbsp_config_type;
+ char clks_pad_src[30];
+ char clks_prcm_src[30];
};
struct omap_mcbsp_st_data {
@@ -448,6 +450,8 @@ struct omap_mcbsp {
struct clk *fclk;
#ifdef CONFIG_ARCH_OMAP3
struct omap_mcbsp_st_data *st_data;
+#endif
+#if defined(CONFIG_ARCH_OMAP3) || defined(CONFIG_ARCH_OMAP4)
int dma_op_mode;
u16 max_tx_thres;
u16 max_rx_thres;
@@ -474,7 +478,7 @@ int omap_mcbsp_init(void);
void omap_mcbsp_register_board_cfg(struct resource *res, int res_count,
struct omap_mcbsp_platform_data *config, int size);
void omap_mcbsp_config(unsigned int id, const struct omap_mcbsp_reg_cfg * config);
-#ifdef CONFIG_ARCH_OMAP3
+#if defined(CONFIG_ARCH_OMAP3) || defined(CONFIG_ARCH_OMAP4)
void omap_mcbsp_set_tx_threshold(unsigned int id, u16 threshold);
void omap_mcbsp_set_rx_threshold(unsigned int id, u16 threshold);
u16 omap_mcbsp_get_max_tx_threshold(unsigned int id);
diff --git a/arch/arm/plat-omap/include/plat/mcpdm.h b/arch/arm/plat-omap/include/plat/mcpdm.h
new file mode 100644
index 0000000..19ae03b
--- /dev/null
+++ b/arch/arm/plat-omap/include/plat/mcpdm.h
@@ -0,0 +1,28 @@
+/*
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+ * 02110-1301 USA
+ *
+ */
+
+#ifndef __OMAP_PLAT_MCPDM_H__
+#define __OMAP_PLAT_MCPDM_H__
+
+#include <linux/platform_device.h>
+
+struct omap_mcpdm_platform_data {
+ bool (*was_context_lost)(struct device *dev);
+};
+
+#endif
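The single callback in omap_mcpdm_platform_data lets the McPDM driver ask whether it must restore its registers after a power transition. Since omap_pm_was_context_lost() (plat/omap-pm.h, further below) has exactly the same signature, a plausible wiring is simply:

/* Sketch: assumes the board wants the generic context-loss query. */
static struct omap_mcpdm_platform_data mcpdm_pdata = {
	.was_context_lost = omap_pm_was_context_lost,
};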
diff --git a/arch/arm/plat-omap/include/plat/mcspi.h b/arch/arm/plat-omap/include/plat/mcspi.h
index 3d51b18..091caa1 100644
--- a/arch/arm/plat-omap/include/plat/mcspi.h
+++ b/arch/arm/plat-omap/include/plat/mcspi.h
@@ -21,6 +21,8 @@ struct omap2_mcspi_device_config {
/* Do we want one channel enabled at the same time? */
unsigned single_channel:1;
+ /* Swap data lines */
+ unsigned swap_datalines;
};
#endif
diff --git a/arch/arm/plat-omap/include/plat/mmc.h b/arch/arm/plat-omap/include/plat/mmc.h
index c7b8741..b95aabd 100644
--- a/arch/arm/plat-omap/include/plat/mmc.h
+++ b/arch/arm/plat-omap/include/plat/mmc.h
@@ -15,6 +15,7 @@
#include <linux/device.h>
#include <linux/mmc/host.h>
+#include <asm/mach/mmc.h>
#include <plat/board.h>
#define OMAP15XX_NR_MMC 1
@@ -60,9 +61,6 @@ struct omap_mmc_platform_data {
int (*suspend)(struct device *dev, int slot);
int (*resume)(struct device *dev, int slot);
- /* Return context loss count due to PM states changing */
- int (*get_context_loss_count)(struct device *dev);
-
u64 dma_mask;
/* Integrating attributes from the omap_hwmod layer */
@@ -108,8 +106,9 @@ struct omap_mmc_platform_data {
unsigned vcc_aux_disable_is_sleep:1;
/* we can put the features above into this variable */
-#define HSMMC_HAS_PBIAS (1 << 0)
-#define HSMMC_HAS_UPDATED_RESET (1 << 1)
+#define HSMMC_HAS_PBIAS (1 << 0)
+#define HSMMC_HAS_UPDATED_RESET (1 << 1)
+#define HSMMC_HAS_48MHZ_MASTER_CLK (1 << 2)
unsigned features;
int switch_pin; /* gpio (card detect) */
@@ -146,6 +145,9 @@ struct omap_mmc_platform_data {
int card_detect_irq;
int (*card_detect)(struct device *dev, int slot);
+ /* Additional mmc configuration */
+ struct mmc_platform_data mmc_data;
+
unsigned int ban_openended:1;
} slots[OMAP_MMC_MAX_SLOTS];
diff --git a/arch/arm/plat-omap/include/plat/omap-pm.h b/arch/arm/plat-omap/include/plat/omap-pm.h
index c0a7520..2efbff5 100644
--- a/arch/arm/plat-omap/include/plat/omap-pm.h
+++ b/arch/arm/plat-omap/include/plat/omap-pm.h
@@ -17,7 +17,9 @@
#include <linux/device.h>
#include <linux/cpufreq.h>
#include <linux/clk.h>
+#include <linux/pm_qos_params.h>
#include <linux/opp.h>
+#include <linux/pm_qos_params.h>
/*
* agent_id values for use with omap_pm_set_min_bus_tput():
@@ -73,7 +75,8 @@ void omap_pm_if_exit(void);
/**
* omap_pm_set_max_mpu_wakeup_lat - set the maximum MPU wakeup latency
- * @dev: struct device * requesting the constraint
+ * @qos_request: handle for the constraint. The pointer should be
+ * initialized to NULL
* @t: maximum MPU wakeup latency in microseconds
*
* Request that the maximum interrupt latency for the MPU to be no
@@ -105,7 +108,8 @@ void omap_pm_if_exit(void);
* Returns -EINVAL for an invalid argument, -ERANGE if the constraint
* is not satisfiable, or 0 upon success.
*/
-int omap_pm_set_max_mpu_wakeup_lat(struct device *dev, long t);
+int omap_pm_set_max_mpu_wakeup_lat(struct pm_qos_request_list **qos_request,
+ long t);
/**
@@ -132,12 +136,12 @@ int omap_pm_set_max_mpu_wakeup_lat(struct device *dev, long t);
*
* Multiple calls to omap_pm_set_min_bus_tput() will replace the
* previous rate value for this device. To remove the interconnect
- * throughput restriction for this device, call with r = 0.
+ * throughput restriction for this device, call with r = -1.
*
* Returns -EINVAL for an invalid argument, -ERANGE if the constraint
* is not satisfiable, or 0 upon success.
*/
-int omap_pm_set_min_bus_tput(struct device *dev, u8 agent_id, unsigned long r);
+int omap_pm_set_min_bus_tput(struct device *dev, u8 agent_id, long r);
/**
@@ -172,7 +176,8 @@ int omap_pm_set_max_dev_wakeup_lat(struct device *req_dev, struct device *dev,
/**
* omap_pm_set_max_sdma_lat - set the maximum system DMA transfer start latency
- * @dev: struct device *
+ * @qos_request: handle for the constraint. The pointer should be
+ * initialized to NULL
* @t: maximum DMA transfer start latency in microseconds
*
* Request that the maximum system DMA transfer start latency for this
@@ -197,7 +202,8 @@ int omap_pm_set_max_dev_wakeup_lat(struct device *req_dev, struct device *dev,
* Returns -EINVAL for an invalid argument, -ERANGE if the constraint
* is not satisfiable, or 0 upon success.
*/
-int omap_pm_set_max_sdma_lat(struct device *dev, long t);
+int omap_pm_set_max_sdma_lat(struct pm_qos_request_list **qos_request,
+ long t);
/**
@@ -337,24 +343,26 @@ unsigned long omap_pm_cpu_get_freq(void);
*/
/**
- * omap_pm_get_dev_context_loss_count - return count of times dev has lost ctx
- * @dev: struct device *
- *
- * This function returns the number of times that the device @dev has
- * lost its internal context. This generally occurs on a powerdomain
- * transition to OFF. Drivers use this as an optimization to avoid restoring
- * context if the device hasn't lost it. To use, drivers should initially
- * call this in their context save functions and store the result. Early in
- * the driver's context restore function, the driver should call this function
- * again, and compare the result to the stored counter. If they differ, the
- * driver must restore device context. If the number of context losses
- * exceeds the maximum positive integer, the function will wrap to 0 and
- * continue counting. Returns the number of context losses for this device,
- * or zero upon error.
+ * omap_pm_was_context_lost - return true if a device lost hw context
+ *
+ * This function returns a bool indicating whether a device has lost
+ * its context. Depending on the HW implementation of the device, context
+ * can be lost in OFF or OSWR. This function reads and *CLEARS* the context
+ * lost registers for the device.
+ */
+bool omap_pm_was_context_lost(struct device *dev);
+
+/**
+ * omap_pm_set_min_mpu_freq - set the minimum frequency the MPU is allowed
+ * to run at. The function works with a granularity of 1000000: any requested
+ * frequency will set the MPU frequency to the closest higher frequency that
+ * can satisfy the request. To release the constraint, pass -1 as f.
*/
-u32 omap_pm_get_dev_context_loss_count(struct device *dev);
+int omap_pm_set_min_mpu_freq(struct device *dev, unsigned long f);
void omap_pm_enable_off_mode(void);
void omap_pm_disable_off_mode(void);
+extern bool off_mode_enabled;
+
#endif
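The reworked constraint API keeps the PM QoS handle on the caller's side: the pointer starts out NULL, the first call registers the request, and later calls update or drop it. A hedged driver-side sketch using only the prototypes above; treating t = -1 as "remove the constraint" mirrors the documented convention of the other calls and is an assumption here:

static struct pm_qos_request_list *mpu_wakeup_qos;	/* must start NULL */

static int example_start_io(struct device *dev)
{
	/* Cap MPU wakeup latency at 10 us while I/O is in flight. */
	return omap_pm_set_max_mpu_wakeup_lat(&mpu_wakeup_qos, 10);
}

static void example_stop_io(struct device *dev)
{
	/* Assumption: -1 releases the constraint, as for the other calls. */
	omap_pm_set_max_mpu_wakeup_lat(&mpu_wakeup_qos, -1);

	if (omap_pm_was_context_lost(dev))
		dev_dbg(dev, "context lost, restore registers here\n");
}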
diff --git a/arch/arm/plat-omap/include/plat/omap-serial.h b/arch/arm/plat-omap/include/plat/omap-serial.h
index 2682043..2fbb5f0 100644
--- a/arch/arm/plat-omap/include/plat/omap-serial.h
+++ b/arch/arm/plat-omap/include/plat/omap-serial.h
@@ -36,13 +36,14 @@
/* WER = 0x7F
* Enable module level wakeup in WER reg
*/
-#define OMAP_UART_WER_MOD_WKUP 0X7F
+#define OMAP2_UART_WER_MOD_WKUP 0X7F
+#define OMAP4_UART_WER_MOD_WKUP 0XFF
/* Enable XON/XOFF flow control on output */
-#define OMAP_UART_SW_TX 0x04
+#define OMAP_UART_SW_TX 0x8
/* Enable XON/XOFF flow control on input */
-#define OMAP_UART_SW_RX 0x04
+#define OMAP_UART_SW_RX 0x2
#define OMAP_UART_SYSC_RESET 0X07
#define OMAP_UART_TCR_TRIG 0X0F
@@ -51,18 +52,51 @@
#define OMAP_UART_DMA_CH_FREE -1
-#define RX_TIMEOUT (3 * HZ)
+#define RX_TIMEOUT (3 * HZ) /* RX DMA timeout (jiffies) */
+
+#define DEFAULT_RXDMA_TIMEOUT (3 * HZ) /* RX DMA timeout (jiffies) */
+#define DEFAULT_RXDMA_POLLRATE 1 /* RX DMA polling rate (us) */
+#define DEFAULT_RXDMA_BUFSIZE 4096 /* RX DMA buffer size */
+#define DEFAULT_AUTOSUSPEND_DELAY 3000 /* Runtime autosuspend (msecs)*/
+
+/*
+ * (Errata i659) - From OMAP4430 ES 2.0 onwards, set
+ * tx_threshold while using the UART in DMA mode
+ * and ensure tx_threshold + tx_trigger <= 63
+ */
+#define UART_MDR3 0x20
+#define UART_TX_DMA_THRESHOLD 0x21
+#define SET_DMA_TX_THRESHOLD BIT(2)
+/* Setting TX Threshold Level to 62 */
+#define TX_FIFO_THR_LVL 0x3E
+
#define OMAP_MAX_HSUART_PORTS 4
#define MSR_SAVE_FLAGS UART_MSR_ANY_DELTA
+#define UART_ERRATA_i202_MDR1_ACCESS BIT(0)
+#define OMAP4_UART_ERRATA_i659_TX_THR BIT(1)
+
struct omap_uart_port_info {
- bool dma_enabled; /* To specify DMA Mode */
+ int dma_rx_buf_size;/* DMA Rx Buffer Size */
+ int dma_rx_timeout; /* DMA RX timeout */
+ unsigned int idle_timeout; /* OMAP UART idle timeout */
+ int use_dma; /* DMA Enable / Disable */
unsigned int uartclk; /* UART clock rate */
- void __iomem *membase; /* ioremap cookie or NULL */
- resource_size_t mapbase; /* resource base */
- unsigned long irqflags; /* request_irq flags */
upf_t flags; /* UPF_* flags */
+ unsigned int errata;
+ unsigned int console_uart;
+ u16 wer; /* Module Wakeup register */
+ unsigned int dma_rx_poll_rate; /* DMA RX poll_rate */
+ unsigned int auto_sus_timeout; /* Auto_suspend timeout */
+ unsigned rts_mux_driver_control:1;
+
+ void (*enable_wakeup)(struct platform_device *, bool);
+ bool (*chk_wakeup)(struct platform_device *);
+ void (*wake_peer)(struct uart_port *);
+ void __iomem *wk_st;
+ void __iomem *wk_en;
+ u32 wk_mask;
};
struct uart_omap_dma {
@@ -86,8 +120,9 @@ struct uart_omap_dma {
spinlock_t rx_lock;
/* timer to poll activity on rx dma */
struct timer_list rx_timer;
- int rx_buf_size;
- int rx_timeout;
+ unsigned int rx_buf_size;
+ unsigned int rx_poll_rate;
+ unsigned int rx_timeout;
};
struct uart_omap_port {
@@ -100,8 +135,13 @@ struct uart_omap_port {
unsigned char mcr;
unsigned char fcr;
unsigned char efr;
+ unsigned char dll;
+ unsigned char dlh;
+ unsigned char mdr1;
+ unsigned char wer;
int use_dma;
+ bool suspended;
/*
* Some bits in registers are cleared on a read, so they must
* be saved whenever the register is read but the bits will not
@@ -110,7 +150,16 @@ struct uart_omap_port {
unsigned int lsr_break_flag;
unsigned char msr_saved_flags;
char name[20];
+ unsigned int console_lock;
unsigned long port_activity;
+ int context_loss_cnt;
+ /* RTS control via driver */
+ unsigned rts_mux_driver_control:1;
+ unsigned rts_pullup_in_suspend:1;
+
+ unsigned int errata;
+ void (*enable_wakeup)(struct platform_device *, bool);
+ bool (*chk_wakeup)(struct platform_device *);
+ void (*wake_peer)(struct uart_port *);
};
-
#endif /* __OMAP_SERIAL_H__ */
diff --git a/arch/arm/plat-omap/include/plat/omap44xx.h b/arch/arm/plat-omap/include/plat/omap44xx.h
index ea2b8a6..f6da497 100644
--- a/arch/arm/plat-omap/include/plat/omap44xx.h
+++ b/arch/arm/plat-omap/include/plat/omap44xx.h
@@ -22,6 +22,9 @@
#define L4_PER_44XX_BASE 0x48000000
#define L4_EMU_44XX_BASE 0x54000000
#define L3_44XX_BASE 0x44000000
+#define L3_44XX_BASE_CLK1 L3_44XX_BASE
+#define L3_44XX_BASE_CLK2 0x44800000
+#define L3_44XX_BASE_CLK3 0x45000000
#define OMAP44XX_EMIF1_BASE 0x4c000000
#define OMAP44XX_EMIF2_BASE 0x4d000000
#define OMAP44XX_DMM_BASE 0x4e000000
@@ -34,6 +37,7 @@
#define OMAP44XX_GPMC_BASE 0x50000000
#define OMAP443X_SCM_BASE 0x4a002000
#define OMAP443X_CTRL_BASE 0x4a100000
+#define OMAP443X_CTRL_WK_BASE 0x4a31e000
#define OMAP44XX_IC_BASE 0x48200000
#define OMAP44XX_IVA_INTC_BASE 0x40000000
#define IRQ_SIR_IRQ 0x0040
@@ -45,6 +49,7 @@
#define OMAP44XX_WKUPGEN_BASE 0x48281000
#define OMAP44XX_MCPDM_BASE 0x40132000
#define OMAP44XX_MCPDM_L3_BASE 0x49032000
+#define OMAP44XX_SAR_RAM_BASE 0x4a326000
#define OMAP44XX_MAILBOX_BASE (L4_44XX_BASE + 0xF4000)
#define OMAP44XX_HSUSB_OTG_BASE (L4_44XX_BASE + 0xAB000)
@@ -57,5 +62,7 @@
#define OMAP44XX_HSUSB_OHCI_BASE (L4_44XX_BASE + 0x64800)
#define OMAP44XX_HSUSB_EHCI_BASE (L4_44XX_BASE + 0x64C00)
+#define OMAP44XX_C2C_BASE 0x5c000000
+
#endif /* __ASM_ARCH_OMAP44XX_H */
diff --git a/arch/arm/plat-omap/include/plat/omap_device.h b/arch/arm/plat-omap/include/plat/omap_device.h
index e4c349f..70d31d0 100644
--- a/arch/arm/plat-omap/include/plat/omap_device.h
+++ b/arch/arm/plat-omap/include/plat/omap_device.h
@@ -107,7 +107,7 @@ void __iomem *omap_device_get_rt_va(struct omap_device *od);
int omap_device_align_pm_lat(struct platform_device *pdev,
u32 new_wakeup_lat_limit);
struct powerdomain *omap_device_get_pwrdm(struct omap_device *od);
-u32 omap_device_get_context_loss_count(struct platform_device *pdev);
+int omap_device_get_context_loss_count(struct platform_device *pdev);
/* Other */
diff --git a/arch/arm/plat-omap/include/plat/omap_hsi.h b/arch/arm/plat-omap/include/plat/omap_hsi.h
new file mode 100644
index 0000000..1a75ed4
--- /dev/null
+++ b/arch/arm/plat-omap/include/plat/omap_hsi.h
@@ -0,0 +1,494 @@
+/*
+ * /mach/omap_hsi.h
+ *
+ * Hardware definitions for HSI and SSI.
+ *
+ * Copyright (C) 2007-2008 Nokia Corporation. All rights reserved.
+ * Copyright (C) 2009 Texas Instruments, Inc.
+ *
+ * Author: Carlos Chinea <carlos.chinea@nokia.com>
+ * Author: Sebastien JAN <s-jan@ti.com>
+ *
+ * This package is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
+ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
+ */
+
+/* NOTE: This file defines the register address offsets for both the
+ * SSI and HSI devices. Most of the registers share the same offset between
+ * these devices.
+ * When common or HSI only, the constants are named HSI*. Otherwise the
+ * SSI-specific constants are named HSI_SSI*.
+ */
+
+#ifndef __OMAP_HSI_H__
+#define __OMAP_HSI_H__
+
+/* Set the HSI Functional Clock to 96MHz.
+ * This is to ensure HSI will function even at OPP50. */
+#define HSI_DEFAULT_FCLK 96000000 /* 96 MHz */
+
+
+#define HSI_PORT_OFFSET 0x1000
+
+/*
+ * GDD base addr : 0x48059000 (SSI)
+ * GDD base addr : 0x4A059000 (HSI)
+ */
+#define HSI_GDD_OFFSET 0x1000
+#define HSI_GDD_BASE HSI_GDD_OFFSET /* 0x9000 */
+
+/*
+ * HST base addr:
+ * port 1: 0x4805a000 (SSI) - 0x4A05a000 (HSI)
+ * port 2: 0x4805b000 (SSI) - 0x4a05b000 (HSI)
+ */
+#define HSI_HST_OFFSET 0x2000
+#define HSI_HST_BASE(port) (HSI_HST_OFFSET + (((port) - 1) *\
+ (HSI_PORT_OFFSET)))
+ /*
+ * HSR base addr:
+ * port 1: 0x4805a800 (SSI) - 0x4A05a800 (HSI)
+ * port 2: 0x4805b800 (SSI) - 0x4A05b800 (HSI)
+ */
+#define HSI_HSR_OFFSET 0x2800
+#define HSI_HSR_BASE(port) (HSI_HSR_OFFSET + (((port) - 1) *\
+ (HSI_PORT_OFFSET)))
+/*
+ * HSI SYS registers
+ */
+#define HSI_SYS_REVISION_REG 0x0000
+#define HSI_SSI_REV_MASK 0x000000ff
+#define HSI_SSI_REV_MAJOR 0xf0
+#define HSI_SSI_REV_MINOR 0x0f
+
+#define HSI_SYS_SYSCONFIG_REG 0x0010
+#define HSI_AUTOIDLE (1 << 0)
+#define HSI_SOFTRESET (1 << 1)
+#define HSI_FREE_EMU (1 << 2) /* Only for HSI */
+#define HSI_SIDLEMODE_FORCE 0
+#define HSI_SIDLEMODE_NO (1 << 3)
+#define HSI_SIDLEMODE_SMART (1 << 4)
+#define HSI_SIDLEMODE_SMART_WAKEUP (3 << 3)
+#define HSI_SIDLEMODE_MASK 0x00000018
+#define HSI_MIDLEMODE_FORCE 0
+#define HSI_MIDLEMODE_NO (1 << 12)
+#define HSI_MIDLEMODE_SMART (1 << 13)
+#define HSI_MIDLEMODE_SMART_WAKEUP (3 << 12)
+#define HSI_MIDLEMODE_MASK 0x00003000
+
+#define HSI_SYS_SYSSTATUS_REG 0x0014
+#define HSI_RESETDONE 1
+
+#define HSI_SYS_MPU_STATUS_BASE 0x0808
+#define HSI_SYS_MPU_STATUS_PORT_OFFSET 0x10
+#define HSI_SYS_MPU_STATUS_IRQ_OFFSET 8
+
+#define HSI_SYS_MPU_STATUS_REG(port, irq) \
+ (HSI_SYS_MPU_STATUS_BASE + \
+ ((((port) - 1) * HSI_SYS_MPU_STATUS_PORT_OFFSET) +\
+ ((irq) * HSI_SYS_MPU_STATUS_IRQ_OFFSET)))
+#define HSI_SYS_MPU_ENABLE_BASE 0x080c
+#define HSI_SYS_MPU_ENABLE_PORT_OFFSET 0x10
+#define HSI_SYS_MPU_ENABLE_IRQ_OFFSET 8
+
+#define HSI_SYS_MPU_ENABLE_REG(port, irq) \
+ (HSI_SYS_MPU_ENABLE_BASE + \
+ ((((port) - 1) * HSI_SYS_MPU_ENABLE_PORT_OFFSET) +\
+ ((irq) * HSI_SYS_MPU_ENABLE_IRQ_OFFSET)))
+#define HSI_HST_DATAACCEPT(channel) (((channel) < 8) ? \
+ (1 << (channel)) : \
+ (1 << ((channel) - 8)))
+#define HSI_HSR_DATAAVAILABLE(channel) ((channel) < 8 ? \
+ (1 << ((channel) + 8)) : \
+ (1 << ((channel) - 8 + 8)))
+#define HSI_HSR_DATAOVERRUN(channel) ((channel) < 8 ? \
+ (1 << ((channel) + 16)) : \
+ (1 << ((channel) - 8 + 16)))
+
+#define HSI_ERROROCCURED (1 << 24)
+#define HSI_BREAKDETECTED (1 << 25)
+#define HSI_CAWAKEDETECTED (1 << 26)
+
+#define HSI_SYS_GDD_MPU_IRQ_STATUS_REG 0x0800
+#define HSI_SYS_GDD_MPU_IRQ_ENABLE_REG 0x0804
+#define HSI_GDD_LCH(channel) (1 << (channel))
+
+
+#define HSI_SYS_WAKE_OFFSET 0x10
+#define HSI_SYS_WAKE_BASE 0x0c00
+#define HSI_SYS_WAKE_REG(port) (HSI_SYS_WAKE_BASE +\
+ (((port) - 1) * HSI_SYS_WAKE_OFFSET))
+
+#define HSI_SYS_CLEAR_WAKE_BASE 0x0c04
+#define HSI_SYS_CLEAR_WAKE_REG(port) (HSI_SYS_CLEAR_WAKE_BASE +\
+ (((port) - 1) * HSI_SYS_WAKE_OFFSET))
+
+#define HSI_SYS_SET_WAKE_BASE 0x0c08
+#define HSI_SYS_SET_WAKE_REG(port) (HSI_SYS_SET_WAKE_BASE +\
+ (((port) - 1) * HSI_SYS_WAKE_OFFSET))
+
+#define HSI_SSI_WAKE_MASK 0xff /* for SSI */
+#define HSI_WAKE_MASK 0xffff /* for HSI */
+#define HSI_SET_WAKE_4_WIRES (0 << 16)
+#define HSI_SET_WAKE_READY_LVL_0 (0 << 17)
+#define HSI_SET_WAKE(channel) (1 << (channel) |\
+ HSI_SET_WAKE_4_WIRES |\
+ HSI_SET_WAKE_READY_LVL_0)
+#define HSI_CLEAR_WAKE(channel) (1 << (channel))
+#define HSI_WAKE(channel) (1 << (channel))
+
+#define HSI_SYS_HWINFO_REG 0x0004 /* only for HSI */
+
+/* Additional registers definitions (for channels 8 .. 15) for HSI */
+#define HSI_SYS_MPU_U_STATUS_BASE 0x0408
+#define HSI_SYS_MPU_U_STATUS_REG(port, irq) \
+ (HSI_SYS_MPU_U_STATUS_BASE + \
+ ((((port) - 1) * HSI_SYS_MPU_STATUS_PORT_OFFSET) +\
+ ((irq) * HSI_SYS_MPU_STATUS_IRQ_OFFSET)))
+
+#define HSI_SYS_MPU_U_ENABLE_BASE 0x040c
+#define HSI_SYS_MPU_U_ENABLE_REG(port, irq) \
+ (HSI_SYS_MPU_U_ENABLE_BASE + \
+ ((((port) - 1) * HSI_SYS_MPU_ENABLE_PORT_OFFSET) +\
+ ((irq) * HSI_SYS_MPU_ENABLE_IRQ_OFFSET)))
+
+/*
+ * HSI HST registers
+ */
+#define HSI_HST_ID_REG(port) (HSI_HST_BASE(port) + 0x0000)
+
+#define HSI_HST_MODE_REG(port) (HSI_HST_BASE(port) + 0x0004)
+#define HSI_MODE_VAL_MASK 3
+#define HSI_MODE_SLEEP 0
+#define HSI_MODE_STREAM 1
+#define HSI_MODE_FRAME 2
+#define HSI_SSI_MODE_MULTIPOINTS 3 /* SSI only */
+#define HSI_FLOW_OFFSET 2 /* HSI only */
+#define HSI_FLOW_VAL_MASK 3 /* HSI only */
+#define HSI_FLOW_SYNCHRONIZED 0 /* HSI only */
+#define HSI_FLOW_PIPELINED 1 /* HSI only */
+#define HSI_FLOW_REAL_TIME 2 /* HSI only */
+#define HSI_HST_MODE_WAKE_CTRL_AUTO (1 << 4) /* HSI only */
+#define HSI_HST_MODE_WAKE_CTRL_SW (0 << 4) /* HSI only */
+
+#define HSI_HST_FRAMESIZE_REG(port) (HSI_HST_BASE(port) + 0x0008)
+#define HSI_FRAMESIZE_DEFAULT 31
+#define HSI_FRAMESIZE_MAX 0x1f
+
+#define HSI_HST_TXSTATE_REG(port) (HSI_HST_BASE(port) + 0x000c)
+#define HSI_HST_TXSTATE_VAL_MASK 0x07
+#define HSI_HST_TXSTATE_IDLE 0
+
+#define HSI_HST_BUFSTATE_REG(port) (HSI_HST_BASE(port) + 0x0010)
+#define HSI_HST_BUFSTATE_FIFO_REG(fifo) (((fifo) < 8) ? \
+ HSI_HST_BUFSTATE_REG(1) : \
+ HSI_HST_BUFSTATE_REG(2))
+#define HSI_BUFSTATE_CHANNEL(channel) ((channel) < 8 ? \
+ (1 << (channel)) : \
+ (1 << ((channel) - 8)))
+
+#define HSI_HST_DIVISOR_REG(port) (HSI_HST_BASE(port) + 0x0018)
+#define HSI_DIVISOR_DEFAULT 1
+#define HSI_SSI_MAX_TX_DIVISOR 0x7f /* for SSI */
+#define HSI_MAX_TX_DIVISOR 0xff /* for HSI */
+
+#define HSI_HST_BREAK_REG(port) (HSI_HST_BASE(port) + 0x0020)
+#define HSI_HST_CHANNELS_REG(port) (HSI_HST_BASE(port) + 0x0024)
+#define HSI_CHANNELS_DEFAULT 4
+#define HSI_SSI_CHANNELS_MAX 8 /* for SSI */
+#define HSI_CHANNELS_MAX 16 /* for HSI */
+
+#define HSI_HST_ARBMODE_REG(port) (HSI_HST_BASE(port) + 0x0028)
+#define HSI_ARBMODE_ROUNDROBIN 0
+#define HSI_ARBMODE_PRIORITY 1
+
+#define HSI_HST_BUFFER_BASE(port) (HSI_HST_BASE(port) + 0x0080)
+#define HSI_HST_BUFFER_CH_REG(port, channel) (HSI_HST_BUFFER_BASE(port) +\
+ ((channel) * 4))
+#define HSI_HST_BUFFER_FIFO_REG(fifo) (((fifo) < 8) ? \
+ (HSI_HST_BUFFER_CH_REG(1, (fifo))) : \
+ (HSI_HST_BUFFER_CH_REG(2, (fifo) - 8)))
+
+#define HSI_HST_SWAPBUF_BASE(port) (HSI_HST_BASE(port) + 0x00c0)
+#define HSI_HST_SWAPBUF_CH_REG(port, channel) (HSI_HST_SWAPBUF_BASE(port) +\
+ ((channel) * 4))
+
+
+/* Additional registers for HSI */
+#define HSI_HST_FIFO_COUNT 16
+#define HSI_HST_FIFO_SIZE 8
+#define HSI_HST_MAPPING_FIFO_REG(fifo) (HSI_HST_BASE(1) + 0x0100 +\
+ ((fifo) * 4))
+#define HSI_MAPPING_ENABLE 1
+#define HSI_MAPPING_CH_NUMBER_OFFSET 1
+#define HSI_MAPPING_PORT_NUMBER_OFFSET 7
+#define HSI_HST_MAPPING_THRESH_OFFSET 10
+#define HSI_HST_MAPPING_THRESH_VALUE (0x0 << HSI_HST_MAPPING_THRESH_OFFSET)
+
+/*
+ * HSI HSR registers
+ */
+#define HSI_HSR_ID_REG(port) (HSI_HSR_BASE(port) + 0x0000)
+
+#define HSI_HSR_MODE_REG(port) (HSI_HSR_BASE(port) + 0x0004)
+
+#define HSI_HSR_MODE_MODE_VAL_MASK (3 << 0) /* HSI only */
+#define HSI_HSR_MODE_FLOW_VAL_MASK (3 << 2) /* HSI only */
+#define HSI_HSR_MODE_WAKE_STATUS (1 << 4) /* HSI only */
+#define HSI_HSR_MODE_MODE_VAL_SLEEP 0xFFFFFFFC /* HSI only */
+
+#define HSI_HSR_FRAMESIZE_REG(port) (HSI_HSR_BASE(port) + 0x0008)
+
+#define HSI_HSR_RXSTATE_REG(port) (HSI_HSR_BASE(port) + 0x000c)
+
+#define HSI_HSR_BUFSTATE_REG(port) (HSI_HSR_BASE(port) + 0x0010)
+#define HSI_HSR_BUFSTATE_FIFO_REG(fifo) (((fifo) < 8) ? \
+ HSI_HSR_BUFSTATE_REG(1) : \
+ HSI_HSR_BUFSTATE_REG(2))
+
+#define HSI_HSR_BREAK_REG(port) (HSI_HSR_BASE(port) + 0x001c)
+
+#define HSI_HSR_ERROR_REG(port) (HSI_HSR_BASE(port) + 0x0020)
+#define HSI_HSR_ERROR_SIG 1
+#define HSI_HSR_ERROR_FTE (1 << 1) /* HSI only */
+#define HSI_HSR_ERROR_TBE (1 << 4) /* HSI only */
+#define HSI_HSR_ERROR_RME (1 << 7) /* HSI only */
+#define HSI_HSR_ERROR_TME (1 << 11) /* HSI only */
+
+#define HSI_HSR_ERRORACK_REG(port) (HSI_HSR_BASE(port) + 0x0024)
+
+#define HSI_HSR_CHANNELS_REG(port) (HSI_HSR_BASE(port) + 0x0028)
+
+#define HSI_HSR_OVERRUN_REG(port) (HSI_HSR_BASE(port) + 0x002c)
+
+#define HSI_HSR_OVERRUNACK_REG(port) (HSI_HSR_BASE(port) + 0x0030)
+
+#define HSI_HSR_COUNTERS_REG(port) (HSI_HSR_BASE(port) + 0x0034)
+#define SSI_TIMEOUT_REG(port) (HSI_HSR_COUNTERS_REG(port))
+#define HSI_TIMEOUT_DEFAULT 0 /* SSI only */
+#define HSI_SSI_RX_TIMEOUT_OFFSET 0 /* SSI only */
+#define HSI_SSI_RX_TIMEOUT_MASK 0x1ff /* SSI only */
+#define HSI_COUNTERS_FT_MASK 0x000fffff /* HSI only */
+#define HSI_COUNTERS_TB_MASK 0x00f00000 /* HSI only */
+#define HSI_COUNTERS_FB_MASK 0xff000000 /* HSI only */
+#define HSI_COUNTERS_FT_OFFSET 0 /* HSI only */
+#define HSI_COUNTERS_TB_OFFSET 20 /* HSI only */
+#define HSI_COUNTERS_FB_OFFSET 24 /* HSI only */
+/* Default FT value: 2 x max_bits_per_frame + 20% margin */
+#define HSI_COUNTERS_FT_DEFAULT (90 << HSI_COUNTERS_FT_OFFSET)
+#define HSI_COUNTERS_TB_DEFAULT (6 << HSI_COUNTERS_TB_OFFSET)
+#define HSI_COUNTERS_FB_DEFAULT (8 << HSI_COUNTERS_FB_OFFSET)
+#define HSI_HSR_COMBINE_COUNTERS(FB, TB, FT) \
+ (((FB << HSI_COUNTERS_FB_OFFSET) & HSI_COUNTERS_FB_MASK) | \
+ ((TB << HSI_COUNTERS_TB_OFFSET) & HSI_COUNTERS_TB_MASK) | \
+ ((FT << HSI_COUNTERS_FT_OFFSET) & HSI_COUNTERS_FT_MASK))
+#define SSI_SSR_COMBINE_COUNTERS(FT) \
+ ((FT << HSI_SSI_RX_TIMEOUT_OFFSET) & HSI_SSI_RX_TIMEOUT_MASK)
+
+#define HSI_HSR_BUFFER_BASE(port) (HSI_HSR_BASE(port) + 0x0080)
+#define HSI_HSR_BUFFER_CH_REG(port, channel) (HSI_HSR_BUFFER_BASE(port) +\
+ ((channel) * 4))
+#define HSI_HSR_BUFFER_FIFO_REG(fifo) (((fifo) < 8) ? \
+ (HSI_HSR_BUFFER_CH_REG(1, (fifo))) : \
+ (HSI_HSR_BUFFER_CH_REG(2, (fifo) - 8)))
+
+#define HSI_HSR_SWAPBUF_BASE(port) (HSI_HSR_BASE(port) + 0x00c0)
+#define HSI_HSR_SWAPBUF_CH_REG(port, channel) (HSI_HSR_SWAPBUF_BASE(port) +\
+ ((channel) * 4))
+
+/* Additional registers for HSI */
+#define HSI_HSR_FIFO_COUNT 16
+#define HSI_HSR_FIFO_SIZE 8
+#define HSI_HSR_MAPPING_FIFO_REG(fifo) (HSI_HSR_BASE(1) + 0x0100 +\
+ ((fifo) * 4))
+#define HSI_HSR_MAPPING_WORDS_MASK (0xf << 10)
+
+#define HSI_HSR_DLL_REG (HSI_HSR_BASE(1) + 0x0144)
+#define HSI_HSR_DLL_COCHRE 1
+#define HSI_HSR_DLL_COCHGR (1 << 4)
+#define HSI_HSR_DLL_INCO_MASK 0x0003ff00
+#define HSI_HSR_DLL_INCO_OFFSET 8
+
+#define HSI_HSR_DIVISOR_REG(port) (HSI_HSR_BASE(port) + 0x014C)
+#define HSI_HSR_DIVISOR_MASK 0xff
+#define HSI_MAX_RX_DIVISOR 0xff
+
+/*
+ * HSI GDD registers
+ */
+#define HSI_SSI_DMA_CHANNEL_MAX 8
+#define HSI_HSI_DMA_CHANNEL_MAX 16
+
+#define HSI_SSI_GDD_HW_ID_REG (HSI_GDD_BASE + 0x0000)
+
+#define HSI_SSI_GDD_PPORT_ID_REG (HSI_GDD_BASE + 0x0010)
+
+#define HSI_SSI_GDD_MPORT_ID_REG (HSI_GDD_BASE + 0x0014)
+
+#define HSI_SSI_GDD_PPORT_SR_REG (HSI_GDD_BASE + 0x0020)
+#define HSI_PPORT_ACTIVE_LCH_NUMBER_MASK 0xff
+
+#define HSI_GDD_MPORT_SR_REG (HSI_GDD_BASE + 0x0024)
+#define HSI_SSI_MPORT_ACTIVE_LCH_NUMBER_MASK 0xff
+
+#define HSI_SSI_GDD_TEST_REG (HSI_GDD_BASE + 0x0040)
+#define HSI_SSI_TEST 1
+
+#define HSI_GDD_GCR_REG (HSI_GDD_BASE + 0x0100)
+#define HSI_CLK_AUTOGATING_ON (1 << 3)
+#define HSI_SWITCH_OFF (1 << 0)
+
+#define HSI_GDD_GRST_REG (HSI_GDD_BASE + 0x0200)
+#define HSI_GDD_GRST_SWRESET 1
+
+#define HSI_GDD_CSDP_BASE (HSI_GDD_BASE + 0x0800)
+#define HSI_GDD_CSDP_OFFSET 0x40
+#define HSI_GDD_CSDP_REG(channel) (HSI_GDD_CSDP_BASE +\
+ ((channel) * HSI_GDD_CSDP_OFFSET))
+
+#define HSI_DST_BURST_EN_MASK 0xc000
+#define HSI_DST_SINGLE_ACCESS0 0
+#define HSI_DST_SINGLE_ACCESS (1 << 14)
+#define HSI_DST_BURST_4X32_BIT (2 << 14)
+#define HSI_DST_BURST_8x32_BIT (3 << 14)
+
+#define HSI_DST_MASK 0x1e00
+#define HSI_DST_MEMORY_PORT (8 << 9)
+#define HSI_DST_PERIPHERAL_PORT (9 << 9)
+
+#define HSI_SRC_BURST_EN_MASK 0x0180
+#define HSI_SRC_SINGLE_ACCESS0 0
+#define HSI_SRC_SINGLE_ACCESS (1 << 7)
+#define HSI_SRC_BURST_4x32_BIT (2 << 7)
+#define HSI_SRC_BURST_8x32_BIT (3 << 7)
+
+#define HSI_SRC_MASK 0x003c
+#define HSI_SRC_MEMORY_PORT (8 << 2)
+#define HSI_SRC_PERIPHERAL_PORT (9 << 2)
+
+#define HSI_DATA_TYPE_MASK 3
+#define HSI_DATA_TYPE_S32 2
+
+#define HSI_GDD_CCR_BASE (HSI_GDD_BASE + 0x0802)
+#define HSI_GDD_CCR_OFFSET 0x40
+#define HSI_GDD_CCR_REG(channel) (HSI_GDD_CCR_BASE +\
+ ((channel) * HSI_GDD_CCR_OFFSET))
+#define HSI_DST_AMODE_MASK (3 << 14)
+#define HSI_DST_AMODE_CONST 0
+#define HSI_DST_AMODE_POSTINC (1 << 14)
+
+#define HSI_SRC_AMODE_MASK (3 << 12)
+#define HSI_SRC_AMODE_CONST 0
+#define HSI_SRC_AMODE_POSTINC (1 << 12)
+
+#define HSI_CCR_ENABLE (1 << 7)
+
+#define HSI_CCR_SYNC_MASK 0x001f /* only for SSI */
+
+#define HSI_GDD_CCIR_BASE (HSI_GDD_BASE + 0x0804)
+#define HSI_GDD_CCIR_OFFSET 0x40
+#define HSI_GDD_CCIR_REG(channel) (HSI_GDD_CCIR_BASE +\
+ ((channel) * HSI_GDD_CCIR_OFFSET))
+
+#define HSI_BLOCK_IE (1 << 5)
+#define HSI_HALF_IE (1 << 2)
+#define HSI_TOUT_IE (1 << 0)
+
+#define HSI_GDD_CSR_BASE (HSI_GDD_BASE + 0x0806)
+#define HSI_GDD_CSR_OFFSET 0x40
+#define HSI_GDD_CSR_REG(channel) (HSI_GDD_CSR_BASE +\
+ ((channel) * HSI_GDD_CSR_OFFSET))
+
+#define HSI_CSR_SYNC (1 << 6)
+#define HSI_CSR_BLOCK (1 << 5) /* Full block is transferred */
+#define HSI_CSR_HALF (1 << 2) /* Half block is transferred */
+#define HSI_CSR_TOUT (1 << 0) /* Time-out overflow occurs */
+
+#define HSI_GDD_CSSA_BASE (HSI_GDD_BASE + 0x0808)
+#define HSI_GDD_CSSA_OFFSET 0x40
+#define HSI_GDD_CSSA_REG(channel) (HSI_GDD_CSSA_BASE +\
+ ((channel) * HSI_GDD_CSSA_OFFSET))
+
+
+#define HSI_GDD_CDSA_BASE (HSI_GDD_BASE + 0x080c)
+#define HSI_GDD_CDSA_OFFSET 0x40
+#define HSI_GDD_CDSA_REG(channel) (HSI_GDD_CDSA_BASE +\
+ ((channel) * HSI_GDD_CDSA_OFFSET))
+
+#define HSI_GDD_CEN_BASE (HSI_GDD_BASE + 0x0810)
+#define HSI_GDD_CEN_OFFSET 0x40
+#define HSI_GDD_CEN_REG(channel) (HSI_GDD_CEN_BASE +\
+ ((channel) * HSI_GDD_CEN_OFFSET))
+
+
+#define HSI_GDD_CSAC_BASE (HSI_GDD_BASE + 0x0818)
+#define HSI_GDD_CSAC_OFFSET 0x40
+#define HSI_GDD_CSAC_REG(channel) (HSI_GDD_CSAC_BASE +\
+ ((channel) * HSI_GDD_CSAC_OFFSET))
+
+#define HSI_GDD_CDAC_BASE (HSI_GDD_BASE + 0x081a)
+#define HSI_GDD_CDAC_OFFSET 0x40
+#define HSI_GDD_CDAC_REG(channel) (HSI_GDD_CDAC_BASE +\
+ ((channel) * HSI_GDD_CDAC_OFFSET))
+
+#define HSI_SSI_GDD_CLNK_CTRL_BASE (HSI_GDD_BASE + 0x0828)
+#define HSI_SSI_GDD_CLNK_CTRL_OFFSET 0x40
+#define HSI_SSI_GDD_CLNK_CTRL_REG(channel) (HSI_SSI_GDD_CLNK_CTRL_BASE +\
+ (channel * HSI_SSI_GDD_CLNK_CTRL_OFFSET))
+
+#define HSI_SSI_ENABLE_LNK (1 << 15)
+#define HSI_SSI_STOP_LNK (1 << 14)
+#define HSI_SSI_NEXT_CH_ID_MASK 0xf
+
+/*
+ * HSI Helpers
+ */
+#define HSI_SYS_MPU_ENABLE_CH_REG(port, irq, channel) \
+ (((channel) < HSI_SSI_CHANNELS_MAX) ? \
+ HSI_SYS_MPU_ENABLE_REG(port, irq) : \
+ HSI_SYS_MPU_U_ENABLE_REG(port, irq))
+
+#define HSI_SYS_MPU_STATUS_CH_REG(port, irq, channel) \
+ ((channel < HSI_SSI_CHANNELS_MAX) ? \
+ HSI_SYS_MPU_STATUS_REG(port, irq) : \
+ HSI_SYS_MPU_U_STATUS_REG(port, irq))
+/**
+ * struct omap_ssi_board_config - SSI board configuration
+ * @num_ports: Number of ports in use
+ * @cawake_gpio: Array of cawake gpio lines
+ */
+struct omap_ssi_board_config {
+ unsigned int num_ports;
+ int cawake_gpio[2];
+};
+extern int omap_ssi_config(struct omap_ssi_board_config *ssi_config);
+
+/**
+ * struct omap_hsi_board_config - HSI board configuration
+ * @num_ports: Number of ports in use
+ */
+struct omap_hsi_board_config {
+ unsigned int num_ports;
+};
+extern int omap_hsi_config(struct omap_hsi_board_config *hsi_config);
+
+#ifdef CONFIG_OMAP_HSI
+extern int omap_hsi_prepare_suspend(int hsi_port, bool dev_may_wakeup);
+extern int omap_hsi_prepare_idle(void);
+extern int omap_hsi_wakeup(int hsi_port);
+extern int omap_hsi_is_io_wakeup_from_hsi(void);
+#else
+static inline int omap_hsi_prepare_suspend(int hsi_port,
+ bool dev_may_wakeup) { return -ENOSYS; }
+static inline int omap_hsi_prepare_idle(void) { return -ENOSYS; }
+static inline int omap_hsi_wakeup(int hsi_port) { return -ENOSYS; }
+static inline int omap_hsi_is_io_wakeup_from_hsi(void) { return -ENOSYS; }
+
+#endif
+
+#endif /* __OMAP_HSI_H__ */
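Board files hand the port count and CAWAKE wiring to the SSI/HSI code through the two config calls declared above. A minimal sketch; the GPIO number is made up for illustration:

static struct omap_ssi_board_config board_ssi_config = {
	.num_ports   = 1,
	.cawake_gpio = { 151, -1 },	/* GPIO numbers are assumptions */
};

static struct omap_hsi_board_config board_hsi_config = {
	.num_ports = 1,
};

static void __init board_hsi_init(void)
{
	omap_ssi_config(&board_ssi_config);
	omap_hsi_config(&board_hsi_config);
}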
diff --git a/arch/arm/plat-omap/include/plat/omap_hwmod.h b/arch/arm/plat-omap/include/plat/omap_hwmod.h
index 1adea9c..2f279ad 100644
--- a/arch/arm/plat-omap/include/plat/omap_hwmod.h
+++ b/arch/arm/plat-omap/include/plat/omap_hwmod.h
@@ -40,6 +40,7 @@ struct omap_device;
extern struct omap_hwmod_sysc_fields omap_hwmod_sysc_type1;
extern struct omap_hwmod_sysc_fields omap_hwmod_sysc_type2;
+extern struct omap_hwmod_sysc_fields omap_hwmod_sysc_type3;
/*
* OCP SYSCONFIG bit shifts/masks TYPE1. These are for IPs compliant
@@ -69,6 +70,13 @@ extern struct omap_hwmod_sysc_fields omap_hwmod_sysc_type2;
#define SYSC_TYPE2_MIDLEMODE_SHIFT 4
#define SYSC_TYPE2_MIDLEMODE_MASK (0x3 << SYSC_TYPE2_MIDLEMODE_SHIFT)
+/*
+ * OCP SYSCONFIG bit shifts/masks TYPE3. These are for IPs that
+ * only implement the sidle feature.
+ */
+#define SYSC_TYPE3_SIDLEMODE_SHIFT 0
+#define SYSC_TYPE3_SIDLEMODE_MASK (0x3 << SYSC_TYPE3_SIDLEMODE_SHIFT)
+
/* OCP SYSSTATUS bit shifts/masks */
#define SYSS_RESETDONE_SHIFT 0
#define SYSS_RESETDONE_MASK (1 << SYSS_RESETDONE_SHIFT)
@@ -77,7 +85,6 @@ extern struct omap_hwmod_sysc_fields omap_hwmod_sysc_type2;
#define HWMOD_IDLEMODE_FORCE (1 << 0)
#define HWMOD_IDLEMODE_NO (1 << 1)
#define HWMOD_IDLEMODE_SMART (1 << 2)
-/* Slave idle mode flag only */
#define HWMOD_IDLEMODE_SMART_WKUP (1 << 3)
/**
@@ -258,6 +265,7 @@ struct omap_hwmod_ocp_if {
#define MSTANDBY_FORCE (HWMOD_IDLEMODE_FORCE << MASTER_STANDBY_SHIFT)
#define MSTANDBY_NO (HWMOD_IDLEMODE_NO << MASTER_STANDBY_SHIFT)
#define MSTANDBY_SMART (HWMOD_IDLEMODE_SMART << MASTER_STANDBY_SHIFT)
+#define MSTANDBY_SMART_WKUP (HWMOD_IDLEMODE_SMART_WKUP << MASTER_STANDBY_SHIFT)
/* omap_hwmod_sysconfig.sysc_flags capability flags */
#define SYSC_HAS_AUTOIDLE (1 << 0)
@@ -300,6 +308,7 @@ struct omap_hwmod_sysc_fields {
* @rev_offs: IP block revision register offset (from module base addr)
* @sysc_offs: OCP_SYSCONFIG register offset (from module base addr)
* @syss_offs: OCP_SYSSTATUS register offset (from module base addr)
+ * @srst_udelay: Delay needed after doing a softreset in usecs
* @idlemodes: One or more of {SIDLE,MSTANDBY}_{OFF,FORCE,SMART}
* @sysc_flags: SYS{C,S}_HAS* flags indicating SYSCONFIG bits supported
* @clockact: the default value of the module CLOCKACTIVITY bits
@@ -325,6 +334,7 @@ struct omap_hwmod_class_sysconfig {
u16 sysc_offs;
u16 syss_offs;
u16 sysc_flags;
+ u16 srst_udelay;
u8 idlemodes;
u8 clockact;
struct omap_hwmod_sysc_fields *sysc_fields;
@@ -359,11 +369,15 @@ struct omap_hwmod_omap2_prcm {
* struct omap_hwmod_omap4_prcm - OMAP4-specific PRCM data
* @clkctrl_reg: PRCM address of the clock control register
* @rstctrl_reg: address of the XXX_RSTCTRL register located in the PRM
+ * @context_reg: address of the context register
+ * @ctx_restore_trig : indicates if RFF or DFF or both lost
+ * should trigger ctx restore.
* @submodule_wkdep_bit: bit shift of the WKDEP range
*/
struct omap_hwmod_omap4_prcm {
void __iomem *clkctrl_reg;
void __iomem *rstctrl_reg;
+ void __iomem *context_reg;
u8 submodule_wkdep_bit;
};
@@ -519,8 +533,6 @@ struct omap_hwmod {
const char *main_clk;
struct clk *_clk;
struct omap_hwmod_opt_clk *opt_clks;
- char *vdd_name;
- struct voltagedomain *voltdm;
struct omap_hwmod_ocp_if **masters; /* connect to *_IA */
struct omap_hwmod_ocp_if **slaves; /* connect to *_TA */
void *dev_attr;
@@ -598,10 +610,14 @@ int omap_hwmod_for_each_by_class(const char *classname,
void *user);
int omap_hwmod_set_postsetup_state(struct omap_hwmod *oh, u8 state);
-u32 omap_hwmod_get_context_loss_count(struct omap_hwmod *oh);
+int omap_hwmod_get_context_loss_count(struct omap_hwmod *oh);
int omap_hwmod_no_setup_reset(struct omap_hwmod *oh);
+int omap_hwmod_pad_get_wakeup_status(struct omap_hwmod *oh);
+
+int omap_hwmod_disable_ioring_wakeup(struct omap_hwmod *oh);
+int omap_hwmod_enable_ioring_wakeup(struct omap_hwmod *oh);
/*
* Chip variant-specific hwmod init routines - XXX should be converted
* to use initcalls once the initial boot ordering is straightened out
@@ -611,4 +627,6 @@ extern int omap2430_hwmod_init(void);
extern int omap3xxx_hwmod_init(void);
extern int omap44xx_hwmod_init(void);
+extern struct device *omap_hwmod_name_get_dev(const char *oh_name);
+
#endif
diff --git a/arch/arm/plat-omap/include/plat/remoteproc.h b/arch/arm/plat-omap/include/plat/remoteproc.h
new file mode 100644
index 0000000..3de2a38
--- /dev/null
+++ b/arch/arm/plat-omap/include/plat/remoteproc.h
@@ -0,0 +1,151 @@
+/*
+ * Remote Processor - omap-specific bits
+ *
+ * Copyright (C) 2011 Texas Instruments, Inc.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _PLAT_REMOTEPROC_H
+#define _PLAT_REMOTEPROC_H
+
+#include <linux/remoteproc.h>
+#include <plat/omap_device.h>
+
+/*
+ * struct omap_rproc_timers_info - optional timers for the omap rproc
+ *
+ * @id: timer id to use by the remoteproc
+ * @odt: timer pointer
+ */
+struct omap_rproc_timers_info {
+ int id;
+ struct omap_dm_timer *odt;
+};
+
+/*
+ * struct omap_rproc_pdata - platform data for the omap rproc implementation
+ *
+ * @name: human readable name of the rproc, cannot exceed RPROC_MAX_NAME bytes
+ * @iommu_name: name of the iommu device this rproc is behind
+ * @oh_name: omap hwmod device
+ * @oh_name_opt: optional, secondary omap hwmod device
+ * @firmware: name of firmware file to be loaded
+ * @clkdm_name: name of clock domain in which this device is located
+ * @clkdm: clock domain in which this device is located
+ * @ops: platform-specific start/stop rproc handlers
+ * @memory_pool: platform-specific memory pool data
+ * @timers: optional timer(s) the rproc can use
+ */
+struct omap_rproc_pdata {
+ const char *name;
+ const char *iommu_name;
+ const char *oh_name;
+ const char *oh_name_opt;
+ const char *firmware;
+ const char *clkdm_name;
+ struct clockdomain *clkdm;
+ const struct rproc_ops *ops;
+ struct rproc_mem_pool *memory_pool;
+ struct omap_rproc_timers_info *timers;
+ u32 idle_addr;
+ u32 idle_mask;
+ u32 suspend_addr;
+ u32 suspend_mask;
+ unsigned sus_timeout;
+ char *sus_mbox_name;
+ u8 timers_cnt;
+};
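+
+/*
+ * Illustrative example (not part of this patch): machine init code would
+ * typically fill the pdata in roughly as follows (all names hypothetical):
+ *
+ *	static struct omap_rproc_pdata ipu_pdata = {
+ *		.name		= "ipu",
+ *		.iommu_name	= "ducati",
+ *		.oh_name	= "ipu_c0",
+ *		.firmware	= "ducati-m3.bin",
+ *	};
+ */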
+
+enum omap_rproc_mempool_type {
+ OMAP_RPROC_MEMPOOL_STATIC,
+ OMAP_RPROC_MEMPOOL_DYNAMIC
+};
+
+#if defined(CONFIG_OMAP_REMOTE_PROC)
+void omap_ipu_reserve_sdram_memblock(void);
+u32 omap_ipu_get_mempool_size(enum omap_rproc_mempool_type type);
+phys_addr_t omap_ipu_get_mempool_base(enum omap_rproc_mempool_type type);
+void omap_ipu_set_static_mempool(u32 start, u32 size);
+#else
+static inline void omap_ipu_reserve_sdram_memblock(void) { }
+static inline u32 omap_ipu_get_mempool_size(enum omap_rproc_mempool_type type)
+{
+ return 0;
+}
+static inline phys_addr_t omap_ipu_get_mempool_base(
+ enum omap_rproc_mempool_type type)
+{
+ return 0;
+}
+static inline void omap_ipu_set_static_mempool(u32 start, u32 size) { }
+#endif
+
+int omap_rproc_deactivate(struct omap_device *od);
+int omap_rproc_activate(struct omap_device *od);
+#define OMAP_RPROC_DEFAULT_PM_LATENCY \
+ .deactivate_func = omap_rproc_deactivate, \
+ .activate_func = omap_rproc_activate, \
+ .flags = OMAP_DEVICE_LATENCY_AUTO_ADJUST
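+
+/*
+ * Illustrative example (not part of this patch): the macro above is meant
+ * to populate an omap_device_pm_latency entry, e.g.:
+ *
+ *	static struct omap_device_pm_latency omap_rproc_latency[] = {
+ *		{ OMAP_RPROC_DEFAULT_PM_LATENCY },
+ *	};
+ */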
+
+struct exc_regs {
+ u32 r0;
+ u32 r1;
+ u32 r2;
+ u32 r3;
+ u32 r4;
+ u32 r5;
+ u32 r6;
+ u32 r7;
+ u32 r8;
+ u32 r9;
+ u32 r10;
+ u32 r11;
+ u32 r12;
+ u32 sp;
+ u32 lr;
+ u32 pc;
+ u32 psr;
+ u32 ICSR; /* NVIC registers */
+ u32 MMFSR;
+ u32 BFSR;
+ u32 UFSR;
+ u32 HFSR;
+ u32 DFSR;
+ u32 MMAR;
+ u32 BFAR;
+ u32 AFSR;
+};
+
+static inline void remoteproc_fill_pt_regs(struct pt_regs *regs,
+ struct exc_regs *xregs)
+{
+ regs->ARM_r0 = xregs->r0;
+ regs->ARM_ORIG_r0 = xregs->r0;
+ regs->ARM_r1 = xregs->r1;
+ regs->ARM_r2 = xregs->r2;
+ regs->ARM_r3 = xregs->r3;
+ regs->ARM_r4 = xregs->r4;
+ regs->ARM_r5 = xregs->r5;
+ regs->ARM_r6 = xregs->r6;
+ regs->ARM_r7 = xregs->r7;
+ regs->ARM_r8 = xregs->r8;
+ regs->ARM_r9 = xregs->r9;
+ regs->ARM_r10 = xregs->r10;
+ regs->ARM_fp = xregs->r11;
+ regs->ARM_ip = xregs->r12;
+ regs->ARM_sp = xregs->sp;
+ regs->ARM_lr = xregs->lr;
+ regs->ARM_pc = xregs->pc;
+ regs->ARM_cpsr = xregs->psr;
+}
+
+#endif /* _PLAT_REMOTEPROC_H */
diff --git a/arch/arm/plat-omap/include/plat/rpmsg.h b/arch/arm/plat-omap/include/plat/rpmsg.h
new file mode 100644
index 0000000..c78b9d2
--- /dev/null
+++ b/arch/arm/plat-omap/include/plat/rpmsg.h
@@ -0,0 +1,68 @@
+/*
+ * Remote processor messaging
+ *
+ * Copyright(c) 2011 Texas Instruments. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name Texas Instruments nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _PLAT_RPMSG_H
+#define _PLAT_RPMSG_H
+
+/*
+ * enum - Predefined Mailbox Messages
+ *
+ * @RP_MBOX_READY: informs the M3's that we're up and running. will be
+ * followed by another mailbox message that carries the A9's virtual address
+ * of the shared buffer. This would allow the A9's drivers to send virtual
+ * addresses of the buffers.
+ *
+ * @RP_MBOX_PENDING_MSG: informs the receiver that there is an inbound
+ * message waiting in its own receive-side vring. please note that currently
+ * this message is optional: alternatively, one can explicitly send the index
+ * of the triggered virtqueue itself. the preferred approach will be decided
+ * as we progress and experiment with those design ideas.
+ *
+ * @RP_MBOX_CRASH: this message is sent upon a BIOS exception
+ *
+ * @RP_MBOX_ECHO_REQUEST: a mailbox-level "ping" message.
+ *
+ * @RP_MBOX_ECHO_REPLY: a mailbox-level reply to a "ping"
+ *
+ * @RP_MBOX_ABORT_REQUEST: a "please crash" request, used for testing the
+ * recovery mechanism (to some extent). will trigger a @RP_MBOX_CRASH reply.
+ */
+enum {
+ RP_MBOX_READY = 0xFFFFFF00,
+ RP_MBOX_PENDING_MSG = 0xFFFFFF01,
+ RP_MBOX_CRASH = 0xFFFFFF02,
+ RP_MBOX_ECHO_REQUEST = 0xFFFFFF03,
+ RP_MBOX_ECHO_REPLY = 0xFFFFFF04,
+ RP_MBOX_ABORT_REQUEST = 0xFFFFFF05,
+};
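+
+/*
+ * Illustrative example (not part of this patch): assuming a mailbox handle
+ * obtained via omap_mbox_get(), a mailbox-level ping could be issued with
+ *
+ *	omap_mbox_msg_send(mbox, RP_MBOX_ECHO_REQUEST);
+ *
+ * and the remote side would be expected to answer with RP_MBOX_ECHO_REPLY.
+ */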
+
+#endif /* _PLAT_RPMSG_H */
diff --git a/arch/arm/plat-omap/include/plat/rpres.h b/arch/arm/plat-omap/include/plat/rpres.h
new file mode 100644
index 0000000..0dfb781
--- /dev/null
+++ b/arch/arm/plat-omap/include/plat/rpres.h
@@ -0,0 +1,57 @@
+/*
+ * Remote processor resources
+ *
+ * Copyright (C) 2011 Texas Instruments, Inc.
+ * Copyright (C) 2011 Google, Inc.
+ *
+ * Fernando Guzman Lugo <fernando.lugo@ti.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ */
+
+#ifndef _PLAT_OMAP_RPRES_H
+#define _PLAT_OMAP_RPRES_H
+
+enum rpres_constraint {
+ RPRES_CONSTRAINT_SCALE,
+ RPRES_CONSTRAINT_LATENCY,
+ RPRES_CONSTRAINT_BANDWIDTH,
+};
+
+enum {
+ RPRES_INACTIVE,
+ RPRES_ACTIVE,
+};
+
+struct rpres_ops {
+ int (*start)(struct platform_device *pdev);
+ int (*stop)(struct platform_device *pdev);
+ int (*set_lat)(struct platform_device *pdev, long v);
+ int (*set_bw)(struct platform_device *pdev, long v);
+ int (*scale_dev)(struct platform_device *pdev, long v);
+};
+
+struct rpres_platform_data {
+ const char *name;
+ const char *oh_name;
+ struct omap_hwmod *oh;
+ struct rpres_ops *ops;
+ struct clk *opt_clk;
+ const char *opt_clk_name;
+ struct device *(*get_dev)(void);
+};
+
+struct rpres {
+ struct list_head next;
+ const char *name;
+ struct platform_device *pdev;
+ int state;
+ struct mutex lock;
+};
+
+struct rpres *rpres_get(const char *);
+void rpres_put(struct rpres *);
+int rpres_set_constraints(struct rpres *, enum rpres_constraint type, long val);
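+
+/*
+ * Illustrative example (not part of this patch), using a hypothetical
+ * resource name and with error handling elided:
+ *
+ *	struct rpres *res = rpres_get("rpres_iss");
+ *
+ *	rpres_set_constraints(res, RPRES_CONSTRAINT_LATENCY, 10);
+ *	rpres_put(res);
+ */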
+#endif /* _PLAT_OMAP_RPRES_H */
diff --git a/arch/arm/plat-omap/include/plat/serial.h b/arch/arm/plat-omap/include/plat/serial.h
index 2723f91..8e41fb6 100644
--- a/arch/arm/plat-omap/include/plat/serial.h
+++ b/arch/arm/plat-omap/include/plat/serial.h
@@ -103,15 +103,23 @@
#ifndef __ASSEMBLER__
struct omap_board_data;
+struct omap_uart_port_info;
+struct omap_device_pad;
extern void omap_serial_init(void);
-extern void omap_serial_init_port(struct omap_board_data *bdata);
-extern int omap_uart_can_sleep(void);
-extern void omap_uart_check_wakeup(void);
-extern void omap_uart_prepare_suspend(void);
-extern void omap_uart_prepare_idle(int num);
-extern void omap_uart_resume_idle(int num);
-extern void omap_uart_enable_irqs(int enable);
+extern void omap_serial_board_init(struct omap_uart_port_info *platform_data);
+extern void omap_serial_init_port(struct omap_board_data *bdata,
+ struct omap_uart_port_info *platform_data);
+void __init omap_serial_init_port_pads(int id, struct omap_device_pad *pads,
+ int size, struct omap_uart_port_info *info);
+extern u32 omap_uart_resume_idle(void);
+extern int omap_uart_wake(u8 id);
+extern int omap_uart_enable(u8 uart_num);
+extern int omap_uart_disable(u8 uart_num);
+
+#define MUX_PULL_UP ((1<<8) | (1<<4) | (1<<3) | (7))
+void omap_rts_mux_write(u16 val, int num);
+
#endif
#endif
diff --git a/arch/arm/plat-omap/include/plat/sram.h b/arch/arm/plat-omap/include/plat/sram.h
index f500fc3..a6000d4 100644
--- a/arch/arm/plat-omap/include/plat/sram.h
+++ b/arch/arm/plat-omap/include/plat/sram.h
@@ -15,6 +15,7 @@
#include <asm/fncpy.h>
extern void *omap_sram_push_address(unsigned long size);
+extern unsigned long omap_get_sram_barrier_base(void);
/* Macro to push a function to the internal SRAM, using the fncpy API */
#define omap_sram_push(funcp, size) ({ \
diff --git a/arch/arm/plat-omap/include/plat/temperature_sensor.h b/arch/arm/plat-omap/include/plat/temperature_sensor.h
new file mode 100644
index 0000000..5f0d6b3
--- /dev/null
+++ b/arch/arm/plat-omap/include/plat/temperature_sensor.h
@@ -0,0 +1,65 @@
+/*
+ * OMAP446x Temperature sensor header file
+ *
+ * Copyright (C) 2011 Texas Instruments Incorporated - http://www.ti.com/
+ * Author: J Keerthy <j-keerthy@ti.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+ * 02110-1301 USA
+ *
+ */
+
+#ifndef __ARCH_ARM_PLAT_OMAP_INCLUDE_PLAT_TEMPERATURE_SENSOR_H
+#define __ARCH_ARM_PLAT_OMAP_INCLUDE_PLAT_TEMPERATURE_SENSOR_H
+
+/*
+ * Offsets from the base of temperature sensor registers
+ */
+#define TEMP_SENSOR_CTRL_OFFSET 0x00
+#define BGAP_CTRL_OFFSET 0x4c
+#define BGAP_COUNTER_OFFSET 0x50
+#define BGAP_THRESHOLD_OFFSET 0x54
+#define BGAP_TSHUT_OFFSET 0x58
+#define BGAP_STATUS_OFFSET 0x5c
+
+#define OMAP_TSHUT_GPIO 86
+
+
+/*
+ * omap_temp_sensor platform data
+ * @name: name of the temperature sensor instance
+ * @offset: offset of the temp sensor ctrl register
+ * @irq: IRQ number for the thermal alert interrupt
+ */
+struct omap_temp_sensor_pdata {
+ char *name;
+ u32 offset;
+ int irq;
+};
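+
+/*
+ * Illustrative example (not part of this patch), with hypothetical values:
+ *
+ *	static struct omap_temp_sensor_pdata omap_temp_sensor_pdata = {
+ *		.name	= "omap_temp_sensor",
+ *		.offset	= TEMP_SENSOR_CTRL_OFFSET,
+ *		.irq	= <thermal alert IRQ number>,
+ *	};
+ */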
+
+#ifdef CONFIG_OMAP_TEMP_SENSOR
+void omap_temp_sensor_resume_idle(void);
+void omap_temp_sensor_prepare_idle(void);
+#else
+static inline void omap_temp_sensor_resume_idle(void) { }
+static inline void omap_temp_sensor_prepare_idle(void) { }
+#endif
+
+#ifdef CONFIG_OMAP_DIE_TEMP_SENSOR
+void omap_temp_sensor_idle(int idle_state);
+#else
+static inline void omap_temp_sensor_idle(int idle_state) { }
+#endif
+
+#endif
diff --git a/arch/arm/plat-omap/include/plat/usb.h b/arch/arm/plat-omap/include/plat/usb.h
index 17d3c93..3be4a83 100644
--- a/arch/arm/plat-omap/include/plat/usb.h
+++ b/arch/arm/plat-omap/include/plat/usb.h
@@ -41,6 +41,11 @@ struct usbhs_omap_board_data {
* Each PHY can have a separate regulator.
*/
struct regulator *regulator[OMAP3_HS_USB_PORTS];
+ /*
+ * Each Port can have an external transceiver requiring clock control
+ * for low power mode entry
+ */
+ struct clk *transceiver_clk[OMAP3_HS_USB_PORTS];
};
struct ehci_hcd_omap_platform_data {
@@ -48,6 +53,11 @@ struct ehci_hcd_omap_platform_data {
int reset_gpio_port[OMAP3_HS_USB_PORTS];
struct regulator *regulator[OMAP3_HS_USB_PORTS];
unsigned phy_reset:1;
+ /*
+ * Each Port can have an external transceiver requiring clock control
+ * for low power mode entry
+ */
+ struct clk *transceiver_clk[OMAP3_HS_USB_PORTS];
};
struct ohci_hcd_omap_platform_data {
@@ -100,9 +110,6 @@ extern void usb_musb_init(struct omap_musb_board_data *board_data);
extern void usbhs_init(const struct usbhs_omap_board_data *pdata);
-extern int omap_usbhs_enable(struct device *dev);
-extern void omap_usbhs_disable(struct device *dev);
-
extern int omap4430_phy_power(struct device *dev, int ID, int on);
extern int omap4430_phy_set_clk(struct device *dev, int on);
extern int omap4430_phy_init(struct device *dev);
@@ -293,4 +300,9 @@ static inline u32 omap1_usb2_init(unsigned nwires, unsigned alt_pingroup)
}
#endif
+extern void usbhs_wakeup(void);
+extern void omap4_trigger_ioctrl(void);
+
+#define USBHS_EHCI_HWMODNAME "usbhs_ehci"
+
#endif /* __ASM_ARCH_OMAP_USB_H */
diff --git a/arch/arm/plat-omap/iommu.c b/arch/arm/plat-omap/iommu.c
index 34fc31e..632fbe2 100644
--- a/arch/arm/plat-omap/iommu.c
+++ b/arch/arm/plat-omap/iommu.c
@@ -107,11 +107,8 @@ static int iommu_enable(struct iommu *obj)
if (!arch_iommu)
return -ENODEV;
- clk_enable(obj->clk);
-
err = arch_iommu->enable(obj);
- clk_disable(obj->clk);
return err;
}
@@ -120,11 +117,7 @@ static void iommu_disable(struct iommu *obj)
if (!obj)
return;
- clk_enable(obj->clk);
-
arch_iommu->disable(obj);
-
- clk_disable(obj->clk);
}
/*
@@ -244,11 +237,14 @@ int load_iotlb_entry(struct iommu *obj, struct iotlb_entry *e)
struct iotlb_lock l;
struct cr_regs *cr;
+ if (obj && obj->secure_mode) {
+ WARN_ON(1);
+ return -EBUSY;
+ }
+
if (!obj || !obj->nr_tlb_entries || !e)
return -EINVAL;
- clk_enable(obj->clk);
-
iotlb_lock_get(obj, &l);
if (l.base == obj->nr_tlb_entries) {
dev_warn(obj->dev, "%s: preserve entries full\n", __func__);
@@ -277,7 +273,6 @@ int load_iotlb_entry(struct iommu *obj, struct iotlb_entry *e)
cr = iotlb_alloc_cr(obj, e);
if (IS_ERR(cr)) {
- clk_disable(obj->clk);
return PTR_ERR(cr);
}
@@ -291,7 +286,6 @@ int load_iotlb_entry(struct iommu *obj, struct iotlb_entry *e)
l.vict = l.base;
iotlb_lock_set(obj, &l);
out:
- clk_disable(obj->clk);
return err;
}
EXPORT_SYMBOL_GPL(load_iotlb_entry);
@@ -308,7 +302,10 @@ void flush_iotlb_page(struct iommu *obj, u32 da)
int i;
struct cr_regs cr;
- clk_enable(obj->clk);
+ if (obj && obj->secure_mode) {
+ WARN_ON(1);
+ return;
+ }
for_each_iotlb_cr(obj, obj->nr_tlb_entries, i, cr) {
u32 start;
@@ -327,7 +324,6 @@ void flush_iotlb_page(struct iommu *obj, u32 da)
iommu_write_reg(obj, 1, MMU_FLUSH_ENTRY);
}
}
- clk_disable(obj->clk);
if (i == obj->nr_tlb_entries)
dev_dbg(obj->dev, "%s: no page for %08x\n", __func__, da);
@@ -362,15 +358,11 @@ void flush_iotlb_all(struct iommu *obj)
{
struct iotlb_lock l;
- clk_enable(obj->clk);
-
l.base = 0;
l.vict = 0;
iotlb_lock_set(obj, &l);
iommu_write_reg(obj, 1, MMU_GFLUSH);
-
- clk_disable(obj->clk);
}
EXPORT_SYMBOL_GPL(flush_iotlb_all);
@@ -385,9 +377,7 @@ EXPORT_SYMBOL_GPL(flush_iotlb_all);
*/
void iommu_set_twl(struct iommu *obj, bool on)
{
- clk_enable(obj->clk);
arch_iommu->set_twl(obj, on);
- clk_disable(obj->clk);
}
EXPORT_SYMBOL_GPL(iommu_set_twl);
@@ -398,12 +388,8 @@ ssize_t iommu_dump_ctx(struct iommu *obj, char *buf, ssize_t bytes)
if (!obj || !buf)
return -EINVAL;
- clk_enable(obj->clk);
-
bytes = arch_iommu->dump_ctx(obj, buf, bytes);
- clk_disable(obj->clk);
-
return bytes;
}
EXPORT_SYMBOL_GPL(iommu_dump_ctx);
@@ -415,7 +401,6 @@ static int __dump_tlb_entries(struct iommu *obj, struct cr_regs *crs, int num)
struct cr_regs tmp;
struct cr_regs *p = crs;
- clk_enable(obj->clk);
iotlb_lock_get(obj, &saved);
for_each_iotlb_cr(obj, num, i, tmp) {
@@ -425,7 +410,6 @@ static int __dump_tlb_entries(struct iommu *obj, struct cr_regs *crs, int num)
}
iotlb_lock_set(obj, &saved);
- clk_disable(obj->clk);
return p - crs;
}
@@ -471,22 +455,15 @@ EXPORT_SYMBOL_GPL(foreach_iommu_device);
*/
static void flush_iopgd_range(u32 *first, u32 *last)
{
- /* FIXME: L2 cache should be taken care of if it exists */
- do {
- asm("mcr p15, 0, %0, c7, c10, 1 @ flush_pgd"
- : : "r" (first));
- first += L1_CACHE_BYTES / sizeof(*first);
- } while (first <= last);
+ dmac_flush_range(first, last);
+ outer_flush_range(virt_to_phys(first), virt_to_phys(last));
}
+
static void flush_iopte_range(u32 *first, u32 *last)
{
- /* FIXME: L2 cache should be taken care of if it exists */
- do {
- asm("mcr p15, 0, %0, c7, c10, 1 @ flush_pte"
- : : "r" (first));
- first += L1_CACHE_BYTES / sizeof(*first);
- } while (first <= last);
+ dmac_flush_range(first, last);
+ outer_flush_range(virt_to_phys(first), virt_to_phys(last));
}
static void iopte_free(u32 *iopte)
@@ -515,7 +492,7 @@ static u32 *iopte_alloc(struct iommu *obj, u32 *iopgd, u32 da)
return ERR_PTR(-ENOMEM);
*iopgd = virt_to_phys(iopte) | IOPGD_TABLE;
- flush_iopgd_range(iopgd, iopgd);
+ flush_iopgd_range(iopgd, iopgd + 1);
dev_vdbg(obj->dev, "%s: a new pte:%p\n", __func__, iopte);
} else {
@@ -544,7 +521,7 @@ static int iopgd_alloc_section(struct iommu *obj, u32 da, u32 pa, u32 prot)
}
*iopgd = (pa & IOSECTION_MASK) | prot | IOPGD_SECTION;
- flush_iopgd_range(iopgd, iopgd);
+ flush_iopgd_range(iopgd, iopgd + 1);
return 0;
}
@@ -561,7 +538,7 @@ static int iopgd_alloc_super(struct iommu *obj, u32 da, u32 pa, u32 prot)
for (i = 0; i < 16; i++)
*(iopgd + i) = (pa & IOSUPER_MASK) | prot | IOPGD_SUPER;
- flush_iopgd_range(iopgd, iopgd + 15);
+ flush_iopgd_range(iopgd, iopgd + 16);
return 0;
}
@@ -574,7 +551,7 @@ static int iopte_alloc_page(struct iommu *obj, u32 da, u32 pa, u32 prot)
return PTR_ERR(iopte);
*iopte = (pa & IOPAGE_MASK) | prot | IOPTE_SMALL;
- flush_iopte_range(iopte, iopte);
+ flush_iopte_range(iopte, iopte + 1);
dev_vdbg(obj->dev, "%s: da:%08x pa:%08x pte:%p *pte:%08x\n",
__func__, da, pa, iopte, *iopte);
@@ -599,7 +576,7 @@ static int iopte_alloc_large(struct iommu *obj, u32 da, u32 pa, u32 prot)
for (i = 0; i < 16; i++)
*(iopte + i) = (pa & IOLARGE_MASK) | prot | IOPTE_LARGE;
- flush_iopte_range(iopte, iopte + 15);
+ flush_iopte_range(iopte, iopte + 16);
return 0;
}
@@ -649,6 +626,11 @@ int iopgtable_store_entry(struct iommu *obj, struct iotlb_entry *e)
{
int err;
+ if (obj && obj->secure_mode) {
+ WARN_ON(1);
+ return -EBUSY;
+ }
+
flush_iotlb_page(obj, e->da);
err = iopgtable_store_entry_core(obj, e);
#ifdef PREFETCH_IOTLB
@@ -670,6 +652,11 @@ void iopgtable_lookup_entry(struct iommu *obj, u32 da, u32 **ppgd, u32 **ppte)
{
u32 *iopgd, *iopte = NULL;
+ if (obj && obj->secure_mode) {
+ WARN_ON(1);
+ return;
+ }
+
iopgd = iopgd_offset(obj, da);
if (!*iopgd)
goto out;
@@ -739,6 +726,11 @@ size_t iopgtable_clear_entry(struct iommu *obj, u32 da)
{
size_t bytes;
+ if (obj && obj->secure_mode) {
+ WARN_ON(1);
+ return 0;
+ }
+
spin_lock(&obj->page_table_lock);
bytes = iopgtable_clear_entry_core(obj, da);
@@ -770,7 +762,7 @@ static void iopgtable_clear_entry_all(struct iommu *obj)
iopte_free(iopte_offset(iopgd, 0));
*iopgd = 0;
- flush_iopgd_range(iopgd, iopgd);
+ flush_iopgd_range(iopgd, iopgd + 1);
}
flush_iotlb_all(obj);
@@ -790,9 +782,7 @@ static irqreturn_t iommu_fault_handler(int irq, void *data)
if (!obj->refcount)
return IRQ_NONE;
- clk_enable(obj->clk);
errs = iommu_report_fault(obj, &da);
- clk_disable(obj->clk);
if (errs == 0)
return IRQ_HANDLED;
@@ -800,7 +790,7 @@ static irqreturn_t iommu_fault_handler(int irq, void *data)
if (obj->isr && !obj->isr(obj, da, errs, obj->isr_priv))
return IRQ_HANDLED;
- iommu_disable(obj);
+ iommu_write_reg(obj, 0, MMU_IRQENABLE);
iopgd = iopgd_offset(obj, da);
@@ -839,7 +829,7 @@ int iommu_set_da_range(struct iommu *obj, u32 start, u32 end)
{
if (!obj)
- return -EFAULT;
+ return -EIO;
if (end < start || !PAGE_ALIGN(start | end))
return -EINVAL;
@@ -871,9 +861,13 @@ struct iommu *iommu_get(const char *name)
mutex_lock(&obj->iommu_lock);
if (obj->refcount++ == 0) {
+ dev_info(obj->dev, "%s: %s qos_request\n", __func__, obj->name);
+ pm_qos_update_request(obj->qos_request, 10);
err = iommu_enable(obj);
- if (err)
+ if (err) {
+ pm_qos_update_request(obj->qos_request, -1);
goto err_enable;
+ }
flush_iotlb_all(obj);
}
@@ -906,8 +900,16 @@ void iommu_put(struct iommu *obj)
mutex_lock(&obj->iommu_lock);
- if (--obj->refcount == 0)
+ if (!obj->refcount) {
+ dev_err(obj->dev, "%s: %s unbalanced iommu_get/put\n",
+ __func__, obj->name);
+ mutex_unlock(&obj->iommu_lock);
+ return;
+ }
+
+ if (--obj->refcount == 0) {
iommu_disable(obj);
+ pm_qos_update_request(obj->qos_request, -1);
+ }
module_put(obj->owner);
@@ -944,6 +946,30 @@ int iommu_set_isr(const char *name,
}
EXPORT_SYMBOL_GPL(iommu_set_isr);
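+
+/**
+ * iommu_set_secure - switch an iommu into or out of secure mode
+ * @name: alias name of the target iommu
+ * @enable: true to enter secure mode, false to leave it
+ * @data: secure page table (TTB) to use while in secure mode
+ *
+ * Returns -ENODEV if no iommu matches @name, -EBUSY if the iommu is
+ * currently in use, and 0 on success.
+ */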
+int iommu_set_secure(const char *name, bool enable, void *data)
+{
+ struct device *dev;
+ struct iommu *obj;
+
+ dev = driver_find_device(&omap_iommu_driver.driver, NULL, (void *)name,
+ device_match_by_alias);
+ if (!dev)
+ return -ENODEV;
+
+ obj = to_iommu(dev);
+ mutex_lock(&obj->iommu_lock);
+ if (obj->refcount) {
+ mutex_unlock(&obj->iommu_lock);
+ return -EBUSY;
+ }
+ obj->secure_mode = enable;
+ obj->secure_ttb = data;
+ mutex_unlock(&obj->iommu_lock);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(iommu_set_secure);
+
/*
* OMAP Device MMU(IOMMU) detection
*/
@@ -951,25 +977,17 @@ static int __devinit omap_iommu_probe(struct platform_device *pdev)
{
int err = -ENODEV;
void *p;
- int irq;
struct iommu *obj;
- struct resource *res;
struct iommu_platform_data *pdata = pdev->dev.platform_data;
- if (pdev->num_resources != 2)
- return -EINVAL;
-
obj = kzalloc(sizeof(*obj) + MMU_REG_SIZE, GFP_KERNEL);
if (!obj)
return -ENOMEM;
- obj->clk = clk_get(&pdev->dev, pdata->clk_name);
- if (IS_ERR(obj->clk))
- goto err_clk;
-
obj->nr_tlb_entries = pdata->nr_tlb_entries;
obj->name = pdata->name;
obj->dev = &pdev->dev;
+ obj->pdev = pdev;
obj->ctx = (void *)obj + sizeof(*obj);
obj->da_start = pdata->da_start;
obj->da_end = pdata->da_end;
@@ -979,31 +997,18 @@ static int __devinit omap_iommu_probe(struct platform_device *pdev)
spin_lock_init(&obj->page_table_lock);
INIT_LIST_HEAD(&obj->mmap);
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!res) {
- err = -ENODEV;
- goto err_mem;
- }
+ obj->regbase = pdata->io_base;
- res = request_mem_region(res->start, resource_size(res),
- dev_name(&pdev->dev));
- if (!res) {
- err = -EIO;
- goto err_mem;
+ obj->qos_request = kzalloc(sizeof(*obj->qos_request), GFP_KERNEL);
+ if (!obj->qos_request) {
+ kfree(obj);
+ return -ENOMEM;
}
- obj->regbase = ioremap(res->start, resource_size(res));
- if (!obj->regbase) {
- err = -ENOMEM;
- goto err_ioremap;
- }
+ pm_qos_add_request(obj->qos_request, PM_QOS_CPU_DMA_LATENCY,
+ PM_QOS_DEFAULT_VALUE);
- irq = platform_get_irq(pdev, 0);
- if (irq < 0) {
- err = -ENODEV;
- goto err_irq;
- }
- err = request_irq(irq, iommu_fault_handler, IRQF_SHARED,
+ err = request_irq(pdata->irq, iommu_fault_handler, IRQF_SHARED,
dev_name(&pdev->dev), obj);
if (err < 0)
goto err_irq;
@@ -1024,36 +1029,27 @@ static int __devinit omap_iommu_probe(struct platform_device *pdev)
return 0;
err_pgd:
- free_irq(irq, obj);
+ free_irq(pdata->irq, obj);
err_irq:
- iounmap(obj->regbase);
-err_ioremap:
- release_mem_region(res->start, resource_size(res));
-err_mem:
- clk_put(obj->clk);
-err_clk:
kfree(obj);
return err;
}
static int __devexit omap_iommu_remove(struct platform_device *pdev)
{
- int irq;
- struct resource *res;
struct iommu *obj = platform_get_drvdata(pdev);
+ struct iommu_platform_data *pdata = pdev->dev.platform_data;
+
+ free_irq(pdata->irq, obj);
platform_set_drvdata(pdev, NULL);
iopgtable_clear_entry_all(obj);
free_pages((unsigned long)obj->iopgd, get_order(IOPGD_TABLE_SIZE));
- irq = platform_get_irq(pdev, 0);
- free_irq(irq, obj);
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- release_mem_region(res->start, resource_size(res));
- iounmap(obj->regbase);
+ pm_qos_remove_request(obj->qos_request);
+ kfree(obj->qos_request);
- clk_put(obj->clk);
dev_info(&pdev->dev, "%s removed\n", obj->name);
kfree(obj);
return 0;
diff --git a/arch/arm/plat-omap/iovmm.c b/arch/arm/plat-omap/iovmm.c
index 83a37c5..6e711b9 100644
--- a/arch/arm/plat-omap/iovmm.c
+++ b/arch/arm/plat-omap/iovmm.c
@@ -163,10 +163,10 @@ static void sgtable_free(struct sg_table *sgt)
if (!sgt)
return;
+ pr_debug("%s: sgt:%p\n", __func__, sgt);
+
sg_free_table(sgt);
kfree(sgt);
-
- pr_debug("%s: sgt:%p\n", __func__, sgt);
}
/* map 'sglist' to a contiguous mpu virtual area and return 'va' */
diff --git a/arch/arm/plat-omap/mailbox.c b/arch/arm/plat-omap/mailbox.c
index 69ddc9f..653153e 100644
--- a/arch/arm/plat-omap/mailbox.c
+++ b/arch/arm/plat-omap/mailbox.c
@@ -29,6 +29,7 @@
#include <linux/kfifo.h>
#include <linux/err.h>
#include <linux/notifier.h>
+#include <linux/pm_qos_params.h>
#include <plat/mailbox.h>
@@ -36,6 +37,10 @@ static struct omap_mbox **mboxes;
static int mbox_configured;
static DEFINE_MUTEX(mbox_configured_lock);
+struct pm_qos_request_list mbox_qos_request;
+
+#define SET_MPU_CORE_CONSTRAINT 10
+#define CLEAR_MPU_CORE_CONSTRAINT -1
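+
+/*
+ * While any mailbox is in use, CPU/DMA wakeup latency is capped at
+ * SET_MPU_CORE_CONSTRAINT usec via pm_qos; CLEAR_MPU_CORE_CONSTRAINT
+ * (PM_QOS_DEFAULT_VALUE) drops the constraint again when the last
+ * user goes away.
+ */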
static unsigned int mbox_kfifo_size = CONFIG_OMAP_MBOX_KFIFO_SIZE;
module_param(mbox_kfifo_size, uint, S_IRUGO);
@@ -251,6 +256,8 @@ static int omap_mbox_startup(struct omap_mbox *mbox)
mutex_lock(&mbox_configured_lock);
if (!mbox_configured++) {
+ pm_qos_update_request(&mbox_qos_request,
+ SET_MPU_CORE_CONSTRAINT);
if (likely(mbox->ops->startup)) {
ret = mbox->ops->startup(mbox);
if (unlikely(ret))
@@ -260,13 +267,6 @@ static int omap_mbox_startup(struct omap_mbox *mbox)
}
if (!mbox->use_count++) {
- ret = request_irq(mbox->irq, mbox_interrupt, IRQF_SHARED,
- mbox->name, mbox);
- if (unlikely(ret)) {
- pr_err("failed to register mailbox interrupt:%d\n",
- ret);
- goto fail_request_irq;
- }
mq = mbox_queue_alloc(mbox, NULL, mbox_tx_tasklet);
if (!mq) {
ret = -ENOMEM;
@@ -281,20 +281,29 @@ static int omap_mbox_startup(struct omap_mbox *mbox)
}
mbox->rxq = mq;
mq->mbox = mbox;
+ ret = request_irq(mbox->irq, mbox_interrupt, IRQF_SHARED,
+ mbox->name, mbox);
+ if (unlikely(ret)) {
+ pr_err("failed to register mailbox interrupt:%d\n",
+ ret);
+ goto fail_request_irq;
+ }
}
mutex_unlock(&mbox_configured_lock);
return 0;
+fail_request_irq:
+ mbox_queue_free(mbox->rxq);
fail_alloc_rxq:
mbox_queue_free(mbox->txq);
fail_alloc_txq:
- free_irq(mbox->irq, mbox);
-fail_request_irq:
if (mbox->ops->shutdown)
mbox->ops->shutdown(mbox);
mbox->use_count--;
fail_startup:
- mbox_configured--;
+ if (!--mbox_configured)
+ pm_qos_update_request(&mbox_qos_request,
+ CLEAR_MPU_CORE_CONSTRAINT);
mutex_unlock(&mbox_configured_lock);
return ret;
}
@@ -306,14 +315,17 @@ static void omap_mbox_fini(struct omap_mbox *mbox)
if (!--mbox->use_count) {
free_irq(mbox->irq, mbox);
tasklet_kill(&mbox->txq->tasklet);
- flush_work_sync(&mbox->rxq->work);
+ flush_work_sync(&mbox->rxq->work);
mbox_queue_free(mbox->txq);
mbox_queue_free(mbox->rxq);
}
if (likely(mbox->ops->shutdown)) {
- if (!--mbox_configured)
+ if (!--mbox_configured) {
mbox->ops->shutdown(mbox);
+ pm_qos_update_request(&mbox_qos_request,
+ CLEAR_MPU_CORE_CONSTRAINT);
+ }
}
mutex_unlock(&mbox_configured_lock);
@@ -350,7 +362,8 @@ EXPORT_SYMBOL(omap_mbox_get);
void omap_mbox_put(struct omap_mbox *mbox, struct notifier_block *nb)
{
- blocking_notifier_chain_unregister(&mbox->notifier, nb);
+ if (nb)
+ blocking_notifier_chain_unregister(&mbox->notifier, nb);
omap_mbox_fini(mbox);
}
EXPORT_SYMBOL(omap_mbox_put);
@@ -395,6 +408,7 @@ int omap_mbox_unregister(void)
for (i = 0; mboxes[i]; i++)
device_unregister(mboxes[i]->dev);
+
mboxes = NULL;
return 0;
}
@@ -413,6 +427,8 @@ static int __init omap_mbox_init(void)
mbox_kfifo_size = max_t(unsigned int, mbox_kfifo_size,
sizeof(mbox_msg_t));
+ pm_qos_add_request(&mbox_qos_request, PM_QOS_CPU_DMA_LATENCY,
+ PM_QOS_DEFAULT_VALUE);
return 0;
}
subsys_initcall(omap_mbox_init);
@@ -420,6 +436,7 @@ subsys_initcall(omap_mbox_init);
static void __exit omap_mbox_exit(void)
{
class_unregister(&omap_mbox_class);
+ pm_qos_remove_request(&mbox_qos_request);
}
module_exit(omap_mbox_exit);
diff --git a/arch/arm/plat-omap/mcbsp.c b/arch/arm/plat-omap/mcbsp.c
index 5587acf..e02baa4 100644
--- a/arch/arm/plat-omap/mcbsp.c
+++ b/arch/arm/plat-omap/mcbsp.c
@@ -75,6 +75,11 @@ static int omap_mcbsp_st_read(struct omap_mcbsp *mcbsp, u16 reg)
{
return __raw_readl(mcbsp->st_data->io_base_st + reg);
}
+
+#define MCBSP_ST_READ(mcbsp, reg) \
+ omap_mcbsp_st_read(mcbsp, OMAP_ST_REG_##reg)
+#define MCBSP_ST_WRITE(mcbsp, reg, val) \
+ omap_mcbsp_st_write(mcbsp, OMAP_ST_REG_##reg, val)
#endif
#define MCBSP_READ(mcbsp, reg) \
@@ -84,11 +89,6 @@ static int omap_mcbsp_st_read(struct omap_mcbsp *mcbsp, u16 reg)
#define MCBSP_READ_CACHE(mcbsp, reg) \
omap_mcbsp_read(mcbsp, OMAP_MCBSP_REG_##reg, 1)
-#define MCBSP_ST_READ(mcbsp, reg) \
- omap_mcbsp_st_read(mcbsp, OMAP_ST_REG_##reg)
-#define MCBSP_ST_WRITE(mcbsp, reg, val) \
- omap_mcbsp_st_write(mcbsp, OMAP_ST_REG_##reg, val)
-
static void omap_mcbsp_dump_reg(u8 id)
{
struct omap_mcbsp *mcbsp = id_to_mcbsp_ptr(id);
@@ -292,14 +292,16 @@ int omap_mcbsp_dma_reg_params(unsigned int id, unsigned int stream)
}
EXPORT_SYMBOL(omap_mcbsp_dma_reg_params);
-#ifdef CONFIG_ARCH_OMAP3
+#if defined(CONFIG_ARCH_OMAP3) || defined(CONFIG_ARCH_OMAP4)
static struct omap_device *find_omap_device_by_dev(struct device *dev)
{
struct platform_device *pdev = container_of(dev,
struct platform_device, dev);
return container_of(pdev, struct omap_device, pdev);
}
+#endif
+#ifdef CONFIG_ARCH_OMAP3
static void omap_st_on(struct omap_mcbsp *mcbsp)
{
unsigned int w;
@@ -550,7 +552,12 @@ int omap_st_is_enabled(unsigned int id)
return st_data->enabled;
}
EXPORT_SYMBOL(omap_st_is_enabled);
+#else
+static inline void omap_st_start(struct omap_mcbsp *mcbsp) {}
+static inline void omap_st_stop(struct omap_mcbsp *mcbsp) {}
+#endif
+#if defined(CONFIG_ARCH_OMAP3) || defined(CONFIG_ARCH_OMAP4)
/*
* omap_mcbsp_set_rx_threshold configures the transmit threshold in words.
* The threshold parameter is 1 based, and it is converted (threshold - 1)
@@ -754,8 +761,6 @@ static inline void omap34xx_mcbsp_free(struct omap_mcbsp *mcbsp)
#else
static inline void omap34xx_mcbsp_request(struct omap_mcbsp *mcbsp) {}
static inline void omap34xx_mcbsp_free(struct omap_mcbsp *mcbsp) {}
-static inline void omap_st_start(struct omap_mcbsp *mcbsp) {}
-static inline void omap_st_stop(struct omap_mcbsp *mcbsp) {}
#endif
/*
@@ -991,6 +996,25 @@ void omap_mcbsp_start(unsigned int id, int tx, int rx)
MCBSP_WRITE(mcbsp, RCCR, w);
}
+ /* Disable and re-enable transmitter if ready */
+ if (tx && (MCBSP_READ(mcbsp, SPCR2) & XRDY)) {
+ MCBSP_WRITE(mcbsp, SPCR2,
+ MCBSP_READ_CACHE(mcbsp, SPCR2) &
+ (~XRST));
+ MCBSP_WRITE(mcbsp, SPCR2,
+ MCBSP_READ_CACHE(mcbsp, SPCR2) |
+ (XRST));
+ }
+ /* Disable and re-enable receiver if ready */
+ if (rx && (MCBSP_READ(mcbsp, SPCR1) & RRDY)) {
+ MCBSP_WRITE(mcbsp, SPCR1,
+ MCBSP_READ_CACHE(mcbsp, SPCR1) &
+ (~RRST));
+ MCBSP_WRITE(mcbsp, SPCR1,
+ MCBSP_READ_CACHE(mcbsp, SPCR1) |
+ (RRST));
+ }
+
/* Dump McBSP Regs */
omap_mcbsp_dump_reg(id);
}
@@ -1522,7 +1546,7 @@ void omap_mcbsp_set_spi_mode(unsigned int id,
}
EXPORT_SYMBOL(omap_mcbsp_set_spi_mode);
-#ifdef CONFIG_ARCH_OMAP3
+#if defined(CONFIG_ARCH_OMAP3) || defined(CONFIG_ARCH_OMAP4)
#define max_thres(m) (mcbsp->pdata->buffer_size)
#define valid_threshold(m, val) ((val) <= max_thres(m))
#define THRESHOLD_PROP_BUILDER(prop) \
@@ -1613,6 +1637,29 @@ unlock:
static DEVICE_ATTR(dma_op_mode, 0644, dma_op_mode_show, dma_op_mode_store);
+static const struct attribute *additional_attrs[] = {
+ &dev_attr_max_tx_thres.attr,
+ &dev_attr_max_rx_thres.attr,
+ &dev_attr_dma_op_mode.attr,
+ NULL,
+};
+
+static const struct attribute_group additional_attr_group = {
+ .attrs = (struct attribute **)additional_attrs,
+};
+
+static inline int __devinit omap_additional_add(struct device *dev)
+{
+ return sysfs_create_group(&dev->kobj, &additional_attr_group);
+}
+
+static inline void __devexit omap_additional_remove(struct device *dev)
+{
+ sysfs_remove_group(&dev->kobj, &additional_attr_group);
+}
+#endif
+
+#ifdef CONFIG_ARCH_OMAP3
static ssize_t st_taps_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
@@ -1671,27 +1718,6 @@ out:
static DEVICE_ATTR(st_taps, 0644, st_taps_show, st_taps_store);
-static const struct attribute *additional_attrs[] = {
- &dev_attr_max_tx_thres.attr,
- &dev_attr_max_rx_thres.attr,
- &dev_attr_dma_op_mode.attr,
- NULL,
-};
-
-static const struct attribute_group additional_attr_group = {
- .attrs = (struct attribute **)additional_attrs,
-};
-
-static inline int __devinit omap_additional_add(struct device *dev)
-{
- return sysfs_create_group(&dev->kobj, &additional_attr_group);
-}
-
-static inline void __devexit omap_additional_remove(struct device *dev)
-{
- sysfs_remove_group(&dev->kobj, &additional_attr_group);
-}
-
static const struct attribute *sidetone_attrs[] = {
&dev_attr_st_taps.attr,
NULL,
@@ -1749,11 +1775,16 @@ static void __devexit omap_st_remove(struct omap_mcbsp *mcbsp)
kfree(st_data);
}
}
+#else
+static inline int __devinit omap_st_add(struct omap_mcbsp *mcbsp) { return 0; }
+static inline void __devexit omap_st_remove(struct omap_mcbsp *mcbsp) {}
+#endif
+#if defined(CONFIG_ARCH_OMAP3) || defined(CONFIG_ARCH_OMAP4)
static inline void __devinit omap34xx_device_init(struct omap_mcbsp *mcbsp)
{
mcbsp->dma_op_mode = MCBSP_DMA_MODE_ELEMENT;
- if (cpu_is_omap34xx()) {
+ if (cpu_is_omap34xx() || cpu_is_omap44xx()) {
/*
* Initially configure the maximum thresholds to a safe value.
* The McBSP FIFO usage with these values should not go under
@@ -1771,26 +1802,26 @@ static inline void __devinit omap34xx_device_init(struct omap_mcbsp *mcbsp)
if (omap_additional_add(mcbsp->dev))
dev_warn(mcbsp->dev,
"Unable to create additional controls\n");
+ } else {
+ mcbsp->max_tx_thres = -EINVAL;
+ mcbsp->max_rx_thres = -EINVAL;
+ }
+ if (cpu_is_omap34xx()) {
if (mcbsp->id == 2 || mcbsp->id == 3)
if (omap_st_add(mcbsp))
dev_warn(mcbsp->dev,
"Unable to create sidetone controls\n");
-
- } else {
- mcbsp->max_tx_thres = -EINVAL;
- mcbsp->max_rx_thres = -EINVAL;
}
}
static inline void __devexit omap34xx_device_exit(struct omap_mcbsp *mcbsp)
{
- if (cpu_is_omap34xx()) {
+ if (cpu_is_omap34xx() || cpu_is_omap44xx())
omap_additional_remove(mcbsp->dev);
-
+ if (cpu_is_omap34xx())
if (mcbsp->id == 2 || mcbsp->id == 3)
omap_st_remove(mcbsp);
- }
}
#else
static inline void __devinit omap34xx_device_init(struct omap_mcbsp *mcbsp) {}
diff --git a/arch/arm/plat-omap/omap-pm-helper.c b/arch/arm/plat-omap/omap-pm-helper.c
new file mode 100644
index 0000000..2fdd7af
--- /dev/null
+++ b/arch/arm/plat-omap/omap-pm-helper.c
@@ -0,0 +1,345 @@
+/*
+ * omap-pm-helper.c - OMAP power management interface helpers
+ *
+ * Copyright (C) 2008-2011 Texas Instruments, Inc.
+ * Copyright (C) 2008-2009 Nokia Corporation
+ * Vishwanath BS
+ *
+ * This code is based on plat-omap/omap-pm-noop.c.
+ *
+ * Interface developed by (in alphabetical order):
+ * Karthik Dasu, Tony Lindgren, Rajendra Nayak, Sakari Poussa, Veeramanikandan
+ * Raju, Anand Sawant, Igor Stoppa, Paul Walmsley, Richard Woodruff
+ */
+
+#undef DEBUG
+
+#include <linux/init.h>
+#include <linux/cpufreq.h>
+#include <linux/device.h>
+#include <linux/errno.h>
+#include <linux/err.h>
+#include <linux/slab.h>
+#include <linux/list.h>
+#include <linux/debugfs.h>
+#include <linux/seq_file.h>
+
+/* Interface documentation is in mach/omap-pm.h */
+#include <plat/omap-pm.h>
+#include <plat/omap_device.h>
+#include <plat/common.h>
+#include "../mach-omap2/powerdomain.h"
+#include "../mach-omap2/dvfs.h"
+#include "omap-pm-helper.h"
+
+struct omap_opp *dsp_opps;
+struct omap_opp *mpu_opps;
+struct omap_opp *l3_opps;
+
+static DEFINE_MUTEX(bus_tput_mutex);
+static DEFINE_MUTEX(mpu_tput_mutex);
+static DEFINE_MUTEX(mpu_lat_mutex);
+
+/* Used to model an interconnect throughput */
+static struct interconnect_tput {
+ /* Total no of users at any point of interconnect */
+ u8 no_of_users;
+ /* List of all the current users for interconnect */
+ struct list_head users_list;
+ struct list_head node;
+ /* Protect interconnect throughput */
+ struct mutex throughput_mutex;
+ /* Target level for interconnect throughput */
+ unsigned long target_level;
+
+} *bus_tput;
+
+/* Used to represent a user of an interconnect throughput */
+struct users {
+ /* Device pointer used to uniquely identify the user */
+ struct device *dev;
+ struct list_head node;
+ /* Current level as requested for interconnect throughput by the user */
+ u32 level;
+};
+
+/* Private/Internal Functions */
+
+/**
+ * user_lookup - look up a user by its device pointer
+ * @dev: the device to be looked up
+ *
+ * Looks for an interconnect user by its device pointer. Returns a
+ * pointer to the struct users if found, else returns NULL.
+ */
+static struct users *user_lookup(struct device *dev)
+{
+ struct users *usr, *tmp_usr;
+
+ usr = NULL;
+ list_for_each_entry(tmp_usr, &bus_tput->users_list, node) {
+ if (tmp_usr->dev == dev) {
+ usr = tmp_usr;
+ break;
+ }
+ }
+
+ return usr;
+}
+
+/**
+ * get_user - allocate a new users_list struct dynamically
+ *
+ * This function dynamically allocates the user node.
+ * Returns a pointer to the users struct on success; on allocation
+ * failure it returns ERR_PTR(-ENOMEM).
+ */
+static struct users *get_user(void)
+{
+ struct users *user;
+
+ user = kmalloc(sizeof(struct users), GFP_KERNEL);
+ if (!user) {
+ pr_err("%s FATAL ERROR: kmalloc failed\n", __func__);
+ return ERR_PTR(-ENOMEM);
+ }
+ return user;
+}
+
+#ifdef CONFIG_PM_DEBUG
+static int pm_dbg_show_tput(struct seq_file *s, void *unused)
+{
+ struct users *usr;
+
+ mutex_lock(&bus_tput->throughput_mutex);
+ list_for_each_entry(usr, &bus_tput->users_list, node)
+ seq_printf(s, "%s: %u\n", dev_name(usr->dev),
+ usr->level);
+ mutex_unlock(&bus_tput->throughput_mutex);
+
+ return 0;
+}
+
+static int pm_dbg_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, pm_dbg_show_tput,
+ &inode->i_private);
+}
+
+static const struct file_operations tputdebugfs_fops = {
+ .open = pm_dbg_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+#endif
+
+/**
+ * omap_bus_tput_init - initialize the interconnect throughput user list
+ *
+ * Allocates memory for the global throughput variable dynamically and
+ * initializes the user list, user count and throughput target level.
+ * Returns 0 on success, else returns -EINVAL if memory
+ * allocation fails.
+ */
+static int __init omap_bus_tput_init(void)
+{
+ bus_tput = kmalloc(sizeof(struct interconnect_tput), GFP_KERNEL);
+ if (!bus_tput) {
+ pr_err("%s FATAL ERROR: kmalloc failed\n", __func__);
+ return -EINVAL;
+ }
+ INIT_LIST_HEAD(&bus_tput->users_list);
+ mutex_init(&bus_tput->throughput_mutex);
+ bus_tput->no_of_users = 0;
+ bus_tput->target_level = 0;
+
+#ifdef CONFIG_PM_DEBUG
+ (void) debugfs_create_file("tput", S_IRUGO,
+ NULL, (void *)bus_tput, &tputdebugfs_fops);
+#endif
+
+ return 0;
+}
+
+/**
+ * add_req_tput - request an interconnect throughput level for a device
+ * @dev: uniquely identifies the caller
+ * @level: the requested interconnect bandwidth level in KiB/s
+ *
+ * This function recomputes the target interconnect bandwidth level
+ * based on the levels requested by all the users.
+ * Multiple calls to this function by the same device will
+ * replace the previously requested level.
+ * Returns the updated interconnect throughput level.
+ * In case of an invalid dev or user pointer, it returns 0.
+ */
+static unsigned long add_req_tput(struct device *dev, unsigned long level)
+{
+ int ret;
+ struct users *user;
+
+ if (!dev) {
+ pr_err("Invalid dev pointer\n");
+ return 0;
+ }
+ mutex_lock(&bus_tput->throughput_mutex);
+ user = user_lookup(dev);
+ if (user == NULL) {
+ user = get_user();
+ if (IS_ERR(user)) {
+ pr_err("Couldn't get user from the list to"
+ "add new throughput constraint");
+ ret = 0;
+ goto unlock;
+ }
+ bus_tput->target_level += level;
+ bus_tput->no_of_users++;
+ user->dev = dev;
+ list_add(&user->node, &bus_tput->users_list);
+ user->level = level;
+ } else {
+ bus_tput->target_level -= user->level;
+ bus_tput->target_level += level;
+ user->level = level;
+ }
+ ret = bus_tput->target_level;
+unlock:
+ mutex_unlock(&bus_tput->throughput_mutex);
+ return ret;
+}
+
+/**
+ * remove_req_tput - release a previously requested interconnect
+ * throughput level
+ * @dev: device whose request is being released
+ *
+ * This function recomputes the target interconnect throughput level
+ * after removing the level requested by the user.
+ * Returns 0 if the dev structure is invalid,
+ * else returns the modified interconnect throughput rate.
+ */
+static unsigned long remove_req_tput(struct device *dev)
+{
+ struct users *user;
+ int found = 0;
+ int ret;
+
+ mutex_lock(&bus_tput->throughput_mutex);
+ list_for_each_entry(user, &bus_tput->users_list, node) {
+ if (user->dev == dev) {
+ found = 1;
+ break;
+ }
+ }
+ if (!found) {
+ /* No such user exists */
+ pr_err("Invalid Device Structure\n");
+ ret = 0;
+ goto unlock;
+ }
+ bus_tput->target_level -= user->level;
+ bus_tput->no_of_users--;
+ list_del(&user->node);
+ kfree(user);
+ ret = bus_tput->target_level;
+unlock:
+ mutex_unlock(&bus_tput->throughput_mutex);
+ return ret;
+}
+
+int omap_pm_set_min_bus_tput_helper(struct device *dev, u8 agent_id, long r)
+{
+
+ int ret = 0;
+ struct device *l3_dev;
+ static struct device dummy_l3_dev = {
+ .init_name = "omap_pm_set_min_bus_tput",
+ };
+ unsigned long target_level = 0;
+
+ mutex_lock(&bus_tput_mutex);
+
+ l3_dev = omap2_get_l3_device();
+ if (!l3_dev) {
+ pr_err("Unable to get l3 device pointer");
+ ret = -EINVAL;
+ goto unlock;
+ }
+
+ if (r == -1)
+ target_level = remove_req_tput(dev);
+ else
+ target_level = add_req_tput(dev, r);
+
+ /*
+ * Convert the throughput (in KiB/s) into an L3 clock rate (in Hz):
+ * the interconnect moves 4 bytes per cycle and 1 KiB is approximated
+ * as 1000 bytes here.
+ */
+ target_level = (target_level * 1000) / 4;
+
+ ret = omap_device_scale(&dummy_l3_dev, l3_dev, target_level);
+ if (ret)
+ pr_err("Failed: change interconnect bandwidth to %ld\n",
+ target_level);
+unlock:
+ mutex_unlock(&bus_tput_mutex);
+ return ret;
+}
+
+int omap_pm_set_max_dev_wakeup_lat_helper(struct device *req_dev,
+ struct device *dev, long t)
+{
+ struct omap_device *odev;
+ struct powerdomain *pwrdm_dev;
+ struct platform_device *pdev;
+ int ret = 0;
+
+ if (!req_dev || !dev || t < -1) {
+ WARN(1, "OMAP PM: %s: invalid parameter(s)", __func__);
+ return -EINVAL;
+ }
+
+ /* Look for the device's power domain */
+ pdev = container_of(dev, struct platform_device, dev);
+
+ /* Try to catch non platform devices. */
+ if (pdev->name == NULL) {
+ pr_err("OMAP-PM: Error: platform device not valid\n");
+ return -EINVAL;
+ }
+
+ odev = to_omap_device(pdev);
+ if (odev) {
+ pwrdm_dev = omap_device_get_pwrdm(odev);
+ } else {
+ pr_err("OMAP-PM: Error: Could not find omap_device for %s\n",
+ pdev->name);
+ return -EINVAL;
+ }
+
+ /* Catch devices with undefined powerdomains. */
+ if (!pwrdm_dev) {
+ pr_err("OMAP-PM: Error: could not find parent pwrdm for %s\n",
+ pdev->name);
+ return -EINVAL;
+ }
+
+ if (t == -1)
+ ret = pwrdm_wakeuplat_release_constraint(pwrdm_dev, req_dev);
+ else
+ ret = pwrdm_wakeuplat_set_constraint(pwrdm_dev, req_dev, t);
+
+ return ret;
+}
+
+/* Must be called after clock framework is initialized */
+int __init omap_pm_if_init_helper(void)
+{
+ int ret;
+ ret = omap_bus_tput_init();
+ if (ret)
+ pr_err("Failed: init of interconnect bandwidth users list\n");
+ return ret;
+}
diff --git a/arch/arm/plat-omap/omap-pm-helper.h b/arch/arm/plat-omap/omap-pm-helper.h
new file mode 100644
index 0000000..9c4b5d7
--- /dev/null
+++ b/arch/arm/plat-omap/omap-pm-helper.h
@@ -0,0 +1,40 @@
+/*
+ * OMAP PM interface helpers
+ *
+ * Copyright (C) 2011 Texas Instruments Incorporated - http://www.ti.com/
+ * Nishanth Menon
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __OMAP_PM_HELPER_INTERFACE_H__
+#define __OMAP_PM_HELPER_INTERFACE_H__
+
+#ifdef CONFIG_OMAP_PM
+int omap_pm_set_min_bus_tput_helper(struct device *dev, u8 agent_id, long r);
+int omap_pm_set_max_dev_wakeup_lat_helper(struct device *req_dev,
+ struct device *dev, long t);
+int __init omap_pm_if_init_helper(void);
+
+#else
+static inline int omap_pm_set_min_bus_tput_helper(struct device *dev,
+ u8 agent_id, long r)
+{
+ return 0;
+}
+
+static inline int omap_pm_set_max_dev_wakeup_lat_helper(struct device *req_dev,
+ struct device *dev, long t)
+{
+ return 0;
+}
+
+static inline int omap_pm_if_init_helper(void)
+{
+ return 0;
+}
+#endif /* CONFIG_OMAP_PM */
+
+#endif /* __OMAP_PM_HELPER_INTERFACE_H__ */
diff --git a/arch/arm/plat-omap/omap-pm-interface.c b/arch/arm/plat-omap/omap-pm-interface.c
new file mode 100644
index 0000000..e166395
--- /dev/null
+++ b/arch/arm/plat-omap/omap-pm-interface.c
@@ -0,0 +1,251 @@
+/*
+ * omap-pm-interface.c - OMAP power management interface
+ *
+ * This code implements the OMAP power management interface to
+ * drivers, CPUIdle, CPUFreq, and DSP Bridge.
+ *
+ * Copyright (C) 2008-2011 Texas Instruments, Inc.
+ * Copyright (C) 2008-2009 Nokia Corporation
+ * Paul Walmsley
+ *
+ * Interface developed by (in alphabetical order):
+ * Karthik Dasu, Tony Lindgren, Rajendra Nayak, Sakari Poussa, Veeramanikandan
+ * Raju, Anand Sawant, Igor Stoppa, Paul Walmsley, Richard Woodruff
+ */
+
+#undef DEBUG
+
+#include <linux/init.h>
+#include <linux/cpufreq.h>
+#include <linux/device.h>
+#include <linux/platform_device.h>
+#include <linux/io.h>
+/* Interface documentation is in mach/omap-pm.h */
+#include <plat/omap-pm.h>
+#include <plat/omap_device.h>
+
+#include "omap-pm-helper.h"
+#include "../mach-omap2/prm44xx.h"
+
+bool off_mode_enabled;
+
+/*
+ * Device-driver-originated constraints (via board-*.c files)
+ * WARNING: Device drivers should now use pm_qos directly.
+ */
+int omap_pm_set_max_mpu_wakeup_lat(struct pm_qos_request_list **pmqos_req,
+ long t)
+{
+ WARN(1, "Deprecated %s: Driver should use pm_qos to add request\n",
+ __func__);
+
+ return -EINVAL;
+}
+
+int omap_pm_set_min_bus_tput(struct device *dev, u8 agent_id, long r)
+{
+ int ret;
+ if (!dev || (agent_id != OCP_INITIATOR_AGENT &&
+ agent_id != OCP_TARGET_AGENT)) {
+ WARN(1, "OMAP PM: %s: invalid parameter(s)", __func__);
+ return -EINVAL;
+ }
+
+ if (r == -1)
+ pr_debug("OMAP PM: remove min bus tput constraint: "
+ "dev %s for agent_id %d\n", dev_name(dev), agent_id);
+ else
+ pr_debug("OMAP PM: add min bus tput constraint: "
+ "dev %s for agent_id %d: rate %ld KiB\n",
+ dev_name(dev), agent_id, r);
+
+ ret = omap_pm_set_min_bus_tput_helper(dev, agent_id, r);
+
+ return ret;
+}
+
+int omap_pm_set_max_dev_wakeup_lat(struct device *req_dev, struct device *dev,
+ long t)
+{
+ int ret;
+ if (!req_dev || !dev || t < -1) {
+ WARN(1, "OMAP PM: %s: invalid parameter(s)", __func__);
+ return -EINVAL;
+ }
+
+ if (t == -1)
+ pr_debug("OMAP PM: remove max device latency constraint: "
+ "dev %s\n", dev_name(dev));
+ else
+ pr_debug("OMAP PM: add max device latency constraint: "
+ "dev %s, t = %ld usec\n", dev_name(dev), t);
+
+ ret = omap_pm_set_max_dev_wakeup_lat_helper(req_dev, dev, t);
+
+ return ret;
+}
+
+/* WARNING: Device drivers should now use pm_qos directly. */
+int omap_pm_set_max_sdma_lat(struct pm_qos_request_list **qos_request, long t)
+{
+ WARN(1, "Deprecated %s: Driver should use pm_qos to add request\n",
+ __func__);
+
+ return -EINVAL;
+}
+
+int omap_pm_set_min_clk_rate(struct device *dev, struct clk *c, long r)
+{
+ WARN(1, "Deprecated %s: Driver should use omap_device_scale/opp\n",
+ __func__);
+
+ return -EINVAL;
+}
+
+/*
+ * DSP Bridge-specific constraints
+ * WARNING: Device drivers should now use the opp layer/omap_device_scale directly.
+ */
+const struct omap_opp *omap_pm_dsp_get_opp_table(void)
+{
+ WARN(1, "Deprecated %s: Driver should use omap_device_scale/opp\n",
+ __func__);
+
+ return ERR_PTR(-EINVAL);
+}
+
+void omap_pm_dsp_set_min_opp(u8 opp_id)
+{
+ WARN(1, "Deprecated %s: Driver should use omap_device_scale/opp\n",
+ __func__);
+
+ return;
+}
+
+int omap_pm_set_min_mpu_freq(struct device *dev, unsigned long f)
+{
+ WARN(1, "Deprecated %s: Driver should NOT use this function\n",
+ __func__);
+
+ return -EINVAL;
+
+}
+
+EXPORT_SYMBOL(omap_pm_set_min_mpu_freq);
+
+u8 omap_pm_dsp_get_opp(void)
+{
+ WARN(1, "Deprecated %s: Driver should use omap_device_scale/opp\n",
+ __func__);
+
+ return 0;
+}
+
+/*
+ * CPUFreq-originated constraint
+ *
+ * In the future, this should be handled by custom OPP clocktype
+ * functions.
+ */
+
+struct cpufreq_frequency_table **omap_pm_cpu_get_freq_table(void)
+{
+ WARN(1, "Deprecated %s: Driver should use omap_device_scale/opp\n",
+ __func__);
+
+ return ERR_PTR(-EINVAL);
+}
+
+void omap_pm_cpu_set_freq(unsigned long f)
+{
+ WARN(1, "Deprecated %s: Driver should use omap_device_scale/opp\n",
+ __func__);
+
+ return;
+}
+
+unsigned long omap_pm_cpu_get_freq(void)
+{
+ WARN(1, "Deprecated %s: Driver should use omap_device_scale/opp\n",
+ __func__);
+
+ return 0;
+}
+
+/**
+ * omap_pm_enable_off_mode - notify OMAP PM that off-mode is enabled
+ *
+ * Intended for use only by OMAP PM core code to notify this layer
+ * that off mode has been enabled.
+ */
+void omap_pm_enable_off_mode(void)
+{
+ off_mode_enabled = true;
+}
+
+/**
+ * omap_pm_disable_off_mode - notify OMAP PM that off-mode is disabled
+ *
+ * Intended for use only by OMAP PM core code to notify this layer
+ * that off mode has been disabled.
+ */
+void omap_pm_disable_off_mode(void)
+{
+ off_mode_enabled = false;
+}
+
+bool omap_pm_was_context_lost(struct device *dev)
+{
+ struct platform_device *pdev;
+ struct omap_device *od;
+ struct omap_hwmod *oh;
+
+ if (!dev)
+ goto save_ctx;
+
+ pdev = container_of(dev, struct platform_device, dev);
+ od = container_of(pdev, struct omap_device, pdev);
+ oh = od->hwmods[0];
+
+ if (!oh || !cpu_is_omap44xx())
+ goto save_ctx;
+
+ if (oh->prcm.omap4.context_reg) {
+ u32 context_reg_val = 0;
+
+ /* Read what context was lost. */
+ context_reg_val = __raw_readl(oh->prcm.omap4.context_reg);
+
+ /* Clear context-lost bits after the read */
+ __raw_writel(context_reg_val, oh->prcm.omap4.context_reg);
+
+ /*
+ * ABE special case: only report context lost when we lose
+ * memory, otherwise constant firmware reloads cause problems.
+ */
+ if (oh->prcm.omap4.context_reg == OMAP4430_RM_ABE_AESS_CONTEXT)
+ context_reg_val &= (1 << 8);
+
+ return (context_reg_val != 0);
+ }
+
+save_ctx:
+ /* By default return true so that the driver will restore context */
+ return true;
+}
+
+/* Should be called before clk framework init */
+int __init omap_pm_if_early_init(void)
+{
+ return 0;
+}
+
+/* Must be called after clock framework is initialized */
+int __init omap_pm_if_init(void)
+{
+ return omap_pm_if_init_helper();
+}
+
+void omap_pm_if_exit(void)
+{
+ /* Deallocate CPUFreq frequency table here */
+}
diff --git a/arch/arm/plat-omap/omap-pm-noop.c b/arch/arm/plat-omap/omap-pm-noop.c
deleted file mode 100644
index b0471bb2..0000000
--- a/arch/arm/plat-omap/omap-pm-noop.c
+++ /dev/null
@@ -1,363 +0,0 @@
-/*
- * omap-pm-noop.c - OMAP power management interface - dummy version
- *
- * This code implements the OMAP power management interface to
- * drivers, CPUIdle, CPUFreq, and DSP Bridge. It is strictly for
- * debug/demonstration use, as it does nothing but printk() whenever a
- * function is called (when DEBUG is defined, below)
- *
- * Copyright (C) 2008-2009 Texas Instruments, Inc.
- * Copyright (C) 2008-2009 Nokia Corporation
- * Paul Walmsley
- *
- * Interface developed by (in alphabetical order):
- * Karthik Dasu, Tony Lindgren, Rajendra Nayak, Sakari Poussa, Veeramanikandan
- * Raju, Anand Sawant, Igor Stoppa, Paul Walmsley, Richard Woodruff
- */
-
-#undef DEBUG
-
-#include <linux/init.h>
-#include <linux/cpufreq.h>
-#include <linux/device.h>
-#include <linux/platform_device.h>
-
-/* Interface documentation is in mach/omap-pm.h */
-#include <plat/omap-pm.h>
-#include <plat/omap_device.h>
-
-static bool off_mode_enabled;
-static u32 dummy_context_loss_counter;
-
-/*
- * Device-driver-originated constraints (via board-*.c files)
- */
-
-int omap_pm_set_max_mpu_wakeup_lat(struct device *dev, long t)
-{
- if (!dev || t < -1) {
- WARN(1, "OMAP PM: %s: invalid parameter(s)", __func__);
- return -EINVAL;
- };
-
- if (t == -1)
- pr_debug("OMAP PM: remove max MPU wakeup latency constraint: "
- "dev %s\n", dev_name(dev));
- else
- pr_debug("OMAP PM: add max MPU wakeup latency constraint: "
- "dev %s, t = %ld usec\n", dev_name(dev), t);
-
- /*
- * For current Linux, this needs to map the MPU to a
- * powerdomain, then go through the list of current max lat
- * constraints on the MPU and find the smallest. If
- * the latency constraint has changed, the code should
- * recompute the state to enter for the next powerdomain
- * state.
- *
- * TI CDP code can call constraint_set here.
- */
-
- return 0;
-}
-
-int omap_pm_set_min_bus_tput(struct device *dev, u8 agent_id, unsigned long r)
-{
- if (!dev || (agent_id != OCP_INITIATOR_AGENT &&
- agent_id != OCP_TARGET_AGENT)) {
- WARN(1, "OMAP PM: %s: invalid parameter(s)", __func__);
- return -EINVAL;
- };
-
- if (r == 0)
- pr_debug("OMAP PM: remove min bus tput constraint: "
- "dev %s for agent_id %d\n", dev_name(dev), agent_id);
- else
- pr_debug("OMAP PM: add min bus tput constraint: "
- "dev %s for agent_id %d: rate %ld KiB\n",
- dev_name(dev), agent_id, r);
-
- /*
- * This code should model the interconnect and compute the
- * required clock frequency, convert that to a VDD2 OPP ID, then
- * set the VDD2 OPP appropriately.
- *
- * TI CDP code can call constraint_set here on the VDD2 OPP.
- */
-
- return 0;
-}
-
-int omap_pm_set_max_dev_wakeup_lat(struct device *req_dev, struct device *dev,
- long t)
-{
- if (!req_dev || !dev || t < -1) {
- WARN(1, "OMAP PM: %s: invalid parameter(s)", __func__);
- return -EINVAL;
- };
-
- if (t == -1)
- pr_debug("OMAP PM: remove max device latency constraint: "
- "dev %s\n", dev_name(dev));
- else
- pr_debug("OMAP PM: add max device latency constraint: "
- "dev %s, t = %ld usec\n", dev_name(dev), t);
-
- /*
- * For current Linux, this needs to map the device to a
- * powerdomain, then go through the list of current max lat
- * constraints on that powerdomain and find the smallest. If
- * the latency constraint has changed, the code should
- * recompute the state to enter for the next powerdomain
- * state. Conceivably, this code should also determine
- * whether to actually disable the device clocks or not,
- * depending on how long it takes to re-enable the clocks.
- *
- * TI CDP code can call constraint_set here.
- */
-
- return 0;
-}
-
-int omap_pm_set_max_sdma_lat(struct device *dev, long t)
-{
- if (!dev || t < -1) {
- WARN(1, "OMAP PM: %s: invalid parameter(s)", __func__);
- return -EINVAL;
- };
-
- if (t == -1)
- pr_debug("OMAP PM: remove max DMA latency constraint: "
- "dev %s\n", dev_name(dev));
- else
- pr_debug("OMAP PM: add max DMA latency constraint: "
- "dev %s, t = %ld usec\n", dev_name(dev), t);
-
- /*
- * For current Linux PM QOS params, this code should scan the
- * list of maximum CPU and DMA latencies and select the
- * smallest, then set cpu_dma_latency pm_qos_param
- * accordingly.
- *
- * For future Linux PM QOS params, with separate CPU and DMA
- * latency params, this code should just set the dma_latency param.
- *
- * TI CDP code can call constraint_set here.
- */
-
- return 0;
-}
-
-int omap_pm_set_min_clk_rate(struct device *dev, struct clk *c, long r)
-{
- if (!dev || !c || r < 0) {
- WARN(1, "OMAP PM: %s: invalid parameter(s)", __func__);
- return -EINVAL;
- }
-
- if (r == 0)
- pr_debug("OMAP PM: remove min clk rate constraint: "
- "dev %s\n", dev_name(dev));
- else
- pr_debug("OMAP PM: add min clk rate constraint: "
- "dev %s, rate = %ld Hz\n", dev_name(dev), r);
-
- /*
- * Code in a real implementation should keep track of these
- * constraints on the clock, and determine the highest minimum
- * clock rate. It should iterate over each OPP and determine
- * whether the OPP will result in a clock rate that would
- * satisfy this constraint (and any other PM constraint in effect
- * at that time). Once it finds the lowest-voltage OPP that
- * meets those conditions, it should switch to it, or return
- * an error if the code is not capable of doing so.
- */
-
- return 0;
-}
-
-/*
- * DSP Bridge-specific constraints
- */
-
-const struct omap_opp *omap_pm_dsp_get_opp_table(void)
-{
- pr_debug("OMAP PM: DSP request for OPP table\n");
-
- /*
- * Return DSP frequency table here: The final item in the
- * array should have .rate = .opp_id = 0.
- */
-
- return NULL;
-}
-
-void omap_pm_dsp_set_min_opp(u8 opp_id)
-{
- if (opp_id == 0) {
- WARN_ON(1);
- return;
- }
-
- pr_debug("OMAP PM: DSP requests minimum VDD1 OPP to be %d\n", opp_id);
-
- /*
- *
- * For l-o dev tree, our VDD1 clk is keyed on OPP ID, so we
- * can just test to see which is higher, the CPU's desired OPP
- * ID or the DSP's desired OPP ID, and use whichever is
- * highest.
- *
- * In CDP12.14+, the VDD1 OPP custom clock that controls the DSP
- * rate is keyed on MPU speed, not the OPP ID. So we need to
- * map the OPP ID to the MPU speed for use with clk_set_rate()
- * if it is higher than the current OPP clock rate.
- *
- */
-}
-
-
-u8 omap_pm_dsp_get_opp(void)
-{
- pr_debug("OMAP PM: DSP requests current DSP OPP ID\n");
-
- /*
- * For l-o dev tree, call clk_get_rate() on VDD1 OPP clock
- *
- * CDP12.14+:
- * Call clk_get_rate() on the OPP custom clock, map that to an
- * OPP ID using the tables defined in board-*.c/chip-*.c files.
- */
-
- return 0;
-}
-
-/*
- * CPUFreq-originated constraint
- *
- * In the future, this should be handled by custom OPP clocktype
- * functions.
- */
-
-struct cpufreq_frequency_table **omap_pm_cpu_get_freq_table(void)
-{
- pr_debug("OMAP PM: CPUFreq request for frequency table\n");
-
- /*
- * Return CPUFreq frequency table here: loop over
- * all VDD1 clkrates, pull out the mpu_ck frequencies, build
- * table
- */
-
- return NULL;
-}
-
-void omap_pm_cpu_set_freq(unsigned long f)
-{
- if (f == 0) {
- WARN_ON(1);
- return;
- }
-
- pr_debug("OMAP PM: CPUFreq requests CPU frequency to be set to %lu\n",
- f);
-
- /*
- * For l-o dev tree, determine whether MPU freq or DSP OPP id
- * freq is higher. Find the OPP ID corresponding to the
- * higher frequency. Call clk_round_rate() and clk_set_rate()
- * on the OPP custom clock.
- *
- * CDP should just be able to set the VDD1 OPP clock rate here.
- */
-}
-
-unsigned long omap_pm_cpu_get_freq(void)
-{
- pr_debug("OMAP PM: CPUFreq requests current CPU frequency\n");
-
- /*
- * Call clk_get_rate() on the mpu_ck.
- */
-
- return 0;
-}
-
-/**
- * omap_pm_enable_off_mode - notify OMAP PM that off-mode is enabled
- *
- * Intended for use only by OMAP PM core code to notify this layer
- * that off mode has been enabled.
- */
-void omap_pm_enable_off_mode(void)
-{
- off_mode_enabled = true;
-}
-
-/**
- * omap_pm_disable_off_mode - notify OMAP PM that off-mode is disabled
- *
- * Intended for use only by OMAP PM core code to notify this layer
- * that off mode has been disabled.
- */
-void omap_pm_disable_off_mode(void)
-{
- off_mode_enabled = false;
-}
-
-/*
- * Device context loss tracking
- */
-
-#ifdef CONFIG_ARCH_OMAP2PLUS
-
-u32 omap_pm_get_dev_context_loss_count(struct device *dev)
-{
- struct platform_device *pdev = to_platform_device(dev);
- u32 count;
-
- if (WARN_ON(!dev))
- return 0;
-
- if (dev->parent == &omap_device_parent) {
- count = omap_device_get_context_loss_count(pdev);
- } else {
- WARN_ONCE(off_mode_enabled, "omap_pm: using dummy context loss counter; device %s should be converted to omap_device",
- dev_name(dev));
- if (off_mode_enabled)
- dummy_context_loss_counter++;
- count = dummy_context_loss_counter;
- }
-
- pr_debug("OMAP PM: context loss count for dev %s = %d\n",
- dev_name(dev), count);
-
- return count;
-}
-
-#else
-
-u32 omap_pm_get_dev_context_loss_count(struct device *dev)
-{
- return dummy_context_loss_counter;
-}
-
-#endif
-
-/* Should be called before clk framework init */
-int __init omap_pm_if_early_init(void)
-{
- return 0;
-}
-
-/* Must be called after clock framework is initialized */
-int __init omap_pm_if_init(void)
-{
- return 0;
-}
-
-void omap_pm_if_exit(void)
-{
- /* Deallocate CPUFreq frequency table here */
-}
-
diff --git a/arch/arm/plat-omap/omap_device.c b/arch/arm/plat-omap/omap_device.c
index 49fc0df..92b4496 100644
--- a/arch/arm/plat-omap/omap_device.c
+++ b/arch/arm/plat-omap/omap_device.c
@@ -146,12 +146,12 @@ static int _omap_device_activate(struct omap_device *od, u8 ignore_lat)
odpl->activate_lat_worst = act_lat;
if (odpl->flags & OMAP_DEVICE_LATENCY_AUTO_ADJUST) {
odpl->activate_lat = act_lat;
- pr_warning("omap_device: %s.%d: new worst case "
+ pr_debug("omap_device: %s.%d: new worst case "
"activate latency %d: %llu\n",
od->pdev.name, od->pdev.id,
od->pm_lat_level, act_lat);
} else
- pr_warning("omap_device: %s.%d: activate "
+ pr_debug("omap_device: %s.%d: activate "
"latency %d higher than exptected. "
"(%llu > %d)\n",
od->pdev.name, od->pdev.id,
@@ -214,12 +214,12 @@ static int _omap_device_deactivate(struct omap_device *od, u8 ignore_lat)
odpl->deactivate_lat_worst = deact_lat;
if (odpl->flags & OMAP_DEVICE_LATENCY_AUTO_ADJUST) {
odpl->deactivate_lat = deact_lat;
- pr_warning("omap_device: %s.%d: new worst case "
+ pr_debug("omap_device: %s.%d: new worst case "
"deactivate latency %d: %llu\n",
od->pdev.name, od->pdev.id,
od->pm_lat_level, deact_lat);
} else
- pr_warning("omap_device: %s.%d: deactivate "
+ pr_debug("omap_device: %s.%d: deactivate "
"latency %d higher than exptected. "
"(%llu > %d)\n",
od->pdev.name, od->pdev.id,
@@ -311,7 +311,7 @@ static void _add_optional_clock_clkdev(struct omap_device *od,
* return the context loss counter for that hwmod, otherwise return
* zero.
*/
-u32 omap_device_get_context_loss_count(struct platform_device *pdev)
+int omap_device_get_context_loss_count(struct platform_device *pdev)
{
struct omap_device *od;
u32 ret = 0;
diff --git a/arch/arm/plat-omap/omap_rpmsg.c b/arch/arm/plat-omap/omap_rpmsg.c
new file mode 100644
index 0000000..c0257be
--- /dev/null
+++ b/arch/arm/plat-omap/omap_rpmsg.c
@@ -0,0 +1,601 @@
+/*
+ * Remote processor messaging transport (OMAP platform-specific bits)
+ *
+ * Copyright (C) 2011 Texas Instruments, Inc.
+ * Copyright (C) 2011 Google, Inc.
+ *
+ * Authors: Ohad Ben-Cohen <ohad@wizery.com>
+ * Brian Swetland <swetland@google.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#define pr_fmt(fmt) "%s: " fmt, __func__
+
+#include <linux/init.h>
+#include <linux/bootmem.h>
+#include <linux/virtio.h>
+#include <linux/virtio_config.h>
+#include <linux/virtio_ids.h>
+#include <linux/interrupt.h>
+#include <linux/virtio_ring.h>
+#include <linux/rpmsg.h>
+#include <linux/err.h>
+#include <linux/slab.h>
+#include <linux/notifier.h>
+#include <linux/memblock.h>
+#include <linux/remoteproc.h>
+#include <asm/io.h>
+
+#include <plat/rpmsg.h>
+#include <plat/mailbox.h>
+#include <plat/remoteproc.h>
+
+struct omap_rpmsg_vproc {
+ struct virtio_device vdev;
+ unsigned int vring[2]; /* mpu owns first vring, ipu owns the 2nd */
+ unsigned int buf_addr;
+ unsigned int buf_size; /* must be page-aligned */
+ void *buf_mapped;
+ char *mbox_name;
+ char *rproc_name;
+ struct omap_mbox *mbox;
+ struct rproc *rproc;
+ struct notifier_block nb;
+ struct notifier_block rproc_nb;
+ struct work_struct reset_work;
+ bool slave_reset;
+ struct omap_rpmsg_vproc *slave_next;
+ struct virtqueue *vq[2];
+ int base_vq_id;
+ int num_of_vqs;
+ struct rpmsg_channel_info *hardcoded_chnls;
+};
+
+#define to_omap_rpdev(vd) container_of(vd, struct omap_rpmsg_vproc, vdev)
+static void rpmsg_reset_work(struct work_struct *work);
+
+struct omap_rpmsg_vq_info {
+ __u16 num; /* number of entries in the virtio_ring */
+ __u16 vq_id; /* a globally unique index of this virtqueue */
+ void *addr; /* address where we mapped the virtio ring */
+ struct omap_rpmsg_vproc *rpdev;
+};
+
+/*
+ * For now, allocate 256 buffers of 512 bytes for each side. Each buffer
+ * will then have 16B for the msg header and 496B for the payload.
+ * This will require a total space of 256KB for the buffers themselves, and
+ * 3 pages for every vring (the size of the vring depends on the number of
+ * buffers it supports).
+ */
+#define RPMSG_NUM_BUFS (512)
+#define RPMSG_BUF_SIZE (512)
+#define RPMSG_BUFS_SPACE (RPMSG_NUM_BUFS * RPMSG_BUF_SIZE)
+
+/*
+ * The alignment between the consumer and producer parts of the vring.
+ * Note: this is part of the "wire" protocol. If you change this, you need
+ * to update your BIOS image as well
+ */
+#define RPMSG_VRING_ALIGN (4096)
+
+/* With 256 buffers, our vring will occupy 3 pages */
+#define RPMSG_RING_SIZE ((DIV_ROUND_UP(vring_size(RPMSG_NUM_BUFS / 2, \
+ RPMSG_VRING_ALIGN), PAGE_SIZE)) * PAGE_SIZE)
+
+/* The total IPC space needed to communicate with a remote processor */
+#define RPMSG_IPC_MEM (RPMSG_BUFS_SPACE + 2 * RPMSG_RING_SIZE)
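+
+/*
+ * Per-remote-processor carveout layout (set up in omap_rpmsg_ini() below):
+ * the message buffers come first (RPMSG_BUFS_SPACE bytes), followed by the
+ * two vrings of RPMSG_RING_SIZE bytes each.
+ */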
+
+/* provide drivers with platform-specific details */
+static void omap_rpmsg_get(struct virtio_device *vdev, unsigned int request,
+ void *buf, unsigned len)
+{
+ struct omap_rpmsg_vproc *rpdev = to_omap_rpdev(vdev);
+ void *presult;
+ int iresult;
+
+ switch (request) {
+ case VPROC_BUF_ADDR:
+ /* user data is at stake so bugs here cannot be tolerated */
+ BUG_ON(len != sizeof(rpdev->buf_mapped));
+ memcpy(buf, &rpdev->buf_mapped, len);
+ break;
+ case VPROC_SIM_BASE:
+ /* user data is at stake so bugs here cannot be tolerated */
+ BUG_ON(len != sizeof(presult));
+ /*
+ * calculate a simulated base address to make virtio's
+ * virt_to_page() happy.
+ */
+ presult = __va(rpdev->buf_addr);
+ memcpy(buf, &presult, len);
+ break;
+ case VPROC_BUF_NUM:
+ /* user data is at stake so bugs here cannot be tolerated */
+ BUG_ON(len != sizeof(iresult));
+ iresult = RPMSG_NUM_BUFS;
+ memcpy(buf, &iresult, len);
+ break;
+ case VPROC_BUF_SZ:
+ /* user data is at stake so bugs here cannot be tolerated */
+ BUG_ON(len != sizeof(iresult));
+ iresult = RPMSG_BUF_SIZE;
+ memcpy(buf, &iresult, len);
+ break;
+ case VPROC_STATIC_CHANNELS:
+ /* user data is at stake so bugs here cannot be tolerated */
+ BUG_ON(len != sizeof(rpdev->hardcoded_chnls));
+ memcpy(buf, &rpdev->hardcoded_chnls, len);
+ break;
+ default:
+ dev_err(&vdev->dev, "invalid request: %d\n", request);
+ }
+}
+
+/* kick the remote processor, and let it know which virtqueue to poke at */
+static void omap_rpmsg_notify(struct virtqueue *vq)
+{
+ struct omap_rpmsg_vq_info *rpvq = vq->priv;
+ int ret;
+
+ pr_debug("sending mailbox msg: %d\n", rpvq->vq_id);
+ rproc_last_busy(rpvq->rpdev->rproc);
+ /* send the index of the triggered virtqueue as the mailbox payload */
+ ret = omap_mbox_msg_send(rpvq->rpdev->mbox, rpvq->vq_id);
+ if (ret)
+ pr_err("ugh, omap_mbox_msg_send() failed: %d\n", ret);
+}
+
+static int omap_rpmsg_mbox_callback(struct notifier_block *this,
+ unsigned long index, void *data)
+{
+ mbox_msg_t msg = (mbox_msg_t) data;
+ struct omap_rpmsg_vproc *rpdev;
+
+ rpdev = container_of(this, struct omap_rpmsg_vproc, nb);
+
+ pr_debug("mbox msg: 0x%x\n", msg);
+
+ switch (msg) {
+ case RP_MBOX_CRASH:
+ pr_err("%s has just crashed !\n", rpdev->rproc_name);
+ rproc_error_notify(rpdev->rproc);
+ break;
+ case RP_MBOX_ECHO_REPLY:
+ pr_info("received echo reply from %s !\n", rpdev->rproc_name);
+ break;
+ case RP_MBOX_PENDING_MSG:
+ /*
+ * a new inbound message is waiting in our own vring (index 0).
+ * Let's pretend the message explicitly contained the vring
+ * index number and handle it generically
+ */
+ msg = rpdev->base_vq_id;
+ /* intentional fall-through */
+ default:
+ /* ignore vq indices which are clearly not for us */
+ if (msg < rpdev->base_vq_id)
+ break;
+
+ msg -= rpdev->base_vq_id;
+
+ /*
+ * Currently both PENDING_MSG and explicit-virtqueue-index
+ * messaging are supported.
+ * Whatever approach is taken, at this point 'msg' contains
+ * the index of the vring which was just triggered.
+ */
+ if (msg < rpdev->num_of_vqs)
+ vring_interrupt(msg, rpdev->vq[msg]);
+ }
+
+ return NOTIFY_DONE;
+}
+
+static void rpmsg_reset_devices(struct omap_rpmsg_vproc *rpdev)
+{
+ /* wait until previous reset requests have finished */
+ flush_work_sync(&rpdev->reset_work);
+ schedule_work(&rpdev->reset_work);
+}
+
+static int rpmsg_rproc_error(struct omap_rpmsg_vproc *rpdev)
+{
+ pr_err("Fatal error in %s\n", rpdev->rproc_name);
+#ifdef CONFIG_OMAP_RPMSG_RECOVERY
+ if (rpdev->slave_reset)
+ return NOTIFY_DONE;
+ rpmsg_reset_devices(rpdev);
+#endif
+
+ return NOTIFY_DONE;
+}
+
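+/*
+ * Veto suspend if the local rx vring (vq[0]) still has incoming messages
+ * waiting to be processed.
+ */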
+static int rpmsg_rproc_suspend(struct omap_rpmsg_vproc *rpdev)
+{
+ if (virtqueue_more_used(rpdev->vq[0]))
+ return NOTIFY_BAD;
+ return NOTIFY_DONE;
+}
+
+static int rpmsg_rproc_pos_suspend(struct omap_rpmsg_vproc *rpdev)
+{
+ if (rpdev->mbox) {
+ omap_mbox_put(rpdev->mbox, &rpdev->nb);
+ rpdev->mbox = NULL;
+ }
+
+ return NOTIFY_DONE;
+}
+
+static int rpmsg_rproc_resume(struct omap_rpmsg_vproc *rpdev)
+{
+ if (!rpdev->mbox)
+ rpdev->mbox = omap_mbox_get(rpdev->mbox_name, &rpdev->nb);
+
+ return NOTIFY_DONE;
+}
+
+static int rpmsg_rproc_secure(struct omap_rpmsg_vproc *rpdev, bool s)
+{
+ pr_err("%s: %s secure mode\n", rpdev->rproc_name, s ? "enter" : "exit");
+ if (rpdev->slave_reset)
+ return NOTIFY_DONE;
+ rpmsg_reset_devices(rpdev);
+
+ return NOTIFY_DONE;
+}
+
+static int rpmsg_rproc_events(struct notifier_block *this,
+ unsigned long type, void *data)
+{
+ struct omap_rpmsg_vproc *rpdev = container_of(this,
+ struct omap_rpmsg_vproc, rproc_nb);
+
+ switch (type) {
+ case RPROC_ERROR:
+ return rpmsg_rproc_error(rpdev);
+ case RPROC_PRE_SUSPEND:
+ return rpmsg_rproc_suspend(rpdev);
+ case RPROC_POS_SUSPEND:
+ return rpmsg_rproc_pos_suspend(rpdev);
+ case RPROC_RESUME:
+ return rpmsg_rproc_resume(rpdev);
+ case RPROC_SECURE:
+ return rpmsg_rproc_secure(rpdev, !!data);
+ }
+ return NOTIFY_DONE;
+}
+
+static struct virtqueue *rp_find_vq(struct virtio_device *vdev,
+ unsigned index,
+ void (*callback)(struct virtqueue *vq),
+ const char *name)
+{
+ struct omap_rpmsg_vproc *rpdev = to_omap_rpdev(vdev);
+ struct omap_rpmsg_vq_info *rpvq;
+ struct virtqueue *vq;
+ int err;
+
+ rpvq = kmalloc(sizeof(*rpvq), GFP_KERNEL);
+ if (!rpvq)
+ return ERR_PTR(-ENOMEM);
+
+ /* ioremap'ing normal memory, so we cast away sparse's complaints */
+ rpvq->addr = (__force void *) ioremap_nocache(rpdev->vring[index],
+ RPMSG_RING_SIZE);
+ if (!rpvq->addr) {
+ err = -ENOMEM;
+ goto free_rpvq;
+ }
+
+ memset(rpvq->addr, 0, RPMSG_RING_SIZE);
+
+ pr_debug("vring%d: phys 0x%x, virt 0x%x\n", index, rpdev->vring[index],
+ (unsigned int) rpvq->addr);
+
+ vq = vring_new_virtqueue(RPMSG_NUM_BUFS / 2, RPMSG_VRING_ALIGN, vdev,
+ rpvq->addr, omap_rpmsg_notify, callback, name);
+ if (!vq) {
+ pr_err("vring_new_virtqueue failed\n");
+ err = -ENOMEM;
+ goto unmap_vring;
+ }
+
+ rpdev->vq[index] = vq;
+ vq->priv = rpvq;
+ /* system-wide unique id for this virtqueue */
+ rpvq->vq_id = rpdev->base_vq_id + index;
+ rpvq->rpdev = rpdev;
+
+ return vq;
+
+unmap_vring:
+ /* iounmap normal memory, so make sparse happy */
+ iounmap((__force void __iomem *) rpvq->addr);
+free_rpvq:
+ kfree(rpvq);
+ return ERR_PTR(err);
+}
+
+static void omap_rpmsg_del_vqs(struct virtio_device *vdev)
+{
+ struct virtqueue *vq, *n;
+ struct omap_rpmsg_vproc *rpdev = to_omap_rpdev(vdev);
+
+ rproc_event_unregister(rpdev->rproc, &rpdev->rproc_nb);
+
+ list_for_each_entry_safe(vq, n, &vdev->vqs, list) {
+ struct omap_rpmsg_vq_info *rpvq = vq->priv;
+ iounmap(rpvq->addr);
+ vring_del_virtqueue(vq);
+ kfree(rpvq);
+ }
+
+ if (rpdev->mbox)
+ omap_mbox_put(rpdev->mbox, &rpdev->nb);
+
+ if (rpdev->rproc)
+ rproc_put(rpdev->rproc);
+
+ iounmap(rpdev->buf_mapped);
+}
+
+static int omap_rpmsg_find_vqs(struct virtio_device *vdev, unsigned nvqs,
+ struct virtqueue *vqs[],
+ vq_callback_t *callbacks[],
+ const char *names[])
+{
+ struct omap_rpmsg_vproc *rpdev = to_omap_rpdev(vdev);
+ int i, err;
+
+ /* we maintain two virtqueues per remote processor (for RX and TX) */
+ if (nvqs != 2)
+ return -EINVAL;
+
+ for (i = 0; i < nvqs; ++i) {
+ vqs[i] = rp_find_vq(vdev, i, callbacks[i], names[i]);
+ if (IS_ERR(vqs[i])) {
+ err = PTR_ERR(vqs[i]);
+ goto error;
+ }
+ }
+
+ rpdev->num_of_vqs = nvqs;
+
+ /* ioremap'ing normal memory, so we cast away sparse's complaints */
+ rpdev->buf_mapped = (__force void *) ioremap_nocache(rpdev->buf_addr,
+ rpdev->buf_size);
+ if (!rpdev->buf_mapped) {
+ pr_err("ioremap failed\n");
+ err = -ENOMEM;
+ goto error;
+ }
+
+ /* for now, use mailbox's notifiers. later that can be optimized */
+ rpdev->nb.notifier_call = omap_rpmsg_mbox_callback;
+ rpdev->mbox = omap_mbox_get(rpdev->mbox_name, &rpdev->nb);
+ if (IS_ERR(rpdev->mbox)) {
+ pr_err("failed to get mailbox %s\n", rpdev->mbox_name);
+ err = -EINVAL;
+ goto unmap_buf;
+ }
+
+ pr_debug("buf: phys 0x%x, virt 0x%x\n", rpdev->buf_addr,
+ (unsigned int) rpdev->buf_mapped);
+
+ /* tell the M3 we're ready; it's unclear whether this message is really needed */
+ err = omap_mbox_msg_send(rpdev->mbox, RP_MBOX_READY);
+ if (err) {
+ pr_err("ugh, omap_mbox_msg_send() failed: %d\n", err);
+ goto put_mbox;
+ }
+
+ /*
+ * send it the physical address of the mapped buffer + vrings;
+ * this should be moved to the resource table logic
+ */
+ err = omap_mbox_msg_send(rpdev->mbox, (mbox_msg_t) rpdev->buf_addr);
+ if (err) {
+ pr_err("ugh, omap_mbox_msg_send() failed: %d\n", err);
+ goto put_mbox;
+ }
+
+ /* ping the remote processor. This is only for sanity's sake;
+ * there is no functional effect whatsoever */
+ err = omap_mbox_msg_send(rpdev->mbox, RP_MBOX_ECHO_REQUEST);
+ if (err) {
+ pr_err("ugh, omap_mbox_msg_send() failed: %d\n", err);
+ goto put_mbox;
+ }
+
+ /* now load the firmware, and take the M3 out of reset */
+ rpdev->rproc = rproc_get(rpdev->rproc_name);
+ if (!rpdev->rproc) {
+ pr_err("failed to get rproc %s\n", rpdev->rproc_name);
+ err = -EINVAL;
+ goto put_mbox;
+ }
+ /* register for remoteproc events */
+ rpdev->rproc_nb.notifier_call = rpmsg_rproc_events;
+ rproc_event_register(rpdev->rproc, &rpdev->rproc_nb);
+
+ return 0;
+
+put_mbox:
+ omap_mbox_put(rpdev->mbox, &rpdev->nb);
+unmap_buf:
+ /* iounmap normal memory, so make sparse happy */
+ iounmap((__force void __iomem *)rpdev->buf_mapped);
+error:
+ omap_rpmsg_del_vqs(vdev);
+ return err;
+}
+
+/*
+ * It would be nice to add firmware support for these handlers;
+ * for now, provide them so virtio doesn't crash.
+ */
+static u8 omap_rpmsg_get_status(struct virtio_device *vdev)
+{
+ return 0;
+}
+
+static void omap_rpmsg_set_status(struct virtio_device *vdev, u8 status)
+{
+ dev_dbg(&vdev->dev, "new status: %d\n", status);
+}
+
+static void omap_rpmsg_reset(struct virtio_device *vdev)
+{
+ dev_dbg(&vdev->dev, "reset !\n");
+}
+
+static u32 omap_rpmsg_get_features(struct virtio_device *vdev)
+{
+ /* for now, use hardcoded bitmap. later this should be provided
+ * by the firmware itself */
+ return (1 << VIRTIO_RPMSG_F_NS);
+}
+
+static void omap_rpmsg_finalize_features(struct virtio_device *vdev)
+{
+ /* Give virtio_ring a chance to accept features */
+ vring_transport_features(vdev);
+}
+
+static void omap_rpmsg_vproc_release(struct device *dev)
+{
+ /* this handler is provided so driver core doesn't yell at us */
+}
+
+static void rpmsg_reset_work(struct work_struct *work)
+{
+ struct omap_rpmsg_vproc *rpdev =
+ container_of(work, struct omap_rpmsg_vproc, reset_work);
+ struct omap_rpmsg_vproc *tmp;
+ int ret;
+
+ for (tmp = rpdev; tmp; tmp = tmp->slave_next) {
+ pr_err("resetting virtio device %d\n", tmp->vdev.index);
+ unregister_virtio_device(&tmp->vdev);
+ }
+ for (tmp = rpdev; tmp; tmp = tmp->slave_next) {
+ memset(&tmp->vdev.dev, 0, sizeof(struct device));
+ tmp->vdev.dev.release = omap_rpmsg_vproc_release;
+ ret = register_virtio_device(&tmp->vdev);
+ if (ret)
+ pr_err("error creating virtio device %d\n", ret);
+ }
+}
+
+static struct virtio_config_ops omap_rpmsg_config_ops = {
+ .get_features = omap_rpmsg_get_features,
+ .finalize_features = omap_rpmsg_finalize_features,
+ .get = omap_rpmsg_get,
+ .find_vqs = omap_rpmsg_find_vqs,
+ .del_vqs = omap_rpmsg_del_vqs,
+ .reset = omap_rpmsg_reset,
+ .set_status = omap_rpmsg_set_status,
+ .get_status = omap_rpmsg_get_status,
+};
+
+static struct rpmsg_channel_info omap_ipuc0_hardcoded_chnls[] = {
+ { "rpmsg-resmgr", 100, RPMSG_ADDR_ANY },
+ { "rpmsg-server-sample", 137, RPMSG_ADDR_ANY },
+ { },
+};
+
+static struct rpmsg_channel_info omap_ipuc1_hardcoded_chnls[] = {
+ { "rpmsg-resmgr", 100, RPMSG_ADDR_ANY },
+ { },
+};
+
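+/*
+ * Both vprocs share the "ipu" remoteproc and "mailbox-1". ipu_c1 is marked
+ * slave_reset, so error recovery is driven through ipu_c0, whose reset work
+ * walks the slave_next chain and re-registers both virtio devices.
+ */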
+static struct omap_rpmsg_vproc omap_rpmsg_vprocs[] = {
+ /* ipu_c0's rpmsg backend */
+ {
+ .vdev.id.device = VIRTIO_ID_RPMSG,
+ .vdev.config = &omap_rpmsg_config_ops,
+ .mbox_name = "mailbox-1",
+ .rproc_name = "ipu",
+ .base_vq_id = 0,
+ .hardcoded_chnls = omap_ipuc0_hardcoded_chnls,
+ .slave_next = &omap_rpmsg_vprocs[1],
+ },
+ /* ipu_c1's rpmsg backend */
+ {
+ .vdev.id.device = VIRTIO_ID_RPMSG,
+ .vdev.config = &omap_rpmsg_config_ops,
+ .mbox_name = "mailbox-1",
+ .rproc_name = "ipu",
+ .base_vq_id = 2,
+ .hardcoded_chnls = omap_ipuc1_hardcoded_chnls,
+ .slave_reset = true,
+ },
+};
+
+static int __init omap_rpmsg_ini(void)
+{
+ int i, ret = 0;
+ phys_addr_t paddr = omap_ipu_get_mempool_base(
+ OMAP_RPROC_MEMPOOL_STATIC);
+ phys_addr_t psize = omap_ipu_get_mempool_size(
+ OMAP_RPROC_MEMPOOL_STATIC);
+
+ for (i = 0; i < ARRAY_SIZE(omap_rpmsg_vprocs); i++) {
+ struct omap_rpmsg_vproc *rpdev = &omap_rpmsg_vprocs[i];
+
+ if (psize < RPMSG_IPC_MEM) {
+ pr_err("out of carveout memory: %d (%d)\n", psize, i);
+ return -ENOMEM;
+ }
+
+ /*
+ * vring buffers are expected to be present at the beginning
+ * of the chosen remoteproc pool
+ */
+ rpdev->buf_addr = paddr;
+ rpdev->buf_size = RPMSG_BUFS_SPACE;
+ rpdev->vring[0] = paddr + RPMSG_BUFS_SPACE;
+ rpdev->vring[1] = paddr + RPMSG_BUFS_SPACE + RPMSG_RING_SIZE;
+ INIT_WORK(&rpdev->reset_work, rpmsg_reset_work);
+
+ paddr += RPMSG_IPC_MEM;
+ psize -= RPMSG_IPC_MEM;
+
+ pr_debug("rpdev%d: buf 0x%x, vring0 0x%x, vring1 0x%x\n", i,
+ rpdev->buf_addr, rpdev->vring[0], rpdev->vring[1]);
+
+ rpdev->vdev.dev.release = omap_rpmsg_vproc_release;
+
+ ret = register_virtio_device(&rpdev->vdev);
+ if (ret) {
+ pr_err("failed to register rpdev: %d\n", ret);
+ break;
+ }
+ }
+
+ return ret;
+}
+module_init(omap_rpmsg_ini);
+
+static void __exit omap_rpmsg_fini(void)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(omap_rpmsg_vprocs); i++) {
+ struct omap_rpmsg_vproc *rpdev = &omap_rpmsg_vprocs[i];
+
+ unregister_virtio_device(&rpdev->vdev);
+ }
+}
+module_exit(omap_rpmsg_fini);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("OMAP Remote processor messaging virtio device");
diff --git a/arch/arm/plat-omap/rproc_user.c b/arch/arm/plat-omap/rproc_user.c
new file mode 100644
index 0000000..083e4ae
--- /dev/null
+++ b/arch/arm/plat-omap/rproc_user.c
@@ -0,0 +1,185 @@
+/*
+ * Secure Mode Input interface to remoteproc driver
+ *
+ * Copyright (C) 2011 Texas Instruments. All rights reserved.
+ *
+ * Authors: Suman Anna <s-anna@ti.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+ * 02110-1301 USA
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/device.h>
+#include <linux/miscdevice.h>
+#include <linux/cdev.h>
+#include <linux/uaccess.h>
+#include <linux/file.h>
+#include <linux/poll.h>
+#include <linux/err.h>
+#include <linux/slab.h>
+
+#include <linux/remoteproc.h>
+
+
+#define RPROC_USER_NAME "rproc_user"
+#define RPROC_USER_DEVICES 1
+
+static DEFINE_MUTEX(rproc_user_mutex);
+
+struct rproc_user_device {
+ struct miscdevice mdev;
+};
+
+static struct rproc_user_device *ipu_device;
+static char *rproc_user_name = RPROC_USER_NAME;
+static unsigned secure_cnt;
+
+static int rproc_user_open(struct inode *inode, struct file *filp)
+{
+ filp->private_data = NULL;
+ return 0;
+}
+
+static int rproc_user_release(struct inode *inode, struct file *filp)
+{
+ int ret = 0;
+
+ if (filp->private_data) {
+ mutex_lock(&rproc_user_mutex);
+ if (!--secure_cnt)
+ ret = rproc_set_secure("ipu", false);
+ mutex_unlock(&rproc_user_mutex);
+ if (ret)
+ pr_err("rproc normal start failed 0x%x, urghh!!", ret);
+ }
+ return ret;
+}
+
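+/* A one-byte read reports whether secure mode is currently requested */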
+static ssize_t rproc_user_read(struct file *filp, char __user *ubuf,
+ size_t len, loff_t *offp)
+{
+ u8 enable;
+ int ret = 1;
+
+ if (len != 1)
+ return -EINVAL;
+
+ if (mutex_lock_interruptible(&rproc_user_mutex))
+ return -EINTR;
+ enable = secure_cnt ? 1 : 0;
+ if (copy_to_user((void *)ubuf, &enable, sizeof(enable)))
+ ret = -EFAULT;
+ mutex_unlock(&rproc_user_mutex);
+
+ return ret;
+}
+
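+/*
+ * A one-byte write toggles secure mode on the "ipu" remote processor:
+ * non-zero requests entry, zero requests exit. Requests are reference
+ * counted across open descriptors via secure_cnt, and each descriptor
+ * tracks its own state in filp->private_data.
+ */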
+static ssize_t rproc_user_write(struct file *filp, const char __user *ubuf,
+ size_t len, loff_t *offp)
+{
+ int ret = 0;
+ u8 enable;
+
+ if (len != 1)
+ return -EINVAL;
+
+ if (copy_from_user(&enable, (char __user *) ubuf, sizeof(enable)))
+ return -EFAULT;
+
+ if (mutex_lock_interruptible(&rproc_user_mutex))
+ return -EINTR;
+
+ enable = enable ? 1 : 0;
+ if (enable == (int)filp->private_data) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ switch (enable) {
+ case 1:
+ if (!secure_cnt++)
+ ret = rproc_set_secure("ipu", true);
+ if (!ret) {
+ filp->private_data = (void *)1;
+ goto out;
+ }
+ /* fall through in case of failure */
+ pr_err("rproc secure start failed, 0x%x\n", ret);
+ case 0:
+ if (!--secure_cnt)
+ ret = rproc_set_secure("ipu", false);
+ if (ret)
+ pr_err("rproc normal start failed 0x%x, urghh!!", ret);
+ else
+ filp->private_data = (void *)0;
+ }
+ if (enable != (int)filp->private_data)
+ ret = -EACCES;
+out:
+ mutex_unlock(&rproc_user_mutex);
+
+ return ret ? ret : 1;
+}
+
+static const struct file_operations rproc_user_fops = {
+ .owner = THIS_MODULE,
+ .open = rproc_user_open,
+ .release = rproc_user_release,
+ .read = rproc_user_read,
+ .write = rproc_user_write,
+};
+
+static int __init rproc_user_init(void)
+{
+ int ret;
+
+ ipu_device = kzalloc(sizeof(struct rproc_user_device), GFP_KERNEL);
+ if (!ipu_device) {
+ pr_err("%s: memory allocation failed for ipu_device\n",
+ __func__);
+ ret = -ENOMEM;
+ goto exit;
+ }
+
+ ipu_device->mdev.minor = MISC_DYNAMIC_MINOR;
+ ipu_device->mdev.name = rproc_user_name;
+ ipu_device->mdev.fops = &rproc_user_fops;
+ ipu_device->mdev.parent = NULL;
+ ret = misc_register(&ipu_device->mdev);
+ if (ret) {
+ pr_err("rproc_user_init: failed to register rproc_user misc "
+ "device\n");
+ goto misc_fail;
+ }
+ return ret;
+
+misc_fail:
+ kfree(ipu_device);
+exit:
+ return ret;
+}
+module_init(rproc_user_init);
+
+static void __exit rproc_user_exit(void)
+{
+ misc_deregister(&ipu_device->mdev);
+ kfree(ipu_device);
+}
+module_exit(rproc_user_exit);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("RemoteProc Secure Mode Interface Driver");
+MODULE_AUTHOR("Suman Anna");
diff --git a/arch/arm/plat-omap/sram.c b/arch/arm/plat-omap/sram.c
index 6af3d0b..da97e1f 100644
--- a/arch/arm/plat-omap/sram.c
+++ b/arch/arm/plat-omap/sram.c
@@ -48,9 +48,14 @@
#define OMAP3_SRAM_VA 0xfe400000
#define OMAP3_SRAM_PUB_PA (OMAP3_SRAM_PA + 0x8000)
#define OMAP3_SRAM_PUB_VA (OMAP3_SRAM_VA + 0x8000)
-#define OMAP4_SRAM_VA 0xfe400000
-#define OMAP4_SRAM_PUB_PA (OMAP4_SRAM_PA + 0x4000)
-#define OMAP4_SRAM_PUB_VA (OMAP4_SRAM_VA + 0x4000)
+
+#define OMAP4_SRAM_MAX 0xe000 /* 56K */
+#define OMAP4_SRAM_VA 0xfe400000
+
+#define OMAP4_HS_SRAM_SIZE 0x1000 /* 4K */
+#define OMAP4_HS_SRAM_OFFSET (OMAP4_SRAM_MAX - OMAP4_HS_SRAM_SIZE)
+#define OMAP4_SRAM_PUB_PA (OMAP4_SRAM_PA + OMAP4_HS_SRAM_OFFSET)
+#define OMAP4_SRAM_PUB_VA (OMAP4_SRAM_VA + OMAP4_HS_SRAM_OFFSET)
#if defined(CONFIG_ARCH_OMAP2PLUS)
#define SRAM_BOOTLOADER_SZ 0x00
@@ -76,6 +81,12 @@ static unsigned long omap_sram_start;
static unsigned long omap_sram_base;
static unsigned long omap_sram_size;
static unsigned long omap_sram_ceil;
+static unsigned long omap_barrier_base;
+
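+/* VA of the strongly-ordered SRAM mapping used for interconnect barriers */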
+unsigned long omap_get_sram_barrier_base(void)
+{
+ return omap_barrier_base;
+}
/*
* Depending on the target RAMFS firewall setup, the public usable amount of
@@ -128,7 +139,7 @@ static void __init omap_detect_sram(void)
} else if (cpu_is_omap44xx()) {
omap_sram_base = OMAP4_SRAM_PUB_VA;
omap_sram_start = OMAP4_SRAM_PUB_PA;
- omap_sram_size = 0xa000; /* 40K */
+ omap_sram_size = OMAP4_HS_SRAM_SIZE;
} else {
omap_sram_base = OMAP2_SRAM_PUB_VA;
omap_sram_start = OMAP2_SRAM_PUB_PA;
@@ -185,24 +196,25 @@ static void __init omap_detect_sram(void)
omap_sram_ceil = omap_sram_base + omap_sram_size;
}
-static struct map_desc omap_sram_io_desc[] __initdata = {
- { /* .length gets filled in at runtime */
- .virtual = OMAP1_SRAM_VA,
- .pfn = __phys_to_pfn(OMAP1_SRAM_PA),
- .type = MT_MEMORY
- }
-};
-
/*
* Note that we cannot use ioremap for SRAM, as clock init needs SRAM early.
*/
static void __init omap_map_sram(void)
{
unsigned long base;
+ struct map_desc omap_sram_io_desc[2];
+ int nr_desc = 1;
if (omap_sram_size == 0)
return;
+ omap_sram_io_desc[0].virtual = omap_sram_base;
+ base = omap_sram_start;
+ base = ROUND_DOWN(base, PAGE_SIZE);
+ omap_sram_io_desc[0].pfn = __phys_to_pfn(base);
+ omap_sram_io_desc[0].length = ROUND_DOWN(omap_sram_size, PAGE_SIZE);
+ omap_sram_io_desc[0].type = MT_MEMORY;
+
if (cpu_is_omap34xx()) {
/*
* SRAM must be marked as non-cached on OMAP3 since the
@@ -212,14 +224,33 @@ static void __init omap_map_sram(void)
* which will cause the system to hang.
*/
omap_sram_io_desc[0].type = MT_MEMORY_NONCACHED;
+ } else if (cpu_is_omap44xx()) {
+ /*
+ * Map a page of SRAM with strongly ordered attributes
+ * for interconnect barrier usage.
+ * If we have space, use a dedicated page; otherwise remap
+ * the first mapping as strongly ordered.
+ */
+ if (omap_sram_size <= PAGE_SIZE) {
+ omap_sram_io_desc[0].type = MT_MEMORY_SO;
+ omap_barrier_base = omap_sram_io_desc[0].virtual;
+ } else {
+ omap_sram_io_desc[0].length = ROUND_DOWN(omap_sram_size
+ - PAGE_SIZE, PAGE_SIZE);
+ omap_sram_io_desc[1].virtual =
+ omap_sram_base + omap_sram_io_desc[0].length;
+ omap_barrier_base = omap_sram_io_desc[1].virtual;
+ base = omap_sram_start + omap_sram_io_desc[0].length;
+ base = ROUND_DOWN(base, PAGE_SIZE);
+ omap_sram_io_desc[1].pfn = __phys_to_pfn(base);
+ omap_sram_io_desc[1].length = PAGE_SIZE;
+ omap_sram_io_desc[1].type = MT_MEMORY_SO;
+ nr_desc = 2;
+ }
}
- omap_sram_io_desc[0].virtual = omap_sram_base;
- base = omap_sram_start;
- base = ROUND_DOWN(base, PAGE_SIZE);
- omap_sram_io_desc[0].pfn = __phys_to_pfn(base);
- omap_sram_io_desc[0].length = ROUND_DOWN(omap_sram_size, PAGE_SIZE);
- iotable_init(omap_sram_io_desc, ARRAY_SIZE(omap_sram_io_desc));
+
+ iotable_init(omap_sram_io_desc, nr_desc);
pr_info("SRAM: Mapped pa 0x%08llx to va 0x%08lx size: 0x%lx\n",
(long long) __pfn_to_phys(omap_sram_io_desc[0].pfn),