author | Dan Murphy <dmurphy@ti.com> | 2011-09-12 07:44:11 -0500
---|---|---
committer | Dan Murphy <dmurphy@ti.com> | 2011-09-12 07:44:11 -0500
commit | 17b9e750326684a6b386b7a384ad0d5083bca1bc | 
tree | d7530e105f067dafdb5a43bbf126d0b5a0502527 | 
parent | ddd4f8877e00dcc6a20687f62aaaef2be89c7d7f | 
parent | 2f04ad886cfa3b7305d81764dca5c051c7871acf | 
Merge branch 'android-omap-3.0' into p-android-omap-3.0
Conflicts:
net/netfilter/xt_qtaguid.c
Signed-off-by: Dan Murphy <dmurphy@ti.com>
77 files changed, 4129 insertions(+), 1225 deletions(-)
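Among the changes below, arch/arm/mach-omap2/pm44xx.c gains a small errata-flag mechanism (a `pm44xx_errata` bitmask plus a token-pasting `is_pm44xx_erratum()` test macro) that is then used to skip IVA auto-retention. The following is a minimal, compilable sketch of that pattern lifted from the hunk; the stubbed `cpu_is_omap44xx()`, the `BIT()` fallback and `main()` are padding added here so it builds on its own and are not part of the commit.

```c
/*
 * Sketch of the errata-flag pattern added to pm44xx.c in this merge:
 * a u8 bitmask of known errata, set once at init based on the detected
 * silicon, and tested with a token-pasting macro.
 */
#include <stdio.h>

#define BIT(n)            (1u << (n))

/* Stub standing in for the real mach-omap2 CPU-detection helper. */
static int cpu_is_omap44xx(void) { return 1; }

/* Yet un-named erratum which requires AUTORET to be disabled for IVA PD */
#define OMAP4_PM_ERRATUM_IVA_AUTO_RET_iXXX  BIT(1)

static unsigned char pm44xx_errata;
#define is_pm44xx_erratum(erratum) (pm44xx_errata & OMAP4_PM_ERRATUM_##erratum)

static void omap4_pm_setup_errata(void)
{
	/* Per the hunk: the erratum is assumed to affect all OMAP4 silicon. */
	if (cpu_is_omap44xx())
		pm44xx_errata |= OMAP4_PM_ERRATUM_IVA_AUTO_RET_iXXX;
}

int main(void)
{
	omap4_pm_setup_errata();

	/* omap4_enter_sleep() guards the IVA auto-retention programming on the flag. */
	if (!is_pm44xx_erratum(IVA_AUTO_RET_iXXX))
		printf("would enable IVA auto-retention\n");
	else
		printf("erratum present: keep IVA AUTO_RET disabled\n");

	return 0;
}
```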
diff --git a/arch/arm/mach-omap2/Kconfig b/arch/arm/mach-omap2/Kconfig index 6fa44fb..c94fe3e 100644 --- a/arch/arm/mach-omap2/Kconfig +++ b/arch/arm/mach-omap2/Kconfig @@ -45,8 +45,8 @@ config ARCH_OMAP4 select CPU_V7 select ARM_GIC select LOCAL_TIMERS if SMP - select PL310_ERRATA_588369 - select PL310_ERRATA_727915 + select PL310_ERRATA_588369 if CONFIG_CACHE_L2X0 + select PL310_ERRATA_727915 if CONFIG_CACHE_L2X0 select ARM_ERRATA_720789 select ARCH_HAS_OPP select PM_OPP if PM diff --git a/arch/arm/mach-omap2/cpuidle44xx.c b/arch/arm/mach-omap2/cpuidle44xx.c index b8bff9a..f52f7ea 100644 --- a/arch/arm/mach-omap2/cpuidle44xx.c +++ b/arch/arm/mach-omap2/cpuidle44xx.c @@ -325,6 +325,13 @@ wake_cpu1: if (!cpu_is_omap443x()) while (gic_dist_disabled()) cpu_relax(); + + /* + * cpu1 mucks with page tables while it is starting, + * prevent cpu0 executing any processes until cpu1 is up + */ + while (omap4_idle_requested_cx[1]) + cpu_relax(); } out: diff --git a/arch/arm/mach-omap2/devices.c b/arch/arm/mach-omap2/devices.c index 2be0c1e..65f59ee 100644 --- a/arch/arm/mach-omap2/devices.c +++ b/arch/arm/mach-omap2/devices.c @@ -328,6 +328,8 @@ static void omap_init_mcpdm(void) return; } + pdata->was_context_lost = omap_pm_was_context_lost; + od = omap_device_build(dev_name, -1, oh, pdata, sizeof(struct omap_mcpdm_platform_data), omap_mcpdm_latency, @@ -371,6 +373,7 @@ static void omap_init_aess(void) } pdata->device_scale = omap_device_scale; + pdata->was_context_lost = omap_pm_was_context_lost; od = omap_device_build(dev_name, -1, oh, pdata, sizeof(struct omap4_abe_dsp_pdata), diff --git a/arch/arm/mach-omap2/dpll44xx.c b/arch/arm/mach-omap2/dpll44xx.c index e4051c2..427fccf 100644 --- a/arch/arm/mach-omap2/dpll44xx.c +++ b/arch/arm/mach-omap2/dpll44xx.c @@ -172,6 +172,7 @@ int omap4_prcm_freq_update(void) /* Use a very high retry count - we should not hit this condition */ #define MAX_DPLL_WAIT_TRIES 1000000 +#define OMAP_1_2GHz 1200000000 #define OMAP_1GHz 1000000000 #define OMAP_920MHz 920000000 #define OMAP_748MHz 748000000 @@ -237,6 +238,32 @@ const struct clkops clkops_omap4_dpllmx_ops = { .deny_idle = omap4_dpllmx_deny_gatectrl, }; +static void omap4460_mpu_dpll_update_children(unsigned long rate) +{ + u32 v; + + /* + * The interconnect frequency to EMIF should + * be switched between MPU clk divide by 4 (for + * frequencies higher than 920Mhz) and MPU clk divide + * by 2 (for frequencies lower than or equal to 920Mhz) + * Also the async bridge to ABE must be MPU clk divide + * by 8 for MPU clk > 748Mhz and MPU clk divide by 4 + * for lower frequencies. + */ + v = __raw_readl(OMAP4430_CM_MPU_MPU_CLKCTRL); + if (rate > OMAP_920MHz) + v |= OMAP4460_CLKSEL_EMIF_DIV_MODE_MASK; + else + v &= ~OMAP4460_CLKSEL_EMIF_DIV_MODE_MASK; + + if (rate > OMAP_748MHz) + v |= OMAP4460_CLKSEL_ABE_DIV_MODE_MASK; + else + v &= ~OMAP4460_CLKSEL_ABE_DIV_MODE_MASK; + __raw_writel(v, OMAP4430_CM_MPU_MPU_CLKCTRL); +} + int omap4460_mpu_dpll_set_rate(struct clk *clk, unsigned long rate) { struct dpll_data *dd; @@ -254,6 +281,9 @@ int omap4460_mpu_dpll_set_rate(struct clk *clk, unsigned long rate) if (!clk->parent->set_rate) return -EINVAL; + if (rate > clk->rate) + omap4460_mpu_dpll_update_children(rate); + /* * On OMAP4460, to obtain MPU DPLL frequency higher * than 1GHz, DCC (Duty Cycle Correction) needs to @@ -261,23 +291,17 @@ int omap4460_mpu_dpll_set_rate(struct clk *clk, unsigned long rate) * And needs to be kept disabled for < 1 Ghz. 
*/ dpll_rate = omap2_get_dpll_rate(clk->parent); - v = __raw_readl(dd->mult_div1_reg); - if (rate < OMAP_1GHz) { - if (rate == dpll_rate) - return 0; + if (rate <= OMAP_1_2GHz) { /* If DCC is enabled, disable it */ + v = __raw_readl(dd->mult_div1_reg); if (v & OMAP4460_DCC_EN_MASK) { v &= ~OMAP4460_DCC_EN_MASK; __raw_writel(v, dd->mult_div1_reg); } - clk->parent->set_rate(clk->parent, rate); + + if (rate != dpll_rate) + clk->parent->set_rate(clk->parent, rate); } else { - if (rate == dpll_rate/2) - return 0; - v &= ~OMAP4460_DCC_COUNT_MAX_MASK; - v |= (5 << OMAP4460_DCC_COUNT_MAX_SHIFT); - v |= OMAP4460_DCC_EN_MASK; - __raw_writel(v, dd->mult_div1_reg); /* * On 4460, the MPU clk for frequencies higher than 1Ghz * is sourced from CLKOUTX2_M3, instead of CLKOUT_M2, while @@ -285,31 +309,22 @@ int omap4460_mpu_dpll_set_rate(struct clk *clk, unsigned long rate) * than 1 Ghz, lock the DPLL at half the rate so the * CLKOUTX2_M3 then matches the requested rate. */ - clk->parent->set_rate(clk->parent, rate/2); - } + if (rate != dpll_rate * 2) + clk->parent->set_rate(clk->parent, rate / 2); - clk->rate = rate; + v = __raw_readl(dd->mult_div1_reg); + v &= ~OMAP4460_DCC_COUNT_MAX_MASK; + v |= (5 << OMAP4460_DCC_COUNT_MAX_SHIFT); + __raw_writel(v, dd->mult_div1_reg); - /* - * The interconnect frequency to EMIF should - * be switched between MPU clk divide by 4 (for - * frequencies higher than 920Mhz) and MPU clk divide - * by 2 (for frequencies lower than or equal to 920Mhz) - * Also the async bridge to ABE must be MPU clk divide - * by 8 for MPU clk > 748Mhz and MPU clk divide by 4 - * for lower frequencies. - */ - v = __raw_readl(OMAP4430_CM_MPU_MPU_CLKCTRL); - if (rate > OMAP_920MHz) - v |= OMAP4460_CLKSEL_EMIF_DIV_MODE_MASK; - else - v &= ~OMAP4460_CLKSEL_EMIF_DIV_MODE_MASK; + v |= OMAP4460_DCC_EN_MASK; + __raw_writel(v, dd->mult_div1_reg); + } - if (rate > OMAP_748MHz) - v |= OMAP4460_CLKSEL_ABE_DIV_MODE_MASK; - else - v &= ~OMAP4460_CLKSEL_ABE_DIV_MODE_MASK; - __raw_writel(v, OMAP4430_CM_MPU_MPU_CLKCTRL); + if (rate < clk->rate) + omap4460_mpu_dpll_update_children(rate); + + clk->rate = rate; return 0; } diff --git a/arch/arm/mach-omap2/include/mach/ctrl_module_core_44xx.h b/arch/arm/mach-omap2/include/mach/ctrl_module_core_44xx.h index 1e60707..58983a1 100644 --- a/arch/arm/mach-omap2/include/mach/ctrl_module_core_44xx.h +++ b/arch/arm/mach-omap2/include/mach/ctrl_module_core_44xx.h @@ -163,6 +163,7 @@ /* STD_FUSE_OPP_BGAP */ #define OMAP4_STD_FUSE_OPP_BGAP_SHIFT 0 #define OMAP4_STD_FUSE_OPP_BGAP_MASK (0xffffffff << 0) +#define OMAP4_STD_FUSE_OPP_BGAP_MASK_LSB (0xffff << 16) /* STD_FUSE_OPP_DPLL_0 */ #define OMAP4_STD_FUSE_OPP_DPLL_0_SHIFT 0 diff --git a/arch/arm/mach-omap2/iommu2.c b/arch/arm/mach-omap2/iommu2.c index de06f56..faa7646 100644 --- a/arch/arm/mach-omap2/iommu2.c +++ b/arch/arm/mach-omap2/iommu2.c @@ -130,6 +130,9 @@ static int omap2_iommu_enable(struct iommu *obj) __iommu_set_twl(obj, true); + if (cpu_is_omap44xx()) + iommu_write_reg(obj, 0x1, MMU_GP_REG); + return 0; } diff --git a/arch/arm/mach-omap2/omap2plus-cpufreq.c b/arch/arm/mach-omap2/omap2plus-cpufreq.c index 5015c6f..c3b5d26 100644 --- a/arch/arm/mach-omap2/omap2plus-cpufreq.c +++ b/arch/arm/mach-omap2/omap2plus-cpufreq.c @@ -47,8 +47,6 @@ struct lpj_info { unsigned int freq; }; -#define THROTTLE_DELAY_MS 10000 - static DEFINE_PER_CPU(struct lpj_info, lpj_ref); static struct lpj_info global_lpj_ref; #endif @@ -159,64 +157,21 @@ static unsigned int omap_thermal_lower_speed(void) return max; } -static void 
throttle_delayed_work_fn(struct work_struct *work); - -static DECLARE_DELAYED_WORK(throttle_delayed_work, throttle_delayed_work_fn); - -static void throttle_delayed_work_fn(struct work_struct *work) -{ - unsigned int new_max; - unsigned int cur; - - mutex_lock(&omap_cpufreq_lock); - - if (max_thermal == max_freq) - goto out; - - new_max = omap_thermal_lower_speed(); - if (new_max == max_thermal) - goto out; - - max_thermal = new_max; - - pr_warn("%s: temperature still too high, throttling cpu to max %u\n", - __func__, max_thermal); - - cur = omap_getspeed(0); - if (cur > max_thermal) - omap_cpufreq_scale(max_thermal, cur); - - schedule_delayed_work(&throttle_delayed_work, - msecs_to_jiffies(THROTTLE_DELAY_MS)); - -out: - mutex_unlock(&omap_cpufreq_lock); -} - void omap_thermal_throttle(void) { unsigned int cur; mutex_lock(&omap_cpufreq_lock); - if (max_thermal != max_freq) { - pr_warn("%s: already throttling\n", __func__); - goto out; - } - max_thermal = omap_thermal_lower_speed(); - pr_warn("%s: temperature too high, starting cpu throttling at max %u\n", + pr_warn("%s: temperature too high, cpu throttle at max %u\n", __func__, max_thermal); cur = omap_getspeed(0); if (cur > max_thermal) omap_cpufreq_scale(max_thermal, cur); - schedule_delayed_work(&throttle_delayed_work, - msecs_to_jiffies(THROTTLE_DELAY_MS)); - -out: mutex_unlock(&omap_cpufreq_lock); } @@ -233,8 +188,6 @@ void omap_thermal_unthrottle(void) max_thermal = max_freq; - cancel_delayed_work_sync(&throttle_delayed_work); - pr_warn("%s: temperature reduced, ending cpu throttling\n", __func__); cur = omap_getspeed(0); diff --git a/arch/arm/mach-omap2/omap4-mpuss-lowpower.c b/arch/arm/mach-omap2/omap4-mpuss-lowpower.c index c7a9562..c4855d9 100644 --- a/arch/arm/mach-omap2/omap4-mpuss-lowpower.c +++ b/arch/arm/mach-omap2/omap4-mpuss-lowpower.c @@ -573,7 +573,9 @@ cpu_prepare: * Call low level function with targeted CPU id * and its low power state. */ + stop_critical_timings(); omap4_cpu_suspend(cpu, save_state); + start_critical_timings(); /* * Restore the CPUx power state to ON otherwise CPUx diff --git a/arch/arm/mach-omap2/omap4_trim_quirks.c b/arch/arm/mach-omap2/omap4_trim_quirks.c index 5e373cb..346cb5d 100644 --- a/arch/arm/mach-omap2/omap4_trim_quirks.c +++ b/arch/arm/mach-omap2/omap4_trim_quirks.c @@ -25,7 +25,7 @@ */ int omap4_ldo_trim_configure(void) { - u32 is_trimmed = 0; + u32 bgap_trimmed = 0; u32 val; /* Applicable only for OMAP4 */ @@ -42,11 +42,13 @@ int omap4_ldo_trim_configure(void) */ if (omap_rev() >= CHIP_IS_OMAP4430ES2_2) - is_trimmed = omap_ctrl_readl( - OMAP4_CTRL_MODULE_CORE_LDOSRAM_MPU_VOLTAGE_CTRL); + bgap_trimmed = omap_ctrl_readl( + OMAP4_CTRL_MODULE_CORE_STD_FUSE_OPP_BGAP); + + bgap_trimmed &= OMAP4_STD_FUSE_OPP_BGAP_MASK_LSB; /* if not trimmed, we set force overide, insted of efuse. 
*/ - if (!is_trimmed) { + if (!bgap_trimmed) { pr_err("%s: UNTRIMMED PART\n", __func__); /* Fill in recommended values */ val = 0x0f << OMAP4_LDOSRAMCORE_ACTMODE_VSET_OUT_SHIFT; @@ -64,16 +66,27 @@ int omap4_ldo_trim_configure(void) /* write value as per trim recomendation */ val = 0xc0 << OMAP4_AVDAC_TRIM_BYTE0_SHIFT; val |= 0x01 << OMAP4_AVDAC_TRIM_BYTE1_SHIFT; - omap_ctrl_writel(val, + omap4_ctrl_pad_writel(val, OMAP4_CTRL_MODULE_PAD_CORE_CONTROL_EFUSE_1); } + /* For all trimmed and untrimmed write value as per recomendation */ + val = 0x10 << OMAP4_AVDAC_TRIM_BYTE0_SHIFT; + val |= 0x01 << OMAP4_AVDAC_TRIM_BYTE1_SHIFT; + val |= 0x4d << OMAP4_AVDAC_TRIM_BYTE2_SHIFT; + val |= 0x1C << OMAP4_AVDAC_TRIM_BYTE3_SHIFT; + omap4_ctrl_pad_writel(val, + OMAP4_CTRL_MODULE_PAD_CORE_CONTROL_EFUSE_1); /* * For all ESx.y trimmed and untrimmed units LPDDR IO and * Smart IO override efuse. */ val = OMAP4_LPDDR2_PTV_P5_MASK | OMAP4_LPDDR2_PTV_N5_MASK; - omap_ctrl_writel(val, OMAP4_CTRL_MODULE_PAD_CORE_CONTROL_EFUSE_2); + omap4_ctrl_pad_writel(val, OMAP4_CTRL_MODULE_PAD_CORE_CONTROL_EFUSE_2); + + /* Required for DPLL_MPU to lock at 2.4 GHz */ + if (cpu_is_omap446x()) + omap_ctrl_writel(0x29, OMAP4_CTRL_MODULE_CORE_DPLL_NWELL_TRIM_0); return 0; } diff --git a/arch/arm/mach-omap2/pm44xx.c b/arch/arm/mach-omap2/pm44xx.c index 6ae8ec6..37a44a5 100644 --- a/arch/arm/mach-omap2/pm44xx.c +++ b/arch/arm/mach-omap2/pm44xx.c @@ -70,6 +70,15 @@ static struct powerdomain *core_pwrdm, *per_pwrdm; static struct voltagedomain *mpu_voltdm, *iva_voltdm, *core_voltdm; +static struct clockdomain *tesla_clkdm; +static struct powerdomain *tesla_pwrdm; + +/* Yet un-named erratum which requires AUTORET to be disabled for IVA PD */ +#define OMAP4_PM_ERRATUM_IVA_AUTO_RET_iXXX BIT(1) + +static u8 pm44xx_errata; +#define is_pm44xx_erratum(erratum) (pm44xx_errata & OMAP4_PM_ERRATUM_##erratum) + #define MAX_IOPAD_LATCH_TIME 1000 void omap4_trigger_ioctrl(void) { @@ -149,8 +158,10 @@ void omap4_enter_sleep(unsigned int cpu, unsigned int power_state, bool suspend) } omap_vc_set_auto_trans(core_voltdm, OMAP_VC_CHANNEL_AUTO_TRANSITION_RETENTION); - omap_vc_set_auto_trans(iva_voltdm, - OMAP_VC_CHANNEL_AUTO_TRANSITION_RETENTION); + if (!is_pm44xx_erratum(IVA_AUTO_RET_iXXX)) { + omap_vc_set_auto_trans(iva_voltdm, + OMAP_VC_CHANNEL_AUTO_TRANSITION_RETENTION); + } omap_temp_sensor_prepare_idle(); } @@ -172,6 +183,11 @@ void omap4_enter_sleep(unsigned int cpu, unsigned int power_state, bool suspend) omap4_sar_overwrite(); omap4_cm_prepare_off(); omap4_dpll_prepare_off(); + + /* Extend Non-EMIF I/O isolation */ + omap4_prminst_rmw_inst_reg_bits(OMAP4430_ISOOVR_EXTEND_MASK, + OMAP4430_ISOOVR_EXTEND_MASK, OMAP4430_PRM_PARTITION, + OMAP4430_PRM_DEVICE_INST, OMAP4_PRM_IO_PMCTRL_OFFSET); } omap4_enter_lowpower(cpu, power_state); @@ -189,8 +205,10 @@ abort_device_off: /* See note above */ omap_vc_set_auto_trans(core_voltdm, OMAP_VC_CHANNEL_AUTO_TRANSITION_DISABLE); - omap_vc_set_auto_trans(iva_voltdm, + if (!is_pm44xx_erratum(IVA_AUTO_RET_iXXX)) { + omap_vc_set_auto_trans(iva_voltdm, OMAP_VC_CHANNEL_AUTO_TRANSITION_DISABLE); + } omap_temp_sensor_resume_idle(); if (!suspend) { @@ -206,13 +224,19 @@ abort_device_off: omap4_ldo_trim_configure(); } - /* - * GPIO: since we have put_synced clks, we need to resume - * even if OFF was not really achieved - */ - if (omap4_device_next_state_off()) + if (omap4_device_next_state_off()) { + /* + * GPIO: since we have put_synced clks, we need to resume + * even if OFF was not really achieved + */ 
omap2_gpio_resume_after_idle(); + /* Disable the extension of Non-EMIF I/O isolation */ + omap4_prminst_rmw_inst_reg_bits(OMAP4430_ISOOVR_EXTEND_MASK, + 0, OMAP4430_PRM_PARTITION, + OMAP4430_PRM_DEVICE_INST, OMAP4_PRM_IO_PMCTRL_OFFSET); + } + if (mpu_next_state < PWRDM_POWER_INACTIVE) { omap_vc_set_auto_trans(mpu_voltdm, OMAP_VC_CHANNEL_AUTO_TRANSITION_DISABLE); @@ -724,6 +748,52 @@ static void __init prcm_setup_regs(void) } } + + +/* omap_pm_clear_dsp_wake_up - SW WA for hardcoded wakeup dependency +* from HSI to DSP +* +* Due to HW bug, same SWakeup signal is used for both MPU and DSP. +* Thus Swakeup will unexpectedly wakeup the DSP domain even if nothing runs on +* DSP. Since MPU is faster to process SWakeup, it acknowledges the Swakeup to +* HSI before the DSP has completed its domain transition. This leaves the DSP +* Power Domain in INTRANSITION state forever, and prevents the DEVICE-OFF mode. +* +* Workaround consists in : +* when a SWakeup is asserted from HSI to MPU (and DSP) : +* - force a DSP SW wakeup +* - wait DSP module to be fully ON +* - force a DSP SW sleep +* +* Note : we detect a Swakeup is asserted to MPU by checking when an interrupt +* is received while HSI module is ON. +* +* Bug ref is HSI-C1BUG00106 : dsp swakeup generated by HSI same as mpu swakeup +*/ +static void omap_pm_clear_dsp_wake_up(void) +{ + if (!tesla_pwrdm || !tesla_clkdm) { + WARN_ONCE(1, "%s: unable to use tesla workaround\n", __func__); + return; + } + + if (omap4_prminst_read_inst_reg(tesla_pwrdm->prcm_partition, + tesla_pwrdm->prcm_offs, + OMAP4_PM_PWSTST) & OMAP_INTRANSITION_MASK) { + + if (clkdm_wakeup(tesla_clkdm)) + pr_err("%s: Failed to force wakeup of %s\n", __func__, + tesla_clkdm->name); + + /* This takes less than a few microseconds, hence in context */ + pwrdm_wait_transition(tesla_pwrdm); + + if (clkdm_sleep(tesla_clkdm)) + pr_err("%s: Failed to force sleep of %s\n", __func__, + tesla_clkdm->name); + } +} + static irqreturn_t prcm_interrupt_handler (int irq, void *dev_id) { u32 irqenable_mpu, irqstatus_mpu; @@ -736,8 +806,10 @@ static irqreturn_t prcm_interrupt_handler (int irq, void *dev_id) /* Check if a IO_ST interrupt */ if (irqstatus_mpu & OMAP4430_IO_ST_MASK) { /* Check if HSI caused the IO wakeup */ - if (omap_hsi_is_io_wakeup_from_hsi()) + if (omap_hsi_is_io_wakeup_from_hsi()) { + omap_pm_clear_dsp_wake_up(); omap_hsi_wakeup(0); + } omap_uart_resume_idle(); usbhs_wakeup(); omap4_trigger_ioctrl(); @@ -835,6 +907,16 @@ bool omap4_device_next_state_off(void) & OMAP4430_DEVICE_OFF_ENABLE_MASK ? 
true : false; } +static void __init omap4_pm_setup_errata(void) +{ + /* + * Current understanding is that the following errata impacts + * all OMAP4 silica + */ + if (cpu_is_omap44xx()) + pm44xx_errata |= OMAP4_PM_ERRATUM_IVA_AUTO_RET_iXXX; +} + /** * omap4_pm_init - Init routine for OMAP4 PM * @@ -857,6 +939,9 @@ static int __init omap4_pm_init(void) pr_err("Power Management for TI OMAP4.\n"); + /* setup the erratas */ + omap4_pm_setup_errata(); + prcm_setup_regs(); ret = request_irq(OMAP44XX_IRQ_PRCM, @@ -949,6 +1034,13 @@ static int __init omap4_pm_init(void) cpu0_pwrdm = pwrdm_lookup("cpu0_pwrdm"); core_pwrdm = pwrdm_lookup("core_pwrdm"); per_pwrdm = pwrdm_lookup("l4per_pwrdm"); + tesla_pwrdm = pwrdm_lookup("tesla_pwrdm"); + if (!tesla_pwrdm) + pr_err("%s: Failed to lookup tesla_pwrdm\n", __func__); + + tesla_clkdm = clkdm_lookup("tesla_clkdm"); + if (!tesla_clkdm) + pr_err("%s: Failed to lookup tesla_clkdm\n", __func__); /* Enable wakeup for PRCM IRQ for system wide suspend */ enable_irq_wake(OMAP44XX_IRQ_PRCM); diff --git a/arch/arm/mach-omap2/prm-regbits-44xx.h b/arch/arm/mach-omap2/prm-regbits-44xx.h index d9b8320..b76cfd6 100644 --- a/arch/arm/mach-omap2/prm-regbits-44xx.h +++ b/arch/arm/mach-omap2/prm-regbits-44xx.h @@ -402,6 +402,8 @@ /* Used by PRM_IO_PMCTRL */ #define OMAP4430_GLOBAL_WUEN_SHIFT 16 #define OMAP4430_GLOBAL_WUEN_MASK (1 << 16) +#define OMAP4430_ISOOVR_EXTEND_SHIFT 4 +#define OMAP4430_ISOOVR_EXTEND_MASK (1 << 4) /* Used by PRM_VC_CFG_I2C_MODE */ #define OMAP4430_HSMCODE_SHIFT 0 diff --git a/arch/arm/mach-omap2/remoteproc.c b/arch/arm/mach-omap2/remoteproc.c index 5d6ccdd..43d49eb 100644 --- a/arch/arm/mach-omap2/remoteproc.c +++ b/arch/arm/mach-omap2/remoteproc.c @@ -34,6 +34,10 @@ static struct omap_rproc_timers_info ipu_timers[] = { { .id = 3 }, { .id = 4 }, +#ifdef CONFIG_REMOTEPROC_WATCHDOG + { .id = 9 }, + { .id = 11 }, +#endif }; static struct omap_rproc_pdata omap4_rproc_data[] = { @@ -55,7 +59,7 @@ static struct omap_rproc_pdata omap4_rproc_data[] = { .timers_cnt = ARRAY_SIZE(ipu_timers), .idle_addr = OMAP4430_CM_M3_M3_CLKCTRL, .idle_mask = OMAP4430_STBYST_MASK, - .suspend_addr = 0xb7ff02d8, + .suspend_addr = 0xb43f02d8, .suspend_mask = ~0, .sus_timeout = 5000, .sus_mbox_name = "mailbox-1", diff --git a/arch/arm/plat-omap/dmtimer.c b/arch/arm/plat-omap/dmtimer.c index bccbb30..4278e3c 100644 --- a/arch/arm/plat-omap/dmtimer.c +++ b/arch/arm/plat-omap/dmtimer.c @@ -573,8 +573,6 @@ int omap_dm_timer_start(struct omap_dm_timer *timer) spin_lock_irqsave(&timer->lock, flags); if (timer->loses_context) { - u32 ctx_loss_cnt_after; - __timer_enable(timer); if (omap_pm_was_context_lost(&timer->pdev->dev) && timer->context_saved) { @@ -705,12 +703,8 @@ int omap_dm_timer_set_load_start(struct omap_dm_timer *timer, int autoreload, spin_lock_irqsave(&timer->lock, flags); if (timer->loses_context) { - u32 ctx_loss_cnt_after; - __timer_enable(timer); - ctx_loss_cnt_after = - timer->get_context_loss_count(&timer->pdev->dev); - if ((ctx_loss_cnt_after != timer->ctx_loss_count) && + if (omap_pm_was_context_lost(&timer->pdev->dev) && timer->context_saved) { omap_timer_restore_context(timer); timer->context_saved = false; diff --git a/arch/arm/plat-omap/include/plat/dmtimer.h b/arch/arm/plat-omap/include/plat/dmtimer.h index b2e391d..aaa676f 100644 --- a/arch/arm/plat-omap/include/plat/dmtimer.h +++ b/arch/arm/plat-omap/include/plat/dmtimer.h @@ -104,7 +104,6 @@ struct omap_dm_timer { struct platform_device *pdev; struct list_head node; - int (*get_context_loss_count)(struct 
device *dev); }; extern struct omap_dm_timer *gptimer_wakeup; @@ -118,7 +117,6 @@ struct dmtimer_platform_data { u32 needs_manual_reset:1; bool loses_context; - int (*get_context_loss_count)(struct device *dev); }; struct omap_dm_timer *omap_dm_timer_request(void); diff --git a/arch/arm/plat-omap/include/plat/iommu.h b/arch/arm/plat-omap/include/plat/iommu.h index e043a53..38d92b1 100644 --- a/arch/arm/plat-omap/include/plat/iommu.h +++ b/arch/arm/plat-omap/include/plat/iommu.h @@ -13,6 +13,8 @@ #ifndef __MACH_IOMMU_H #define __MACH_IOMMU_H +#include <linux/pm_qos_params.h> + struct iotlb_entry { u32 da; u32 pa; @@ -53,6 +55,7 @@ struct iommu { u32 da_start; u32 da_end; struct platform_device *pdev; + struct pm_qos_request_list *qos_request; }; struct cr_regs { diff --git a/arch/arm/plat-omap/include/plat/iommu2.h b/arch/arm/plat-omap/include/plat/iommu2.h index 10ad05f..45b2e36 100644 --- a/arch/arm/plat-omap/include/plat/iommu2.h +++ b/arch/arm/plat-omap/include/plat/iommu2.h @@ -36,6 +36,7 @@ #define MMU_READ_CAM 0x68 #define MMU_READ_RAM 0x6c #define MMU_EMU_FAULT_AD 0x70 +#define MMU_GP_REG 0x88 #define MMU_REG_SIZE 256 diff --git a/arch/arm/plat-omap/include/plat/mcpdm.h b/arch/arm/plat-omap/include/plat/mcpdm.h index 1ed2b8f..19ae03b 100644 --- a/arch/arm/plat-omap/include/plat/mcpdm.h +++ b/arch/arm/plat-omap/include/plat/mcpdm.h @@ -22,9 +22,7 @@ #include <linux/platform_device.h> struct omap_mcpdm_platform_data { - int (*device_enable) (struct platform_device *pdev); - int (*device_shutdown) (struct platform_device *pdev); - int (*device_idle) (struct platform_device *pdev); + bool (*was_context_lost)(struct device *dev); }; #endif diff --git a/arch/arm/plat-omap/include/plat/mmc.h b/arch/arm/plat-omap/include/plat/mmc.h index d9edb6b..b95aabd 100644 --- a/arch/arm/plat-omap/include/plat/mmc.h +++ b/arch/arm/plat-omap/include/plat/mmc.h @@ -61,9 +61,6 @@ struct omap_mmc_platform_data { int (*suspend)(struct device *dev, int slot); int (*resume)(struct device *dev, int slot); - /* Return context loss count due to PM states changing */ - int (*get_context_loss_count)(struct device *dev); - u64 dma_mask; /* Integrating attributes from the omap_hwmod layer */ diff --git a/arch/arm/plat-omap/include/plat/omap-serial.h b/arch/arm/plat-omap/include/plat/omap-serial.h index 9918647..e1189d9 100644 --- a/arch/arm/plat-omap/include/plat/omap-serial.h +++ b/arch/arm/plat-omap/include/plat/omap-serial.h @@ -37,13 +37,13 @@ * Enable module level wakeup in WER reg */ #define OMAP2_UART_WER_MOD_WKUP 0X7F -#define OMAP4_UART_WER_MOD_WKUP 0XFF +#define OMAP4_UART_WER_MOD_WKUP 0XE1 /* Enable XON/XOFF flow control on output */ -#define OMAP_UART_SW_TX 0x04 +#define OMAP_UART_SW_TX 0x8 /* Enable XON/XOFF flow control on input */ -#define OMAP_UART_SW_RX 0x04 +#define OMAP_UART_SW_RX 0x2 #define OMAP_UART_SYSC_RESET 0X07 #define OMAP_UART_TCR_TRIG 0X0F diff --git a/arch/arm/plat-omap/iommu.c b/arch/arm/plat-omap/iommu.c index 69c42b8..0a19b41 100644 --- a/arch/arm/plat-omap/iommu.c +++ b/arch/arm/plat-omap/iommu.c @@ -836,9 +836,13 @@ struct iommu *iommu_get(const char *name) mutex_lock(&obj->iommu_lock); if (obj->refcount++ == 0) { + dev_info(obj->dev, "%s: %s qos_request\n", __func__, obj->name); + pm_qos_update_request(obj->qos_request, 10); err = iommu_enable(obj); - if (err) + if (err) { + pm_qos_update_request(obj->qos_request, -1); goto err_enable; + } flush_iotlb_all(obj); } @@ -871,8 +875,16 @@ void iommu_put(struct iommu *obj) mutex_lock(&obj->iommu_lock); - if (--obj->refcount == 0) + if 
(!obj->refcount) { + dev_err(obj->dev, "%s: %s unbalanced iommu_get/put\n", + __func__, obj->name); + return -EIO; + } + + if (--obj->refcount == 0) { iommu_disable(obj); + pm_qos_update_request(obj->qos_request, -1); + } module_put(obj->owner); @@ -938,6 +950,15 @@ static int __devinit omap_iommu_probe(struct platform_device *pdev) obj->regbase = pdata->io_base; + obj->qos_request = kzalloc(sizeof(*obj->qos_request), GFP_KERNEL); + if (!obj->qos_request) { + kfree(obj); + return -ENOMEM; + } + + pm_qos_add_request(obj->qos_request, PM_QOS_CPU_DMA_LATENCY, + PM_QOS_DEFAULT_VALUE); + err = request_irq(pdata->irq, iommu_fault_handler, IRQF_SHARED, dev_name(&pdev->dev), obj); if (err < 0) @@ -977,6 +998,9 @@ static int __devexit omap_iommu_remove(struct platform_device *pdev) iopgtable_clear_entry_all(obj); free_pages((unsigned long)obj->iopgd, get_order(IOPGD_TABLE_SIZE)); + pm_qos_remove_request(obj->qos_request); + kfree(obj->qos_request); + dev_info(&pdev->dev, "%s removed\n", obj->name); kfree(obj); return 0; diff --git a/arch/arm/plat-omap/omap_rpmsg.c b/arch/arm/plat-omap/omap_rpmsg.c index 340f10b..7ef6a9b 100644 --- a/arch/arm/plat-omap/omap_rpmsg.c +++ b/arch/arm/plat-omap/omap_rpmsg.c @@ -165,7 +165,7 @@ static int omap_rpmsg_mbox_callback(struct notifier_block *this, switch (msg) { case RP_MBOX_CRASH: pr_err("%s has just crashed !\n", rpdev->rproc_name); - /* todo: smarter error handling here */ + rproc_errror_notify(rpdev->rproc); break; case RP_MBOX_ECHO_REPLY: pr_info("received echo reply from %s !\n", rpdev->rproc_name); diff --git a/drivers/cpufreq/cpufreq_hotplug.c b/drivers/cpufreq/cpufreq_hotplug.c index 853947e..4a1479d 100644 --- a/drivers/cpufreq/cpufreq_hotplug.c +++ b/drivers/cpufreq/cpufreq_hotplug.c @@ -36,8 +36,8 @@ /* Keep 10% of idle under the up threshold when decreasing the frequency */ #define DEFAULT_FREQ_DOWN_DIFFERENTIAL (10) -/* less than 20% avg load across online CPUs decreases frequency */ -#define DEFAULT_DOWN_FREQ_MAX_LOAD (30) +/* less than 35% avg load across online CPUs decreases frequency */ +#define DEFAULT_DOWN_FREQ_MAX_LOAD (35) /* default sampling period (uSec) is bogus; 10x ondemand's default for x86 */ #define DEFAULT_SAMPLING_PERIOD (100000) diff --git a/drivers/i2c/busses/i2c-omap.c b/drivers/i2c/busses/i2c-omap.c index 8c7c57c..7e73107 100644 --- a/drivers/i2c/busses/i2c-omap.c +++ b/drivers/i2c/busses/i2c-omap.c @@ -1117,6 +1117,8 @@ omap_i2c_probe(struct platform_device *pdev) goto err_free_irq; } + i2c_detect_ext_master(adap); + return 0; err_free_irq: diff --git a/drivers/i2c/i2c-core.c b/drivers/i2c/i2c-core.c index 9a58994..49ae5a5 100644 --- a/drivers/i2c/i2c-core.c +++ b/drivers/i2c/i2c-core.c @@ -39,6 +39,7 @@ #include <linux/rwsem.h> #include <linux/pm_runtime.h> #include <asm/uaccess.h> +#include <linux/interrupt.h> #include "i2c-core.h" @@ -518,6 +519,7 @@ i2c_new_device(struct i2c_adapter *adap, struct i2c_board_info const *info) client->flags = info->flags; client->addr = info->addr; client->irq = info->irq; + client->ext_master = info->ext_master; strlcpy(client->name, info->type, sizeof(client->name)); @@ -776,15 +778,27 @@ static struct class_compat *i2c_adapter_compat_class; static void i2c_scan_static_board_info(struct i2c_adapter *adapter) { struct i2c_devinfo *devinfo; + struct i2c_client *client; down_read(&__i2c_board_lock); list_for_each_entry(devinfo, &__i2c_board_list, list) { - if (devinfo->busnum == adapter->nr - && !i2c_new_device(adapter, - &devinfo->board_info)) - dev_err(&adapter->dev, - "Can't create 
device at 0x%02x\n", - devinfo->board_info.addr); + if (devinfo->busnum == adapter->nr) { + client = i2c_new_device(adapter,&devinfo->board_info); + if (!client) + dev_err(&adapter->dev, + "Can't create device at 0x%02x\n", + devinfo->board_info.addr); + else { + /* Keep track of the newly created device(s) + * with external master + */ + if (client->ext_master) { + mutex_lock(&adapter->ext_clients_lock); + list_add_tail(&client->detected, &adapter->ext_clients); + mutex_unlock(&adapter->ext_clients_lock); + } + } + } } up_read(&__i2c_board_lock); } @@ -838,6 +852,9 @@ static int i2c_register_adapter(struct i2c_adapter *adap) mutex_init(&adap->userspace_clients_lock); INIT_LIST_HEAD(&adap->userspace_clients); + mutex_init(&adap->ext_clients_lock); + INIT_LIST_HEAD(&adap->ext_clients); + /* Set default timeout to 1 second if not already set */ if (adap->timeout == 0) adap->timeout = HZ; @@ -1058,6 +1075,16 @@ int i2c_del_adapter(struct i2c_adapter *adap) } mutex_unlock(&adap->userspace_clients_lock); + /* Clear list of extenally controlled clients */ + mutex_lock(&adap->ext_clients_lock); + list_for_each_entry_safe(client, next, &adap->ext_clients, + detected) { + dev_dbg(&adap->dev, "Removing %s at 0x%x\n", client->name, + client->addr); + list_del(&client->detected); + } + mutex_unlock(&adap->ext_clients_lock); + /* Detach any active clients. This can't fail, thus we do not * check the returned value. This is a two-pass process, because * we can't remove the dummy devices during the first pass: they @@ -1094,6 +1121,46 @@ int i2c_del_adapter(struct i2c_adapter *adap) } EXPORT_SYMBOL(i2c_del_adapter); +/** + * i2c_detect_ext_master - Perform some special handling + * for externally controlled I2C devices. + * For now we only disable the spurious IRQ + * @adap: the adapter driving the client + * Context: can sleep + * + * This detects registered I2C devices which are controlled + * by a remote/external proc. 
+ */ +void i2c_detect_ext_master(struct i2c_adapter *adap) +{ + struct i2c_adapter *found; + struct i2c_client *client; + + /* First make sure that this adapter was ever added */ + mutex_lock(&core_lock); + found = idr_find(&i2c_adapter_idr, adap->nr); + mutex_unlock(&core_lock); + if (found != adap) { + pr_debug("i2c-core: attempting to process unregistered " + "adapter [%s]\n", adap->name); + return; + } + + /* Disable IRQ(s) automatically registeried via HWMOD + * for I2C channel controlled by remote master + */ + mutex_lock(&adap->ext_clients_lock); + list_for_each_entry(client, &adap->ext_clients, + detected) { + dev_dbg("Client detected %s at 0x%x\n", client->name, + client->addr); + disable_irq(client->irq); + } + mutex_unlock(&adap->ext_clients_lock); + + return; +} +EXPORT_SYMBOL(i2c_detect_ext_master); /* ------------------------------------------------------------------------- */ diff --git a/drivers/misc/omap_temp_sensor.c b/drivers/misc/omap_temp_sensor.c index 2a36ad7..8a9da17 100644 --- a/drivers/misc/omap_temp_sensor.c +++ b/drivers/misc/omap_temp_sensor.c @@ -54,10 +54,14 @@ extern void omap_thermal_throttle(void); extern void omap_thermal_unthrottle(void); +static void throttle_delayed_work_fn(struct work_struct *work); + +#define THROTTLE_DELAY_MS 1000 + #define TSHUT_THRESHOLD_TSHUT_HOT 122000 /* 122 deg C */ #define TSHUT_THRESHOLD_TSHUT_COLD 100000 /* 100 deg C */ -#define BGAP_THRESHOLD_T_HOT 95000 /* 110 deg C */ -#define BGAP_THRESHOLD_T_COLD 85000 /* 100 deg C */ +#define BGAP_THRESHOLD_T_HOT 83000 /* 83 deg C */ +#define BGAP_THRESHOLD_T_COLD 76000 /* 76 deg C */ #define OMAP_ADC_START_VALUE 530 #define OMAP_ADC_END_VALUE 923 @@ -75,7 +79,7 @@ extern void omap_thermal_unthrottle(void); * @clk_rate - Holds current clock rate */ struct omap_temp_sensor { - struct platform_device pdev; + struct platform_device *pdev; struct device *dev; struct clk *clock; struct spinlock lock; @@ -83,10 +87,11 @@ struct omap_temp_sensor { unsigned int tshut_irq; unsigned long phy_base; int is_efuse_valid; + u8 clk_on; unsigned long clk_rate; u32 current_temp; u32 save_ctx; - u8 clk_on; + struct delayed_work throttle_work; }; #ifdef CONFIG_PM @@ -260,8 +265,8 @@ static ssize_t omap_temp_show_current(struct device *dev, struct device_attribute *devattr, char *buf) { - struct platform_device *pdev = container_of(dev, struct platform_device, dev); - struct omap_temp_sensor *temp_sensor = container_of(pdev, struct omap_temp_sensor, pdev); + struct platform_device *pdev = to_platform_device(dev); + struct omap_temp_sensor *temp_sensor = platform_get_drvdata(pdev); return sprintf(buf, "%d\n", omap_read_current_temp(temp_sensor)); } @@ -304,7 +309,7 @@ static int omap_temp_sensor_enable(struct omap_temp_sensor *temp_sensor) goto out; } - ret = pm_runtime_get_sync(&temp_sensor->pdev.dev); + ret = pm_runtime_get_sync(&temp_sensor->pdev->dev); if (ret) { pr_err("%s:get sync failed\n", __func__); ret = -EINVAL; @@ -357,7 +362,7 @@ static int omap_temp_sensor_disable(struct omap_temp_sensor *temp_sensor) temp = omap_temp_sensor_readl(temp_sensor, BGAP_STATUS_OFFSET); /* Gate the clock */ - ret = pm_runtime_put_sync_suspend(&temp_sensor->pdev.dev); + ret = pm_runtime_put_sync_suspend(&temp_sensor->pdev->dev); if (ret) { pr_err("%s:put sync failed\n", __func__); ret = -EINVAL; @@ -370,6 +375,32 @@ out: return ret; } +/* + * Check if the die sensor is cooling down. If it's higher than + * t_hot since the last throttle then throttle it again. 
+ * OMAP junction temperature could stay for a long time in an + * unacceptable temperature range. The idea here is to check after + * t_hot->throttle the system really came below t_hot else re-throttle + * and keep doing till it's under t_hot temp range. + */ +static void throttle_delayed_work_fn(struct work_struct *work) +{ + u32 curr; + struct omap_temp_sensor *temp_sensor = + container_of(work, struct omap_temp_sensor, + throttle_work.work); + curr = omap_read_current_temp(temp_sensor); + + if (curr >= BGAP_THRESHOLD_T_HOT || curr < 0) { + omap_thermal_throttle(); + schedule_delayed_work(&temp_sensor->throttle_work, + msecs_to_jiffies(THROTTLE_DELAY_MS)); + } else { + schedule_delayed_work(&temp_sensor->throttle_work, + msecs_to_jiffies(THROTTLE_DELAY_MS)); + } +} + static irqreturn_t omap_tshut_irq_handler(int irq, void *data) { struct omap_temp_sensor *temp_sensor = (struct omap_temp_sensor *)data; @@ -400,9 +431,12 @@ static irqreturn_t omap_talert_irq_handler(int irq, void *data) temp_offset = omap_temp_sensor_readl(temp_sensor, BGAP_CTRL_OFFSET); if (t_hot) { omap_thermal_throttle(); + schedule_delayed_work(&temp_sensor->throttle_work, + msecs_to_jiffies(THROTTLE_DELAY_MS)); temp_offset &= ~(OMAP4_MASK_HOT_MASK); temp_offset |= OMAP4_MASK_COLD_MASK; } else if (t_cold) { + cancel_delayed_work_sync(&temp_sensor->throttle_work); omap_thermal_unthrottle(); temp_offset &= ~(OMAP4_MASK_COLD_MASK); temp_offset |= OMAP4_MASK_HOT_MASK; @@ -464,7 +498,7 @@ static int __devinit omap_temp_sensor_probe(struct platform_device *pdev) } temp_sensor->phy_base = pdata->offset; - temp_sensor->pdev = *pdev; + temp_sensor->pdev = pdev; temp_sensor->dev = dev; temp_sensor->save_ctx = 0; @@ -479,7 +513,7 @@ static int __devinit omap_temp_sensor_probe(struct platform_device *pdev) OMAP4_CTRL_MODULE_CORE_STD_FUSE_OPP_BGAP)) temp_sensor->is_efuse_valid = 1; - temp_sensor->clock = clk_get(&temp_sensor->pdev.dev, "fck"); + temp_sensor->clock = clk_get(&temp_sensor->pdev->dev, "fck"); if (IS_ERR(temp_sensor->clock)) { ret = PTR_ERR(temp_sensor->clock); pr_err("%s:Unable to get fclk: %d\n", __func__, ret); @@ -487,14 +521,18 @@ static int __devinit omap_temp_sensor_probe(struct platform_device *pdev) goto clk_get_err; } + /* Init delayed work for throttle decision */ + INIT_DELAYED_WORK(&temp_sensor->throttle_work, + throttle_delayed_work_fn); + + platform_set_drvdata(pdev, temp_sensor); + ret = omap_temp_sensor_enable(temp_sensor); if (ret) { dev_err(dev, "%s:Cannot enable temp sensor\n", __func__); goto sensor_enable_err; } - platform_set_drvdata(pdev, temp_sensor); - omap_enable_continuous_mode(temp_sensor); omap_configure_temp_sensor_thresholds(temp_sensor); /* 1 ms */ @@ -542,10 +580,12 @@ static int __devinit omap_temp_sensor_probe(struct platform_device *pdev) dev_info(dev, "%s probed", pdata->name); temp_sensor_pm = temp_sensor; + return 0; sysfs_create_err: free_irq(temp_sensor->tshut_irq, temp_sensor); + cancel_delayed_work_sync(&temp_sensor->throttle_work); tshut_irq_req_err: free_irq(temp_sensor->irq, temp_sensor); req_irq_err: @@ -566,10 +606,10 @@ plat_res_err: static int __devexit omap_temp_sensor_remove(struct platform_device *pdev) { - struct omap_temp_sensor *temp_sensor = container_of(pdev, - struct omap_temp_sensor, pdev); + struct omap_temp_sensor *temp_sensor = platform_get_drvdata(pdev); sysfs_remove_group(&pdev->dev.kobj, &omap_temp_sensor_group); + cancel_delayed_work_sync(&temp_sensor->throttle_work); omap_temp_sensor_disable(temp_sensor); clk_put(temp_sensor->clock); 
platform_set_drvdata(pdev, NULL); @@ -619,8 +659,7 @@ static void omap_temp_sensor_restore_ctxt(struct omap_temp_sensor *temp_sensor) static int omap_temp_sensor_suspend(struct platform_device *pdev, pm_message_t state) { - struct omap_temp_sensor *temp_sensor = container_of(pdev, - struct omap_temp_sensor, pdev); + struct omap_temp_sensor *temp_sensor = platform_get_drvdata(pdev); omap_temp_sensor_disable(temp_sensor); @@ -629,8 +668,8 @@ static int omap_temp_sensor_suspend(struct platform_device *pdev, static int omap_temp_sensor_resume(struct platform_device *pdev) { - struct omap_temp_sensor *temp_sensor = container_of(pdev, - struct omap_temp_sensor, pdev); + struct omap_temp_sensor *temp_sensor = platform_get_drvdata(pdev); + omap_temp_sensor_enable(temp_sensor); return 0; @@ -654,9 +693,9 @@ omap_temp_sensor_resume NULL #endif /* CONFIG_PM */ static int omap_temp_sensor_runtime_suspend(struct device *dev) { - struct platform_device *pdev = to_platform_device(dev); - struct omap_temp_sensor *temp_sensor = container_of(pdev, - struct omap_temp_sensor, pdev); + struct omap_temp_sensor *temp_sensor = + platform_get_drvdata(to_platform_device(dev)); + omap_temp_sensor_save_ctxt(temp_sensor); temp_sensor->save_ctx = 1; return 0; @@ -664,9 +703,8 @@ static int omap_temp_sensor_runtime_suspend(struct device *dev) static int omap_temp_sensor_runtime_resume(struct device *dev) { - struct platform_device *pdev = to_platform_device(dev); - struct omap_temp_sensor *temp_sensor = container_of(pdev, - struct omap_temp_sensor, pdev); + struct omap_temp_sensor *temp_sensor = + platform_get_drvdata(to_platform_device(dev)); if (temp_sensor->save_ctx) return 0; if (omap_pm_was_context_lost(dev)) { diff --git a/drivers/mmc/core/core.c b/drivers/mmc/core/core.c index 7c3444a..bbe0f2e 100644 --- a/drivers/mmc/core/core.c +++ b/drivers/mmc/core/core.c @@ -40,7 +40,6 @@ #include "sdio_ops.h" static struct workqueue_struct *workqueue; -static struct wake_lock mmc_delayed_work_wake_lock; /* * Enabling software CRCs on the data blocks can be a significant (30%) @@ -73,7 +72,6 @@ MODULE_PARM_DESC( static int mmc_schedule_delayed_work(struct delayed_work *work, unsigned long delay) { - wake_lock(&mmc_delayed_work_wake_lock); return queue_delayed_work(workqueue, work, delay); } @@ -559,12 +557,9 @@ void mmc_host_deeper_disable(struct work_struct *work) /* If the host is claimed then we do not want to disable it anymore */ if (!mmc_try_claim_host(host)) - goto out; + return; mmc_host_do_disable(host, 1); mmc_do_release_host(host); - -out: - wake_unlock(&mmc_delayed_work_wake_lock); } /** @@ -1198,6 +1193,7 @@ void mmc_detect_change(struct mmc_host *host, unsigned long delay) spin_unlock_irqrestore(&host->lock, flags); #endif + wake_lock(&host->detect_wake_lock); mmc_schedule_delayed_work(&host->detect, delay); } @@ -1660,11 +1656,13 @@ void mmc_rescan(struct work_struct *work) out: if (extend_wakelock) - wake_lock_timeout(&mmc_delayed_work_wake_lock, HZ / 2); + wake_lock_timeout(&host->detect_wake_lock, HZ / 2); else - wake_unlock(&mmc_delayed_work_wake_lock); - if (host->caps & MMC_CAP_NEEDS_POLL) + wake_unlock(&host->detect_wake_lock); + if (host->caps & MMC_CAP_NEEDS_POLL) { + wake_lock(&host->detect_wake_lock); mmc_schedule_delayed_work(&host->detect, HZ); + } } void mmc_start_host(struct mmc_host *host) @@ -1684,7 +1682,8 @@ void mmc_stop_host(struct mmc_host *host) if (host->caps & MMC_CAP_DISABLE) cancel_delayed_work(&host->disable); - cancel_delayed_work_sync(&host->detect); + if 
(cancel_delayed_work_sync(&host->detect)) + wake_unlock(&host->detect_wake_lock); mmc_flush_scheduled_work(); /* clear pm flags now and let card drivers set them as needed */ @@ -1805,7 +1804,8 @@ int mmc_suspend_host(struct mmc_host *host) if (host->caps & MMC_CAP_DISABLE) cancel_delayed_work(&host->disable); - cancel_delayed_work(&host->detect); + if (cancel_delayed_work(&host->detect)) + wake_unlock(&host->detect_wake_lock); mmc_flush_scheduled_work(); mmc_bus_get(host); @@ -1907,7 +1907,8 @@ int mmc_pm_notify(struct notifier_block *notify_block, } host->rescan_disable = 1; spin_unlock_irqrestore(&host->lock, flags); - cancel_delayed_work_sync(&host->detect); + if (cancel_delayed_work_sync(&host->detect)) + wake_unlock(&host->detect_wake_lock); if (!host->bus_ops || host->bus_ops->suspend) break; @@ -1965,9 +1966,6 @@ static int __init mmc_init(void) if (!workqueue) return -ENOMEM; - wake_lock_init(&mmc_delayed_work_wake_lock, WAKE_LOCK_SUSPEND, - "mmc_delayed_work"); - ret = mmc_register_bus(); if (ret) goto destroy_workqueue; @@ -1988,7 +1986,6 @@ unregister_bus: mmc_unregister_bus(); destroy_workqueue: destroy_workqueue(workqueue); - wake_lock_destroy(&mmc_delayed_work_wake_lock); return ret; } @@ -1999,7 +1996,6 @@ static void __exit mmc_exit(void) mmc_unregister_host_class(); mmc_unregister_bus(); destroy_workqueue(workqueue); - wake_lock_destroy(&mmc_delayed_work_wake_lock); } subsys_initcall(mmc_init); diff --git a/drivers/mmc/core/host.c b/drivers/mmc/core/host.c index 84694a9..facc4ea 100644 --- a/drivers/mmc/core/host.c +++ b/drivers/mmc/core/host.c @@ -284,6 +284,8 @@ struct mmc_host *mmc_alloc_host(int extra, struct device *dev) spin_lock_init(&host->lock); init_waitqueue_head(&host->wq); + wake_lock_init(&host->detect_wake_lock, WAKE_LOCK_SUSPEND, + kasprintf(GFP_KERNEL, "%s_detect", mmc_hostname(host))); INIT_DELAYED_WORK(&host->detect, mmc_rescan); INIT_DELAYED_WORK_DEFERRABLE(&host->disable, mmc_host_deeper_disable); #ifdef CONFIG_PM @@ -383,6 +385,7 @@ void mmc_free_host(struct mmc_host *host) spin_lock(&mmc_host_lock); idr_remove(&mmc_host_idr, host->index); spin_unlock(&mmc_host_lock); + wake_lock_destroy(&host->detect_wake_lock); put_device(&host->class_dev); } diff --git a/drivers/net/wireless/bcmdhd/dhd.h b/drivers/net/wireless/bcmdhd/dhd.h index 735d39b..5d9e678 100644 --- a/drivers/net/wireless/bcmdhd/dhd.h +++ b/drivers/net/wireless/bcmdhd/dhd.h @@ -202,7 +202,7 @@ typedef struct dhd_pub { void* wlfc_state; #endif bool dongle_isolation; - + int hang_was_sent; #ifdef WLMEDIA_HTSF uint8 htsfdlystat_sz; /* Size of delay stats, max 255B */ #endif @@ -389,7 +389,7 @@ extern void dhd_os_sdlock_rxq(dhd_pub_t * pub); extern void dhd_os_sdunlock_rxq(dhd_pub_t * pub); extern void dhd_os_sdlock_sndup_rxq(dhd_pub_t * pub); extern void dhd_customer_gpio_wlan_ctrl(int onoff); -extern int dhd_custom_get_mac_address(unsigned char *buf); +extern int dhd_custom_get_mac_address(unsigned char *buf); extern void dhd_os_sdunlock_sndup_rxq(dhd_pub_t * pub); extern void dhd_os_sdlock_eventq(dhd_pub_t * pub); extern void dhd_os_sdunlock_eventq(dhd_pub_t * pub); diff --git a/drivers/net/wireless/bcmdhd/dhd_cdc.c b/drivers/net/wireless/bcmdhd/dhd_cdc.c index 54ce6ff..a86ea56 100644 --- a/drivers/net/wireless/bcmdhd/dhd_cdc.c +++ b/drivers/net/wireless/bcmdhd/dhd_cdc.c @@ -57,6 +57,17 @@ * round off at the end of buffer */ +#define BUS_RETRIES 1 /* # of retries before aborting a bus tx operation */ + +#ifdef PROP_TXSTATUS +typedef struct dhd_wlfc_commit_info { + uint8 needs_hdr; + uint8 
ac_fifo_credit_spent; + ewlfc_packet_state_t pkt_type; + wlfc_mac_descriptor_t* mac_entry; + void* p; +} dhd_wlfc_commit_info_t; +#endif /* PROP_TXSTATUS */ typedef struct dhd_prot { uint16 reqid; uint8 pending; @@ -152,7 +163,8 @@ dhdcdc_query_ioctl(dhd_pub_t *dhd, int ifidx, uint cmd, void *buf, uint len, uin memcpy(prot->buf, buf, len); if ((ret = dhdcdc_msg(dhd)) < 0) { - DHD_ERROR(("dhdcdc_query_ioctl: dhdcdc_msg failed w/status %d\n", ret)); + if (!dhd->hang_was_sent) + DHD_ERROR(("dhdcdc_query_ioctl: dhdcdc_msg failed w/status %d\n", ret)); goto done; } @@ -206,6 +218,17 @@ dhdcdc_set_ioctl(dhd_pub_t *dhd, int ifidx, uint cmd, void *buf, uint len, uint8 DHD_TRACE(("%s: Enter\n", __FUNCTION__)); DHD_CTL(("%s: cmd %d len %d\n", __FUNCTION__, cmd, len)); + if (dhd->busstate == DHD_BUS_DOWN) { + DHD_ERROR(("%s : bus is down. we have nothing to do\n", __FUNCTION__)); + return -EIO; + } + + /* don't talk to the dongle if fw is about to be reloaded */ + if (dhd->hang_was_sent) { + DHD_ERROR(("%s: HANG was sent up earlier. Not talking to the chip\n", + __FUNCTION__)); + return -EIO; + } memset(msg, 0, sizeof(cdc_ioctl_t)); @@ -629,8 +652,8 @@ dhd_wlfc_hanger_get_free_slot(void* hanger) if (h->items[i].state == WLFC_HANGER_ITEM_STATE_FREE) return (uint16)i; } + h->failed_slotfind++; } - h->failed_slotfind++; return WLFC_HANGER_MAXITEMS; } @@ -1287,6 +1310,30 @@ _dhd_wlfc_mac_entry_update(athost_wl_status_info_t* ctx, wlfc_mac_descriptor_t* } int +_dhd_wlfc_borrow_credit(athost_wl_status_info_t* ctx, uint8 available_credit_map, int borrower_ac) +{ + int lender_ac; + int rc = BCME_ERROR; + + if (ctx == NULL || available_credit_map == 0) { + WLFC_DBGMESG(("Error: %s():%d\n", __FUNCTION__, __LINE__)); + return BCME_BADARG; + } + + /* Borrow from lowest priority available AC (including BC/MC credits) */ + for (lender_ac = 0; lender_ac <= AC_COUNT; lender_ac++) { + if ((available_credit_map && (1 << lender_ac)) && + (ctx->FIFO_credit[lender_ac] > 0)) { + ctx->credits_borrowed[borrower_ac][lender_ac]++; + ctx->FIFO_credit[lender_ac]--; + rc = BCME_OK; + break; + } + } + + return rc; +} +int dhd_wlfc_interface_entry_update(void* state, ewlfc_mac_entry_action_t action, uint8 ifid, uint8 iftype, uint8* ea) { @@ -1313,6 +1360,7 @@ dhd_wlfc_FIFOcreditmap_update(void* state, uint8* credits) /* credit for bc/mc packets */ ctx->FIFO_credit[4] = credits[4]; /* credit for ATIM FIFO is not used yet. */ + ctx->FIFO_credit[5] = 0; return BCME_OK; } @@ -1345,17 +1393,68 @@ dhd_wlfc_enque_sendq(void* state, int prec, void* p) } int +_dhd_wlfc_handle_packet_commit(athost_wl_status_info_t* ctx, int ac, + dhd_wlfc_commit_info_t *commit_info, f_commitpkt_t fcommit, void* commit_ctx) +{ + uint32 hslot; + int rc; + + /* + if ac_fifo_credit_spent = 0 + + This packet will not count against the FIFO credit. + To ensure the txstatus corresponding to this packet + does not provide an implied credit (default behavior) + mark the packet accordingly. + + if ac_fifo_credit_spent = 1 + + This is a normal packet and it counts against the FIFO + credit count. 
+ */ + DHD_PKTTAG_SETCREDITCHECK(PKTTAG(commit_info->p), commit_info->ac_fifo_credit_spent); + rc = _dhd_wlfc_pretx_pktprocess(ctx, commit_info->mac_entry, commit_info->p, + commit_info->needs_hdr, &hslot); + + if (rc == BCME_OK) + rc = fcommit(commit_ctx, commit_info->p); + else + ctx->stats.generic_error++; + + if (rc == BCME_OK) { + ctx->stats.pkt2bus++; + if (commit_info->ac_fifo_credit_spent) { + ctx->stats.sendq_pkts[ac]++; + WLFC_HOST_FIFO_CREDIT_INC_SENTCTRS(ctx, ac); + } + } + else { + /* + bus commit has failed, rollback. + - remove wl-header for a delayed packet + - save wl-header header for suppressed packets + */ + rc = _dhd_wlfc_rollback_packet_toq(ctx, commit_info->p, + (commit_info->pkt_type), hslot); + if (rc != BCME_OK) + ctx->stats.rollback_failed++; + + rc = BCME_ERROR; + } + + return rc; +} +int dhd_wlfc_commit_packets(void* state, f_commitpkt_t fcommit, void* commit_ctx) { int ac; int credit; - uint8 ac_fifo_credit_spent; - uint8 needs_hdr; - uint32 hslot; - void* p; int rc; + dhd_wlfc_commit_info_t commit_info; athost_wl_status_info_t* ctx = (athost_wl_status_info_t*)state; - wlfc_mac_descriptor_t* mac_entry; + int credit_count = 0; + int bus_retry_count = 0; + uint8 ac_available = 0; /* Bitmask for 4 ACs + BC/MC */ if ((state == NULL) || (fcommit == NULL)) { @@ -1363,8 +1462,12 @@ dhd_wlfc_commit_packets(void* state, f_commitpkt_t fcommit, void* commit_ctx) return BCME_BADARG; } - /* + memset(&commit_info, 0, sizeof(commit_info)); + + /* Commit packets for regular AC traffic. Higher priority first. + First, use up FIFO credits available to each AC. Based on distribution + and credits left, borrow from other ACs as applicable -NOTE: If the bus between the host and firmware is overwhelmed by the @@ -1373,96 +1476,189 @@ dhd_wlfc_commit_packets(void* state, f_commitpkt_t fcommit, void* commit_ctx) have to employ weighted round-robin or ucode scheme to avoid low priority packet starvation. */ + for (ac = AC_COUNT; ac >= 0; ac--) { - for (credit = 0; credit < ctx->FIFO_credit[ac];) { - p = _dhd_wlfc_deque_delayedq(ctx, ac, &ac_fifo_credit_spent, &needs_hdr, - &mac_entry); - if (p == NULL) - break; - /* - if ac_fifo_credit_spent = 0 - This packet will not count against the FIFO credit. - To ensure the txstatus corresponding to this packet - does not provide an implied credit (default behavior) - mark the packet accordingly. + int initial_credit_count = ctx->FIFO_credit[ac]; - if ac_fifo_credit_spent = 1 + for (credit = 0; credit < ctx->FIFO_credit[ac];) { + commit_info.p = _dhd_wlfc_deque_delayedq(ctx, ac, + &(commit_info.ac_fifo_credit_spent), + &(commit_info.needs_hdr), + &(commit_info.mac_entry)); - This is a normal packet and it counts against the FIFO - credit count. - */ - DHD_PKTTAG_SETCREDITCHECK(PKTTAG(p), ac_fifo_credit_spent); - rc = _dhd_wlfc_pretx_pktprocess(ctx, mac_entry, p, needs_hdr, &hslot); + if (commit_info.p == NULL) + break; - if (rc == BCME_OK) - rc = fcommit(commit_ctx, p); - else - ctx->stats.generic_error++; + commit_info.pkt_type = (commit_info.needs_hdr) ? eWLFC_PKTTYPE_DELAYED : + eWLFC_PKTTYPE_SUPPRESSED; + rc = _dhd_wlfc_handle_packet_commit(ctx, ac, &commit_info, + fcommit, commit_ctx); + + /* Bus commits may fail (e.g. flow control); abort after retries */ if (rc == BCME_OK) { - ctx->stats.pkt2bus++; - if (ac_fifo_credit_spent) { - ctx->stats.sendq_pkts[ac]++; - WLFC_HOST_FIFO_CREDIT_INC_SENTCTRS(ctx, ac); - /* - 1 FIFO credit has been spent by sending this packet - to the device. 
- */ + if (commit_info.ac_fifo_credit_spent) { credit++; } } else { - /* bus commit has failed, rollback. */ - rc = _dhd_wlfc_rollback_packet_toq(ctx, - p, - /* - - remove wl-header for a delayed packet - - save wl-header header for suppressed packets - */ - (needs_hdr ? eWLFC_PKTTYPE_DELAYED : - eWLFC_PKTTYPE_SUPPRESSED), - hslot); - if (rc != BCME_OK) - ctx->stats.rollback_failed++; + bus_retry_count++; + if (bus_retry_count >= BUS_RETRIES) { + DHD_ERROR(("dhd_wlfc_commit_packets(): bus error\n")); + ctx->FIFO_credit[ac] -= credit; + return rc; + } } } + ctx->FIFO_credit[ac] -= credit; - /* packets from SENDQ are fresh and they'd need header */ - needs_hdr = 1; + + /* packets from SENDQ are fresh and they'd need header and have no MAC entry */ + commit_info.needs_hdr = 1; + commit_info.mac_entry = NULL; + commit_info.pkt_type = eWLFC_PKTTYPE_NEW; + for (credit = 0; credit < ctx->FIFO_credit[ac];) { - p = _dhd_wlfc_deque_sendq(ctx, ac, &ac_fifo_credit_spent); - if (p == NULL) + commit_info.p = _dhd_wlfc_deque_sendq(ctx, ac, + &(commit_info.ac_fifo_credit_spent)); + if (commit_info.p == NULL) break; - DHD_PKTTAG_SETCREDITCHECK(PKTTAG(p), ac_fifo_credit_spent); - rc = _dhd_wlfc_pretx_pktprocess(ctx, NULL, p, needs_hdr, &hslot); - if (rc == BCME_OK) - rc = fcommit(commit_ctx, p); - else - ctx->stats.generic_error++; + rc = _dhd_wlfc_handle_packet_commit(ctx, ac, &commit_info, + fcommit, commit_ctx); + /* Bus commits may fail (e.g. flow control); abort after retries */ if (rc == BCME_OK) { - ctx->stats.pkt2bus++; - if (ac_fifo_credit_spent) { - WLFC_HOST_FIFO_CREDIT_INC_SENTCTRS(ctx, ac); - ctx->stats.sendq_pkts[ac]++; + if (commit_info.ac_fifo_credit_spent) { credit++; } } else { - /* bus commit has failed, rollback. */ - rc = _dhd_wlfc_rollback_packet_toq(ctx, - p, - /* remove wl-header while rolling back */ - eWLFC_PKTTYPE_NEW, - hslot); - if (rc != BCME_OK) - ctx->stats.rollback_failed++; + bus_retry_count++; + if (bus_retry_count >= BUS_RETRIES) { + DHD_ERROR(("dhd_wlfc_commit_packets(): bus error\n")); + ctx->FIFO_credit[ac] -= credit; + return rc; + } } } + ctx->FIFO_credit[ac] -= credit; + + /* If no credits were used, the queue is idle and can be re-used + Note that resv credits cannot be borrowed + */ + if (initial_credit_count == ctx->FIFO_credit[ac]) { + ac_available |= (1 << ac); + credit_count += ctx->FIFO_credit[ac]; + } + } + + /* We borrow only for AC_BE and only if no other traffic seen for DEFER_PERIOD + + Note that (ac_available & WLFC_AC_BE_TRAFFIC_ONLY) is done to: + a) ignore BC/MC for deferring borrow + b) ignore AC_BE being available along with other ACs + (this should happen only for pure BC/MC traffic) + + i.e. AC_VI, AC_VO, AC_BK all MUST be available (i.e. 
no traffic) and + we do not care if AC_BE and BC/MC are available or not + */ + if ((ac_available & WLFC_AC_BE_TRAFFIC_ONLY) == WLFC_AC_BE_TRAFFIC_ONLY) { + + if (ctx->allow_credit_borrow) { + ac = 1; /* Set ac to AC_BE and borrow credits */ + } + else { + int delta; + int curr_t = OSL_SYSUPTIME(); + + if (curr_t > ctx->borrow_defer_timestamp) + delta = curr_t - ctx->borrow_defer_timestamp; + else + delta = 0xffffffff + curr_t - ctx->borrow_defer_timestamp; + + if (delta >= WLFC_BORROW_DEFER_PERIOD_MS) { + /* Reset borrow but defer to next iteration (defensive borrowing) */ + ctx->allow_credit_borrow = TRUE; + ctx->borrow_defer_timestamp = 0; + } + return BCME_OK; + } } + else { + /* If we have multiple AC traffic, turn off borrowing, mark time and bail out */ + ctx->allow_credit_borrow = FALSE; + ctx->borrow_defer_timestamp = OSL_SYSUPTIME(); + return BCME_OK; + } + + /* At this point, borrow all credits only for "ac" (which should be set above to AC_BE) + Generically use "ac" only in case we extend to all ACs in future + */ + for (; (credit_count > 0);) { + + commit_info.p = _dhd_wlfc_deque_delayedq(ctx, ac, + &(commit_info.ac_fifo_credit_spent), + &(commit_info.needs_hdr), + &(commit_info.mac_entry)); + if (commit_info.p == NULL) + break; + + commit_info.pkt_type = (commit_info.needs_hdr) ? eWLFC_PKTTYPE_DELAYED : + eWLFC_PKTTYPE_SUPPRESSED; + + rc = _dhd_wlfc_handle_packet_commit(ctx, ac, &commit_info, + fcommit, commit_ctx); + + /* Bus commits may fail (e.g. flow control); abort after retries */ + if (rc == BCME_OK) { + if (commit_info.ac_fifo_credit_spent) { + (void) _dhd_wlfc_borrow_credit(ctx, ac_available, ac); + credit_count--; + } + } + else { + bus_retry_count++; + if (bus_retry_count >= BUS_RETRIES) { + DHD_ERROR(("dhd_wlfc_commit_packets(): bus error\n")); + return rc; + } + } + } + + /* packets from SENDQ are fresh and they'd need header and have no MAC entry */ + commit_info.needs_hdr = 1; + commit_info.mac_entry = NULL; + commit_info.pkt_type = eWLFC_PKTTYPE_NEW; + + for (; (credit_count > 0);) { + + commit_info.p = _dhd_wlfc_deque_sendq(ctx, ac, + &(commit_info.ac_fifo_credit_spent)); + if (commit_info.p == NULL) + break; + + rc = _dhd_wlfc_handle_packet_commit(ctx, ac, &commit_info, + fcommit, commit_ctx); + + /* Bus commits may fail (e.g. 
flow control); abort after retries */ + if (rc == BCME_OK) { + if (commit_info.ac_fifo_credit_spent) { + (void) _dhd_wlfc_borrow_credit(ctx, ac_available, ac); + credit_count--; + } + } + else { + bus_retry_count++; + if (bus_retry_count >= BUS_RETRIES) { + DHD_ERROR(("dhd_wlfc_commit_packets(): bus error\n")); + return rc; + } + } + } + return BCME_OK; } @@ -1489,6 +1685,7 @@ dhd_wlfc_txcomplete(dhd_pub_t *dhd, void *txp, bool success) athost_wl_status_info_t* wlfc = (athost_wl_status_info_t*) dhd->wlfc_state; void* p; + int fifo_id; if (DHD_PKTTAG_SIGNALONLY(PKTTAG(txp))) { #ifdef PROP_TXSTATUS_DEBUG @@ -1506,11 +1703,29 @@ dhd_wlfc_txcomplete(dhd_pub_t *dhd, void *txp, bool success) /* indicate failure and free the packet */ dhd_txcomplete(dhd, txp, FALSE); - PKTFREE(wlfc->osh, txp, TRUE); /* return the credit, if necessary */ - if (DHD_PKTTAG_CREDITCHECK(PKTTAG(txp))) - wlfc->FIFO_credit[DHD_PKTTAG_FIFO(PKTTAG(txp))]++; + if (DHD_PKTTAG_CREDITCHECK(PKTTAG(txp))) { + int lender, credit_returned = 0; /* Note that borrower is fifo_id */ + + fifo_id = DHD_PKTTAG_FIFO(PKTTAG(txp)); + + /* Return credits to highest priority lender first */ + for (lender = AC_COUNT; lender >= 0; lender--) { + if (wlfc->credits_borrowed[fifo_id][lender] > 0) { + wlfc->FIFO_credit[lender]++; + wlfc->credits_borrowed[fifo_id][lender]--; + credit_returned = 1; + break; + } + } + + if (!credit_returned) { + wlfc->FIFO_credit[fifo_id]++; + } + } + + PKTFREE(wlfc->osh, txp, TRUE); } return; } @@ -1593,7 +1808,21 @@ dhd_wlfc_txstatus_update(dhd_pub_t *dhd, uint8* pkt_info) /* pick up the implicit credit from this packet */ if (DHD_PKTTAG_CREDITCHECK(PKTTAG(pktbuf))) { if (wlfc->proptxstatus_mode == WLFC_FCMODE_IMPLIED_CREDIT) { - wlfc->FIFO_credit[fifo_id]++; + int lender, credit_returned = 0; /* Note that borrower is fifo_id */ + + /* Return credits to highest priority lender first */ + for (lender = AC_COUNT; lender >= 0; lender--) { + if (wlfc->credits_borrowed[fifo_id][lender] > 0) { + wlfc->FIFO_credit[lender]++; + wlfc->credits_borrowed[fifo_id][lender]--; + credit_returned = 1; + break; + } + } + + if (!credit_returned) { + wlfc->FIFO_credit[fifo_id]++; + } } } else { @@ -1644,8 +1873,33 @@ dhd_wlfc_fifocreditback_indicate(dhd_pub_t *dhd, uint8* credits) #endif /* update FIFO credits */ if (wlfc->proptxstatus_mode == WLFC_FCMODE_EXPLICIT_CREDIT) - wlfc->FIFO_credit[i] += credits[i]; + { + int lender; /* Note that borrower is i */ + + /* Return credits to highest priority lender first */ + for (lender = AC_COUNT; (lender >= 0) && (credits[i] > 0); lender--) { + if (wlfc->credits_borrowed[i][lender] > 0) { + if (credits[i] >= wlfc->credits_borrowed[i][lender]) { + credits[i] -= wlfc->credits_borrowed[i][lender]; + wlfc->FIFO_credit[lender] += + wlfc->credits_borrowed[i][lender]; + wlfc->credits_borrowed[i][lender] = 0; + } + else { + wlfc->credits_borrowed[i][lender] -= credits[i]; + wlfc->FIFO_credit[lender] += credits[i]; + credits[i] = 0; + } + } + } + + /* If we have more credits left over, these must belong to the AC */ + if (credits[i] > 0) { + wlfc->FIFO_credit[i] += credits[i]; + } + } } + return BCME_OK; } @@ -1986,6 +2240,9 @@ dhd_wlfc_enable(dhd_pub_t *dhd) wlfc->proptxstatus_mode = WLFC_FCMODE_EXPLICIT_CREDIT; + wlfc->allow_credit_borrow = TRUE; + wlfc->borrow_defer_timestamp = 0; + return BCME_OK; } @@ -2165,7 +2422,7 @@ dhd_prot_hdrpull(dhd_pub_t *dhd, int *ifidx, void *pktbuf) dhd_wlfc_parse_header_info(dhd, pktbuf, (h->dataOffset << 2)); 
((athost_wl_status_info_t*)dhd->wlfc_state)->stats.dhd_hdrpulls++; dhd_wlfc_commit_packets(dhd->wlfc_state, (f_commitpkt_t)dhd_bus_txdata, - dhd->bus); + (void *)dhd->bus); dhd_os_wlfc_unblock(dhd); } #endif /* PROP_TXSTATUS */ @@ -2232,7 +2489,6 @@ dhd_prot_dstats(dhd_pub_t *dhd) return; } - int dhd_prot_init(dhd_pub_t *dhd) { diff --git a/drivers/net/wireless/bcmdhd/dhd_common.c b/drivers/net/wireless/bcmdhd/dhd_common.c index 13791cd..94d3262 100644 --- a/drivers/net/wireless/bcmdhd/dhd_common.c +++ b/drivers/net/wireless/bcmdhd/dhd_common.c @@ -1970,7 +1970,7 @@ dhd_pno_set(dhd_pub_t *dhd, wlc_ssid_t* ssids_local, int nssid, ushort scan_fr, pfn_element.wpa_auth = htod32(WPA_AUTH_PFN_ANY); pfn_element.wsec = htod32(0); pfn_element.infra = htod32(1); - + pfn_element.flags = htod32(ENABLE << WL_PFN_HIDDEN_BIT); memcpy((char *)pfn_element.ssid.SSID, ssids_local[i].SSID, ssids_local[i].SSID_len); pfn_element.ssid.SSID_len = ssids_local[i].SSID_len; diff --git a/drivers/net/wireless/bcmdhd/dhd_linux.c b/drivers/net/wireless/bcmdhd/dhd_linux.c index 569bd7f..61b1aef 100644 --- a/drivers/net/wireless/bcmdhd/dhd_linux.c +++ b/drivers/net/wireless/bcmdhd/dhd_linux.c @@ -132,7 +132,11 @@ MODULE_LICENSE("GPL v2"); #include <dhd_bus.h> +#ifndef PROP_TXSTATUS #define DBUS_RX_BUFFER_SIZE_DHD(net) (net->mtu + net->hard_header_len + dhd->pub.hdrlen) +#else +#define DBUS_RX_BUFFER_SIZE_DHD(net) (net->mtu + net->hard_header_len + dhd->pub.hdrlen + 128) +#endif #if LINUX_VERSION_CODE == KERNEL_VERSION(2, 6, 15) const char * @@ -218,7 +222,7 @@ typedef struct dhd_info { struct semaphore proto_sem; #ifdef PROP_TXSTATUS spinlock_t wlfc_spinlock; -#endif +#endif /* PROP_TXSTATUS */ #ifdef WLMEDIA_HTSF htsf_t htsf; #endif @@ -258,8 +262,6 @@ typedef struct dhd_info { int wakelock_counter; int wakelock_timeout_enable; - int hang_was_sent; - /* Thread to issue ioctl for multicast */ bool set_macaddress; struct ether_addr macvalue; @@ -498,7 +500,6 @@ static struct notifier_block dhd_sleep_pm_notifier = { extern int register_pm_notifier(struct notifier_block *nb); extern int unregister_pm_notifier(struct notifier_block *nb); #endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) && defined(CONFIG_PM_SLEEP) */ - /* && defined(DHD_GPL) */ static void dhd_set_packet_filter(int value, dhd_pub_t *dhd) { @@ -976,10 +977,8 @@ dhd_op_if(dhd_if_t *ifp) dhd_net_attach)) { ifp->state = 0; return; - } - + } #endif - if ((err = dhd_net_attach(&dhd->pub, ifp->idx)) != 0) { DHD_ERROR(("%s: dhd_net_attach failed, err %d\n", __FUNCTION__, err)); @@ -1037,7 +1036,7 @@ dhd_op_if(dhd_if_t *ifp) ap_net_dev = NULL; /* NULL SOFTAP global wl0.1 as well */ dhd_os_spin_unlock(&dhd->pub, flags); #endif /* SOFTAP */ - MFREE(dhd->pub.osh, ifp, sizeof(*ifp)); + MFREE(dhd->pub.osh, ifp, sizeof(*ifp)); } } @@ -1413,6 +1412,7 @@ dhd_rx_frame(dhd_pub_t *dhdp, int ifidx, void *pktbuf, int numpkt, uint8 chan) continue; } + pnext = PKTNEXT(dhdp->osh, pktbuf); PKTSETNEXT(wl->sh.osh, pktbuf, NULL); @@ -2078,6 +2078,14 @@ dhd_ioctl_entry(struct net_device *net, struct ifreq *ifr, int cmd) goto done; } + /* send to dongle only if we are not waiting for reload already */ + if (dhd->pub.hang_was_sent) { + DHD_ERROR(("%s: HANG was sent up earlier. 
Not talking to the chip\n", + __FUNCTION__)); + bcmerror = BCME_DONGLE_DOWN; + goto done; + } + /* check for local dhd ioctl and handle it */ if (driver == DHD_IOCTL_MAGIC) { bcmerror = dhd_ioctl((void *)&dhd->pub, &ioc, buf, buflen); @@ -2185,6 +2193,41 @@ done: } static int +dhd_cleanup_virt_ifaces(dhd_info_t *dhd) +{ + int i = 1; /* Leave ifidx 0 [Primary Interface] */ +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) + int rollback_lock = FALSE; +#endif + + DHD_TRACE(("%s: Enter \n", __func__)); + +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) + /* release lock for unregister_netdev */ + if (rtnl_is_locked()) { + rtnl_unlock(); + rollback_lock = TRUE; + } +#endif + + for (i = 1; i < DHD_MAX_IFS; i++) { + if (dhd->iflist[i]) { + DHD_TRACE(("Deleting IF: %d \n", i)); + dhd->iflist[i]->state = WLC_E_IF_DEL; + dhd->iflist[i]->idx = i; + dhd_op_if(dhd->iflist[i]); + } + } + +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) + if (rollback_lock) + rtnl_lock(); +#endif + + return 0; +} + +static int dhd_stop(struct net_device *net) { int ifidx; @@ -2197,8 +2240,17 @@ dhd_stop(struct net_device *net) ifidx = dhd_net2idx(dhd, net); #ifdef WL_CFG80211 - if (ifidx == 0) + if (ifidx == 0) { wl_cfg80211_down(); + + /** For CFG80211: Clean up all the left over virtual interfaces + * when the primary Interface is brought down. [ifconfig wlan0 down] + */ + if ((dhd->dhd_state & DHD_ATTACH_STATE_ADD_IF) && + (dhd->dhd_state & DHD_ATTACH_STATE_CFG80211)) { + dhd_cleanup_virt_ifaces(dhd); + } + } #endif #ifdef PROP_TXSTATUS @@ -2238,6 +2290,7 @@ dhd_open(struct net_device *net) strcpy(fw_path, firmware_path); firmware_path[0] = '\0'; } + #if !defined(WL_CFG80211) /** Force start if ifconfig_up gets called before START command * We keep WEXT's wl_control_wl_start to provide backward compatibility @@ -2262,7 +2315,9 @@ dhd_open(struct net_device *net) if (ifidx == 0) { atomic_set(&dhd->pend_8021x_cnt, 0); #if defined(WL_CFG80211) + DHD_ERROR(("\n%s\n", dhd_version)); wl_android_wifi_on(net); + dhd->pub.hang_was_sent = 0; #endif if (dhd->pub.busstate != DHD_BUS_DATA) { @@ -2610,7 +2665,7 @@ dhd_bus_start(dhd_pub_t *dhdp) ASSERT(dhd); - DHD_TRACE(("%s: \n", __FUNCTION__)); + DHD_TRACE(("Enter %s:\n", __FUNCTION__)); dhd_os_sdlock(dhdp); @@ -2706,6 +2761,7 @@ dhd_preinit_ioctls(dhd_pub_t *dhd) #endif int scan_assoc_time = 40; int scan_unassoc_time = 40; + int scan_passive_time = 130; char buf[WLC_IOCTL_SMLEN]; char *ptr; uint32 listen_interval = LISTEN_INTERVAL; /* Default Listen Interval in Beacons */ @@ -2946,14 +3002,16 @@ dhd_preinit_ioctls(dhd_pub_t *dhd) sizeof(scan_assoc_time), TRUE, 0); dhd_wl_ioctl_cmd(dhd, WLC_SET_SCAN_UNASSOC_TIME, (char *)&scan_unassoc_time, sizeof(scan_unassoc_time), TRUE, 0); + dhd_wl_ioctl_cmd(dhd, WLC_SET_SCAN_PASSIVE_TIME, (char *)&scan_passive_time, + sizeof(scan_passive_time), TRUE, 0); #ifdef ARP_OFFLOAD_SUPPORT /* Set and enable ARP offload feature for STA only */ - if (arpoe #if defined(SOFTAP) - && (!ap_fw_loaded) -#endif /* (OEM_ANDROID) && defined(SOFTAP) */ - ) { + if (arpoe && !ap_fw_loaded) { +#else + if (arpoe) { +#endif dhd_arp_offload_set(dhd, dhd_arp_mode); dhd_arp_offload_enable(dhd, arpoe); } else { @@ -2970,7 +3028,6 @@ dhd_preinit_ioctls(dhd_pub_t *dhd) dhd->pktfilter[1] = NULL; dhd->pktfilter[2] = NULL; dhd->pktfilter[3] = NULL; - #if defined(SOFTAP) if (ap_fw_loaded) { int i; @@ -2979,13 +3036,12 @@ dhd_preinit_ioctls(dhd_pub_t *dhd) 0, dhd_master_mode); } } -#endif /* (SOFTAP) */ +#endif /* defined(SOFTAP) */ #endif /* PKT_FILTER_SUPPORT */ 
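/*
 * Editor's note (illustrative sketch, not part of this patch): the new
 * dhd_cleanup_virt_ifaces() above uses a "rollback lock" pattern -- rtnl is
 * released only if the caller already holds it, so unregister_netdev() can
 * proceed, and the lock is re-taken afterwards.  A self-contained sketch of
 * that pattern, with hypothetical helper and callback names:
 */
#include <linux/types.h>
#include <linux/rtnetlink.h>

static void cleanup_with_rtnl_rollback(void (*do_cleanup)(void))
{
	bool rollback_lock = false;

	if (rtnl_is_locked()) {
		/* caller held rtnl: drop it so unregister_netdev() can run */
		rtnl_unlock();
		rollback_lock = true;
	}

	do_cleanup();

	if (rollback_lock)
		rtnl_lock();	/* restore the lock state we found */
}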
/* Force STA UP */ - ret = dhd_wl_ioctl_cmd(dhd, WLC_UP, (char *)&up, sizeof(up), TRUE, 0); - if (ret < 0) - goto done; + if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_UP, (char *)&up, sizeof(up), TRUE, 0)) < 0) + DHD_ERROR(("%s Setting WL UP failed %d\n", __FUNCTION__, ret)); done: @@ -3065,7 +3121,8 @@ int dhd_change_mtu(dhd_pub_t *dhdp, int new_mtu, int ifidx) #ifdef ARP_OFFLOAD_SUPPORT /* add or remove AOE host ip(s) (up to 8 IPs on the interface) */ -void aoe_update_host_ipv4_table(dhd_pub_t *dhd_pub, u32 ipa, bool add) +void +aoe_update_host_ipv4_table(dhd_pub_t *dhd_pub, u32 ipa, bool add) { u32 ipv4_buf[MAX_IPV4_ENTRIES]; /* temp save for AOE host_ip table */ int i; @@ -3088,14 +3145,11 @@ void aoe_update_host_ipv4_table(dhd_pub_t *dhd_pub, u32 ipa, bool add) } for (i = 0; i < MAX_IPV4_ENTRIES; i++) { - if (add && (ipv4_buf[i] == 0)) { - - ipv4_buf[i] = ipa; + ipv4_buf[i] = ipa; add = FALSE; /* added ipa to local table */ DHD_ARPOE(("%s: Saved new IP in temp arp_hostip[%d]\n", __FUNCTION__, i)); - } else if (ipv4_buf[i] == ipa) { ipv4_buf[i] = 0; DHD_ARPOE(("%s: removed IP:%x from temp table %d\n", @@ -3160,14 +3214,14 @@ static int dhd_device_event(struct notifier_block *this, #ifdef AOE_IP_ALIAS_SUPPORT if (!(ifa->ifa_label[strlen(ifa->ifa_label)-2] == 0x3a)) { DHD_ARPOE(("%s: primary interface is down, AOE clr all\n", - __FUNCTION__)); + __FUNCTION__)); dhd_aoe_hostip_clr(&dhd->pub); dhd_aoe_arp_clr(&dhd->pub); } else aoe_update_host_ipv4_table(dhd_pub, ifa->ifa_address, FALSE); #else - dhd_aoe_hostip_clr(&dhd->pub); - dhd_aoe_arp_clr(&dhd->pub); + dhd_aoe_hostip_clr(&dhd->pub); + dhd_aoe_arp_clr(&dhd->pub); #endif break; @@ -3221,9 +3275,9 @@ dhd_net_attach(dhd_pub_t *dhdp, int ifidx) net->netdev_ops = &dhd_ops_pri; #endif } else { - /* - * We have to use the primary MAC for virtual interfaces - */ + /* + * We have to use the primary MAC for virtual interfaces + */ memcpy(temp_addr, dhd->iflist[ifidx]->mac_addr, ETHER_ADDR_LEN); /* * Android sets the locally administered bit to indicate that this is a @@ -3362,15 +3416,10 @@ void dhd_detach(dhd_pub_t *dhdp) /* delete all interfaces, start with virtual */ if (dhd->dhd_state & DHD_ATTACH_STATE_ADD_IF) { - int i = 1; dhd_if_t *ifp; - for (i = 1; i < DHD_MAX_IFS; i++) - if (dhd->iflist[i]) { - dhd->iflist[i]->state = WLC_E_IF_DEL; - dhd->iflist[i]->idx = i; - dhd_op_if(dhd->iflist[i]); - } + /* Cleanup all virtual Interfaces */ + dhd_cleanup_virt_ifaces(dhd); /* delete primary interface 0 */ ifp = dhd->iflist[0]; @@ -3428,8 +3477,8 @@ void dhd_detach(dhd_pub_t *dhdp) #endif #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) && defined(CONFIG_PM_SLEEP) - unregister_pm_notifier(&dhd_sleep_pm_notifier); -#endif + unregister_pm_notifier(&dhd_sleep_pm_notifier); +#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) && defined(CONFIG_PM_SLEEP) */ if (dhd->dhd_state & DHD_ATTACH_STATE_WAKELOCKS_INIT) { #ifdef CONFIG_HAS_WAKELOCK @@ -3437,7 +3486,6 @@ void dhd_detach(dhd_pub_t *dhdp) wake_lock_destroy(&dhd->wl_rxwake); #endif } - } @@ -3509,7 +3557,6 @@ dhd_module_init(void) #endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) */ error = dhd_bus_register(); - if (!error) printf("\n%s\n", dhd_version); else { @@ -4156,11 +4203,14 @@ int net_os_send_hang_message(struct net_device *dev) int ret = 0; if (dhd) { - if (!dhd->hang_was_sent) { - dhd->hang_was_sent = 1; + if (!dhd->pub.hang_was_sent) { + dhd->pub.hang_was_sent = 1; #if defined(CONFIG_WIRELESS_EXT) ret = wl_iw_send_priv_event(dev, "HANG"); #endif +#if defined(WL_CFG80211) + ret 
= wl_cfg80211_hang(dev, WLAN_REASON_UNSPECIFIED); +#endif } } return ret; diff --git a/drivers/net/wireless/bcmdhd/dhd_linux_sched.c b/drivers/net/wireless/bcmdhd/dhd_linux_sched.c index 72290b5..aadd122 100644 --- a/drivers/net/wireless/bcmdhd/dhd_linux_sched.c +++ b/drivers/net/wireless/bcmdhd/dhd_linux_sched.c @@ -26,6 +26,7 @@ #include <linux/kernel.h> #include <linux/module.h> #include <linux/sched.h> +#include <typedefs.h> #include <linuxver.h> int setScheduler(struct task_struct *p, int policy, struct sched_param *param) diff --git a/drivers/net/wireless/bcmdhd/dhd_sdio.c b/drivers/net/wireless/bcmdhd/dhd_sdio.c index c412edd..cdb2448 100644 --- a/drivers/net/wireless/bcmdhd/dhd_sdio.c +++ b/drivers/net/wireless/bcmdhd/dhd_sdio.c @@ -147,6 +147,8 @@ extern void dhd_wlfc_txcomplete(dhd_pub_t *dhd, void *txp, bool success); #ifdef DHD_DEBUG /* Device console log buffer state */ +#define CONSOLE_LINE_MAX 192 +#define CONSOLE_BUFFER_MAX 2024 typedef struct dhd_console { uint count; /* Poll interval msec counter */ uint log_addr; /* Log struct address (fixed) */ @@ -429,7 +431,7 @@ do { \ * Mode 0: Dongle writes the software host mailbox and host is interrupted. * Mode 1: (sdiod core rev >= 4) * Device sets a new bit in the intstatus whenever there is a packet - * available in fifo. Host can't clear this specific status bit until all the + * available in fifo. Host can't clear this specific status bit until all the * packets are read from the FIFO. No need to ack dongle intstatus. * Mode 2: (sdiod core rev >= 4) * Device sets a bit in the intstatus, and host acks this by writing @@ -463,9 +465,9 @@ static void dhdsdio_sdtest_set(dhd_bus_t *bus, uint8 count); #ifdef DHD_DEBUG static int dhdsdio_checkdied(dhd_bus_t *bus, uint8 *data, uint size); -static int dhdsdio_mem_dump(dhd_bus_t *bus); static int dhd_serialconsole(dhd_bus_t *bus, bool get, bool enable, int *bcmerror); #endif /* DHD_DEBUG */ + static int dhdsdio_download_state(dhd_bus_t *bus, bool enter); static void dhdsdio_release(dhd_bus_t *bus, osl_t *osh); @@ -1374,7 +1376,8 @@ dhd_bus_txctl(struct dhd_bus *bus, uchar *msg, uint msglen) DHD_INFO(("%s: ctrl_frame_stat == FALSE\n", __FUNCTION__)); ret = 0; } else { - DHD_ERROR(("%s: ctrl_frame_stat == TRUE\n", __FUNCTION__)); + if (!bus->dhd->hang_was_sent) + DHD_ERROR(("%s: ctrl_frame_stat == TRUE\n", __FUNCTION__)); ret = -1; bus->ctrl_frame_stat = FALSE; goto done; @@ -1528,9 +1531,9 @@ enum { IOV_SD1IDLE, IOV_SLEEP, IOV_DONGLEISOLATION, - IOV_VARS + IOV_VARS, #ifdef SOFTAP - , IOV_FWPATH + IOV_FWPATH #endif }; @@ -1836,6 +1839,7 @@ dhdsdio_readshared(dhd_bus_t *bus, sdpcm_shared_t *sh) if ((sh->flags & SDPCM_SHARED_VERSION_MASK) == 3 && SDPCM_SHARED_VERSION == 1) return BCME_OK; + if ((sh->flags & SDPCM_SHARED_VERSION_MASK) != SDPCM_SHARED_VERSION) { DHD_ERROR(("%s: sdpcm_shared version %d in dhd " "is different than sdpcm_shared version %d in dongle\n", @@ -1847,7 +1851,6 @@ dhdsdio_readshared(dhd_bus_t *bus, sdpcm_shared_t *sh) return BCME_OK; } -#define CONSOLE_LINE_MAX 192 static int dhdsdio_readconsole(dhd_bus_t *bus) @@ -1925,11 +1928,16 @@ dhdsdio_checkdied(dhd_bus_t *bus, uint8 *data, uint size) int bcmerror = 0; uint msize = 512; char *mbuffer = NULL; + char *console_buffer = NULL; uint maxstrlen = 256; char *str = NULL; trap_t tr; sdpcm_shared_t sdpcm_shared; struct bcmstrbuf strbuf; + uint32 console_ptr, console_size, console_index; + uint8 line[CONSOLE_LINE_MAX], ch; + uint32 n, i, addr; + int rv; DHD_TRACE(("%s: Enter\n", __FUNCTION__)); @@ -2013,86 +2021,76 @@ 
dhdsdio_checkdied(dhd_bus_t *bus, uint8 *data, uint size) bcm_bprintf(&strbuf, "Dongle trap type 0x%x @ epc 0x%x, cpsr 0x%x, spsr 0x%x, sp 0x%x," "lp 0x%x, rpc 0x%x Trap offset 0x%x, " - "r0 0x%x, r1 0x%x, r2 0x%x, r3 0x%x, r4 0x%x, r5 0x%x, r6 0x%x, r7 0x%x\n", + "r0 0x%x, r1 0x%x, r2 0x%x, r3 0x%x, r4 0x%x, r5 0x%x, r6 0x%x, r7 0x%x\n\n", ltoh32(tr.type), ltoh32(tr.epc), ltoh32(tr.cpsr), ltoh32(tr.spsr), ltoh32(tr.r13), ltoh32(tr.r14), ltoh32(tr.pc), ltoh32(sdpcm_shared.trap_addr), ltoh32(tr.r0), ltoh32(tr.r1), ltoh32(tr.r2), ltoh32(tr.r3), ltoh32(tr.r4), ltoh32(tr.r5), ltoh32(tr.r6), ltoh32(tr.r7)); + + addr = sdpcm_shared.console_addr + OFFSETOF(hndrte_cons_t, log); + if ((rv = dhdsdio_membytes(bus, FALSE, addr, (uint8 *)&console_ptr, sizeof(console_ptr))) < 0) + goto printbuf; + + addr = sdpcm_shared.console_addr + OFFSETOF(hndrte_cons_t, log.buf_size); + if ((rv = dhdsdio_membytes(bus, FALSE, addr, (uint8 *)&console_size, sizeof(console_size))) < 0) + goto printbuf; + + addr = sdpcm_shared.console_addr + OFFSETOF(hndrte_cons_t, log.idx); + if ((rv = dhdsdio_membytes(bus, FALSE, addr, (uint8 *)&console_index, sizeof(console_index))) < 0) + goto printbuf; + + console_ptr = ltoh32(console_ptr); + console_size = ltoh32(console_size); + console_index = ltoh32(console_index); + + if (console_size > CONSOLE_BUFFER_MAX || !(console_buffer = MALLOC(bus->dhd->osh, console_size))) + goto printbuf; + + if ((rv = dhdsdio_membytes(bus, FALSE, console_ptr, (uint8 *)console_buffer, console_size)) < 0) + goto printbuf; + + for ( i = 0, n = 0; i < console_size; i += n + 1 ) { + for (n = 0; n < CONSOLE_LINE_MAX - 2; n++) { + ch = console_buffer[ (console_index + i + n) % console_size]; + if (ch == '\n') + break; + line[n] = ch; + } + + + if (n > 0) { + if (line[n - 1] == '\r') + n--; + line[n] = 0; + /* Don't use DHD_ERROR macro since we print a lot of information quickly */ + /* The macro will truncate a lot of the printfs */ + + if (dhd_msg_level & DHD_ERROR_VAL) + printf("CONSOLE: %s\n", line); + } + } } } +printbuf: if (sdpcm_shared.flags & (SDPCM_SHARED_ASSERT | SDPCM_SHARED_TRAP)) { DHD_ERROR(("%s: %s\n", __FUNCTION__, strbuf.origbuf)); } -#ifdef DHD_DEBUG - if (sdpcm_shared.flags & SDPCM_SHARED_TRAP) { - /* Mem dump to a file on device */ - dhdsdio_mem_dump(bus); - } -#endif /* DHD_DEBUG */ done: if (mbuffer) MFREE(bus->dhd->osh, mbuffer, msize); if (str) MFREE(bus->dhd->osh, str, maxstrlen); + if (console_buffer) + MFREE(bus->dhd->osh, console_buffer, console_size); return bcmerror; } +#endif /* #ifdef DHD_DEBUG */ -static int -dhdsdio_mem_dump(dhd_bus_t *bus) -{ - int ret = 0; - int size; /* Full mem size */ - int start = 0; /* Start address */ - int read_size = 0; /* Read size of each iteration */ - uint8 *buf = NULL, *databuf = NULL; - - /* Get full mem size */ - size = bus->ramsize; - buf = MALLOC(bus->dhd->osh, size); - if (!buf) { - printf("%s: Out of memory (%d bytes)\n", __FUNCTION__, size); - return -1; - } - - /* Read mem content */ - printf("Dump dongle memory"); - databuf = buf; - while (size) - { - read_size = MIN(MEMBLOCK, size); - if ((ret = dhdsdio_membytes(bus, FALSE, start, databuf, read_size))) - { - printf("%s: Error membytes %d\n", __FUNCTION__, ret); - if (buf) { - MFREE(bus->dhd->osh, buf, size); - } - return -1; - } - printf("."); - - /* Decrement size and increment start address */ - size -= read_size; - start += read_size; - databuf += read_size; - } - printf("Done\n"); - - /* free buf before return !!! 
*/ - if (write_to_file(bus->dhd, buf, bus->ramsize)) - { - printf("%s: Error writing to files\n", __FUNCTION__); - return -1; - } - - /* buf free handled in write_to_file, not here */ - return 0; -} -#endif /* defined(DHD_DEBUG) */ int dhdsdio_downloadvars(dhd_bus_t *bus, void *arg, int len) @@ -3040,6 +3038,7 @@ dhd_bus_stop(struct dhd_bus *bus, bool enforce_mutex) dhd_os_sdunlock(bus->dhd); } + int dhd_bus_init(dhd_pub_t *dhdp, bool enforce_mutex) { @@ -4352,6 +4351,13 @@ dhdsdio_hostmail(dhd_bus_t *bus) bus->flowcontrol = fcbits; } +#ifdef DHD_DEBUG + /* At least print a message if FW halted */ + if (hmb_data & HMB_DATA_FWHALT) { + DHD_ERROR(("INTERNAL ERROR: FIRMWARE HALTED\n")); + dhdsdio_checkdied(bus, NULL, 0); + } +#endif /* DHD_DEBUG */ /* Shouldn't be any others */ if (hmb_data & ~(HMB_DATA_DEVREADY | @@ -5176,6 +5182,9 @@ dhdsdio_probe(uint16 venid, uint16 devid, uint16 bus_no, uint16 slot, #ifdef GET_CUSTOM_MAC_ENABLE struct ether_addr ea_addr; #endif /* GET_CUSTOM_MAC_ENABLE */ +#ifdef PROP_TXSTATUS + uint up = 0; +#endif /* Init global variables at run-time, not as part of the declaration. * This is required to support init/de-init of the driver. Initialization @@ -5342,6 +5351,10 @@ dhdsdio_probe(uint16 venid, uint16 devid, uint16 bus_no, uint16 slot, goto fail; } +#ifdef PROP_TXSTATUS + if (dhd_download_fw_on_driverload) + dhd_wl_ioctl_cmd(bus->dhd, WLC_UP, (char *)&up, sizeof(up), TRUE, 0); +#endif return bus; fail: @@ -5916,7 +5929,7 @@ err: return bcmerror; } -/* +/* EXAMPLE: nvram_array nvram_arry format: name=value @@ -6190,16 +6203,16 @@ dhd_bus_devreset(dhd_pub_t *dhdp, uint8 flag) dhd_enable_oob_intr(bus, TRUE); #endif /* defined(OOB_INTR_ONLY) */ - bus->dhd->dongle_reset = FALSE; - bus->dhd->up = TRUE; + bus->dhd->dongle_reset = FALSE; + bus->dhd->up = TRUE; #if !defined(IGNORE_ETH0_DOWN) - /* Restore flow control */ - dhd_txflowcontrol(bus->dhd, ALL_INTERFACES, OFF); + /* Restore flow control */ + dhd_txflowcontrol(bus->dhd, ALL_INTERFACES, OFF); #endif - dhd_os_wd_timer(dhdp, dhd_watchdog_ms); + dhd_os_wd_timer(dhdp, dhd_watchdog_ms); - DHD_TRACE(("%s: WLAN ON DONE\n", __FUNCTION__)); + DHD_TRACE(("%s: WLAN ON DONE\n", __FUNCTION__)); } else { dhd_bus_stop(bus, FALSE); dhdsdio_release_dongle(bus, bus->dhd->osh, diff --git a/drivers/net/wireless/bcmdhd/dhd_wlfc.h b/drivers/net/wireless/bcmdhd/dhd_wlfc.h index 53db62c..59d018b 100644 --- a/drivers/net/wireless/bcmdhd/dhd_wlfc.h +++ b/drivers/net/wireless/bcmdhd/dhd_wlfc.h @@ -201,6 +201,14 @@ typedef struct athost_wl_stat_counters { #define WLFC_FCMODE_IMPLIED_CREDIT 1 #define WLFC_FCMODE_EXPLICIT_CREDIT 2 +#define WLFC_BORROW_DEFER_PERIOD_MS 100 + +/* Mask to represent available ACs (note: BC/MC is ignored */ +#define WLFC_AC_MASK 0xF + +/* Mask to check for only on-going AC_BE traffic */ +#define WLFC_AC_BE_TRAFFIC_ONLY 0xD + typedef struct athost_wl_status_info { uint8 last_seqid_to_wlc; @@ -213,7 +221,11 @@ typedef struct athost_wl_status_info { athost_wl_stat_counters_t stats; /* the additional ones are for bc/mc and ATIM FIFO */ - int FIFO_credit[AC_COUNT + 2]; + int FIFO_credit[AC_COUNT + 2]; + + /* Credit borrow counts for each FIFO from each of the other FIFOs */ + int credits_borrowed[AC_COUNT + 2][AC_COUNT + 2]; + struct pktq SENDQ; /* packet hanger and MAC->handle lookup table */ @@ -228,7 +240,7 @@ typedef struct athost_wl_status_info { wlfc_mac_descriptor_t other; } destination_entries; /* token position for different priority packets */ - uint8 token_pos[AC_COUNT]; + uint8 token_pos[AC_COUNT+1]; /* 
ON/OFF state for flow control to the host network interface */ uint8 hostif_flow_state[WLFC_MAX_IFNUM]; uint8 host_ifidx; @@ -243,6 +255,12 @@ typedef struct athost_wl_status_info { 2 - Use explicit credit */ uint8 proptxstatus_mode; + + /* To borrow credits */ + uint8 allow_credit_borrow; + + /* Timestamp to compute how long to defer borrowing for */ + uint32 borrow_defer_timestamp; } athost_wl_status_info_t; #endif /* __wlfc_host_driver_definitions_h__ */ diff --git a/drivers/net/wireless/bcmdhd/include/epivers.h b/drivers/net/wireless/bcmdhd/include/epivers.h index ec060c9..ab96c07 100644 --- a/drivers/net/wireless/bcmdhd/include/epivers.h +++ b/drivers/net/wireless/bcmdhd/include/epivers.h @@ -33,17 +33,17 @@ #define EPI_RC_NUMBER 125 -#define EPI_INCREMENTAL_NUMBER 74 +#define EPI_INCREMENTAL_NUMBER 78 #define EPI_BUILD_NUMBER 0 -#define EPI_VERSION 5, 90, 125, 74 +#define EPI_VERSION 5, 90, 125, 78 -#define EPI_VERSION_NUM 0x055a7d4a +#define EPI_VERSION_NUM 0x055a7d4e #define EPI_VERSION_DEV 5.90.125 -#define EPI_VERSION_STR "5.90.125.74" +#define EPI_VERSION_STR "5.90.125.78" #endif diff --git a/drivers/net/wireless/bcmdhd/linux_osl.c b/drivers/net/wireless/bcmdhd/linux_osl.c index 239b15d..17b68e5 100644 --- a/drivers/net/wireless/bcmdhd/linux_osl.c +++ b/drivers/net/wireless/bcmdhd/linux_osl.c @@ -48,23 +48,25 @@ #define BCM_MEM_FILENAME_LEN 24 #ifdef DHD_USE_STATIC_BUF -#define MAX_STATIC_BUF_NUM 16 -#define STATIC_BUF_SIZE (PAGE_SIZE*2) -#define STATIC_BUF_TOTAL_LEN (MAX_STATIC_BUF_NUM*STATIC_BUF_SIZE) +#define STATIC_BUF_MAX_NUM 16 +#define STATIC_BUF_SIZE (PAGE_SIZE * 2) +#define STATIC_BUF_TOTAL_LEN (STATIC_BUF_MAX_NUM * STATIC_BUF_SIZE) + typedef struct bcm_static_buf { struct semaphore static_sem; unsigned char *buf_ptr; - unsigned char buf_use[MAX_STATIC_BUF_NUM]; + unsigned char buf_use[STATIC_BUF_MAX_NUM]; } bcm_static_buf_t; static bcm_static_buf_t *bcm_static_buf = 0; -#define MAX_STATIC_PKT_NUM 8 +#define STATIC_PKT_MAX_NUM 8 + typedef struct bcm_static_pkt { - struct sk_buff *skb_4k[MAX_STATIC_PKT_NUM]; - struct sk_buff *skb_8k[MAX_STATIC_PKT_NUM]; + struct sk_buff *skb_4k[STATIC_PKT_MAX_NUM]; + struct sk_buff *skb_8k[STATIC_PKT_MAX_NUM]; struct semaphore osl_pkt_sem; - unsigned char pkt_use[MAX_STATIC_PKT_NUM*2]; + unsigned char pkt_use[STATIC_PKT_MAX_NUM * 2]; } bcm_static_pkt_t; static bcm_static_pkt_t *bcm_static_skb = 0; #endif @@ -228,8 +230,8 @@ osl_attach(void *pdev, uint bustype, bool pkttag) bcm_static_skb = (bcm_static_pkt_t *)((char *)bcm_static_buf + 2048); skb_buff_ptr = dhd_os_prealloc(osh, 4, 0); - bcopy(skb_buff_ptr, bcm_static_skb, sizeof(struct sk_buff *)*16); - for (i = 0; i < MAX_STATIC_PKT_NUM*2; i++) + bcopy(skb_buff_ptr, bcm_static_skb, sizeof(struct sk_buff *) * 16); + for (i = 0; i < STATIC_PKT_MAX_NUM * 2; i++) bcm_static_skb->pkt_use[i] = 0; sema_init(&bcm_static_skb->osl_pkt_sem, 1); @@ -548,84 +550,69 @@ osl_pktget_static(osl_t *osh, uint len) struct sk_buff *skb; - if (len > (PAGE_SIZE*2)) - { + if (len > (PAGE_SIZE * 2)) { + printk("%s: attempt to allocate huge packet (0x%x)\n", __FUNCTION__, len); printk("Do we really need this big skb??\n"); return osl_pktget(osh, len); } down(&bcm_static_skb->osl_pkt_sem); - if (len <= PAGE_SIZE) - { - for (i = 0; i < MAX_STATIC_PKT_NUM; i++) - { + if (len <= PAGE_SIZE) { + for (i = 0; i < STATIC_PKT_MAX_NUM; i++) { if (bcm_static_skb->pkt_use[i] == 0) break; } - if (i != MAX_STATIC_PKT_NUM) - { + if (i != STATIC_PKT_MAX_NUM) { bcm_static_skb->pkt_use[i] = 1; up(&bcm_static_skb->osl_pkt_sem); - skb 
= bcm_static_skb->skb_4k[i]; skb->tail = skb->data + len; skb->len = len; - return skb; } } - for (i = 0; i < MAX_STATIC_PKT_NUM; i++) - { - if (bcm_static_skb->pkt_use[i+MAX_STATIC_PKT_NUM] == 0) + for (i = 0; i < STATIC_PKT_MAX_NUM; i++) { + if (bcm_static_skb->pkt_use[i+STATIC_PKT_MAX_NUM] == 0) break; } - if (i != MAX_STATIC_PKT_NUM) - { - bcm_static_skb->pkt_use[i+MAX_STATIC_PKT_NUM] = 1; + if (i != STATIC_PKT_MAX_NUM) { + bcm_static_skb->pkt_use[i+STATIC_PKT_MAX_NUM] = 1; up(&bcm_static_skb->osl_pkt_sem); skb = bcm_static_skb->skb_8k[i]; skb->tail = skb->data + len; skb->len = len; - return skb; } - up(&bcm_static_skb->osl_pkt_sem); - printk("all static pkt in use!\n"); + printk("%s: all static pkt in use!\n", __FUNCTION__); return osl_pktget(osh, len); } - void osl_pktfree_static(osl_t *osh, void *p, bool send) { int i; - for (i = 0; i < MAX_STATIC_PKT_NUM; i++) - { - if (p == bcm_static_skb->skb_4k[i]) - { + for (i = 0; i < STATIC_PKT_MAX_NUM; i++) { + if (p == bcm_static_skb->skb_4k[i]) { down(&bcm_static_skb->osl_pkt_sem); bcm_static_skb->pkt_use[i] = 0; up(&bcm_static_skb->osl_pkt_sem); - return; } } - for (i = 0; i < MAX_STATIC_PKT_NUM; i++) - { - if (p == bcm_static_skb->skb_8k[i]) - { + for (i = 0; i < STATIC_PKT_MAX_NUM; i++) { + if (p == bcm_static_skb->skb_8k[i]) { down(&bcm_static_skb->osl_pkt_sem); - bcm_static_skb->pkt_use[i + MAX_STATIC_PKT_NUM] = 0; + bcm_static_skb->pkt_use[i + STATIC_PKT_MAX_NUM] = 0; up(&bcm_static_skb->osl_pkt_sem); return; diff --git a/drivers/net/wireless/bcmdhd/wl_android.c b/drivers/net/wireless/bcmdhd/wl_android.c index cefa7f9..36110f95 100644 --- a/drivers/net/wireless/bcmdhd/wl_android.c +++ b/drivers/net/wireless/bcmdhd/wl_android.c @@ -108,6 +108,7 @@ uint dhd_dev_reset(struct net_device *dev, uint8 flag); void dhd_dev_init_ioctl(struct net_device *dev); #ifdef WL_CFG80211 int wl_cfg80211_get_p2p_dev_addr(struct net_device *net, struct ether_addr *p2pdev_addr); +int wl_cfg80211_set_btcoex_dhcp(struct net_device *dev, char *command); #else int wl_cfg80211_get_p2p_dev_addr(struct net_device *net, struct ether_addr *p2pdev_addr) { return 0; } #endif @@ -454,6 +455,9 @@ int wl_android_priv_cmd(struct net_device *net, struct ifreq *ifr, int cmd) net_os_set_packet_filter(net, 0); /* DHCP starts */ else net_os_set_packet_filter(net, 1); /* DHCP ends */ +#ifdef WL_CFG80211 + bytes_written = wl_cfg80211_set_btcoex_dhcp(net, command); +#endif } else if (strnicmp(command, CMD_SETSUSPENDOPT, strlen(CMD_SETSUSPENDOPT)) == 0) { bytes_written = wl_android_set_suspendopt(net, command, priv_cmd.total_len); diff --git a/drivers/net/wireless/bcmdhd/wl_cfg80211.c b/drivers/net/wireless/bcmdhd/wl_cfg80211.c index 08a9a04..8a77231 100644 --- a/drivers/net/wireless/bcmdhd/wl_cfg80211.c +++ b/drivers/net/wireless/bcmdhd/wl_cfg80211.c @@ -123,6 +123,32 @@ static ctl_table wl_sysctl_table[] = { static struct ctl_table_header *wl_sysctl_hdr; #endif /* CONFIG_SYSCTL */ +#define COEX_DHCP + +#if defined(COEX_DHCP) +#define BT_DHCP_eSCO_FIX /* use New SCO/eSCO smart YG + * suppression + */ +#define BT_DHCP_USE_FLAGS /* this flag boost wifi pkt priority + * to max, caution: -not fair to sco + */ +#define BT_DHCP_OPPR_WIN_TIME 2500 /* T1 start SCO/ESCo priority + * suppression + */ +#define BT_DHCP_FLAG_FORCE_TIME 5500 /* T2 turn off SCO/SCO supperesion + * is (timeout) + */ +enum wl_cfg80211_btcoex_status { + BT_DHCP_IDLE, + BT_DHCP_START, + BT_DHCP_OPPR_WIN, + BT_DHCP_FLAG_FORCE_TIMEOUT +}; + +static int wl_cfg80211_btcoex_init(struct wl_priv *wl); +static void 
wl_cfg80211_btcoex_deinit(struct wl_priv *wl); +#endif + /* This is to override regulatory domains defined in cfg80211 module (reg.c) * By default world regulatory domain defined in reg.c puts the flags NL80211_RRF_PASSIVE_SCAN * and NL80211_RRF_NO_IBSS for 5GHz channels (for 36..48 and 149..165). @@ -909,7 +935,6 @@ fail: return ERR_PTR(-ENODEV); } - static s32 wl_cfg80211_del_virtual_iface(struct wiphy *wiphy, struct net_device *dev) { @@ -944,7 +969,6 @@ wl_cfg80211_del_virtual_iface(struct wiphy *wiphy, struct net_device *dev) timeout = wait_event_interruptible_timeout(wl->dongle_event_wait, (wl->scan_request == false), msecs_to_jiffies(MAX_WAIT_TIME)); - if (timeout > 0 && (!wl->scan_request)) { WL_DBG(("IFDEL Operations Done")); } else { @@ -1063,6 +1087,7 @@ s32 wl_cfg80211_ifdel_ops(struct net_device *net) { struct wl_priv *wl = wlcfg_drv_priv; + int rollback_lock = FALSE; if (!net || !net->name) { WL_DBG(("net is NULL\n")); @@ -1073,10 +1098,15 @@ wl_cfg80211_ifdel_ops(struct net_device *net) /* Abort any pending scan requests */ wl->escan_info.escan_state = WL_ESCAN_STATE_IDLE; - rtnl_lock(); + if (!rtnl_is_locked()) { + rtnl_lock(); + rollback_lock = TRUE; + } WL_INFO(("ESCAN COMPLETED\n")); wl_notify_escan_complete(wl, true); - rtnl_unlock(); + + if (rollback_lock) + rtnl_unlock(); } /* Wake up any waiting thread */ @@ -1093,7 +1123,6 @@ wl_cfg80211_notify_ifdel(struct net_device *net) if (wl->p2p->vif_created) { s32 index = 0; - WL_DBG(("IF_DEL event called from dongle, net %x, vif name: %s\n", (unsigned int)net, wl->p2p->vir_ifname)); @@ -1459,6 +1488,7 @@ __wl_cfg80211_scan(struct wiphy *wiphy, struct net_device *ndev, struct wl_priv *wl = wiphy_priv(wiphy); struct cfg80211_ssid *ssids; struct wl_scan_req *sr = wl_to_sr(wl); + wpa_ie_fixed_t *wps_ie; s32 passive_scan; bool iscan_req; bool escan_req; @@ -1466,7 +1496,8 @@ __wl_cfg80211_scan(struct wiphy *wiphy, struct net_device *ndev, bool p2p_ssid; s32 err = 0; s32 i; - + u32 wpsie_len = 0; + u8 wpsie[IE_MAX_LEN]; if (unlikely(wl_get_drv_status(wl, SCANNING))) { WL_ERR(("Scanning already : status (%d)\n", (int)wl->status)); return -EAGAIN; @@ -1531,6 +1562,26 @@ __wl_cfg80211_scan(struct wiphy *wiphy, struct net_device *ndev, } } } + if (!wl->p2p_supported || !p2p_scan(wl)) { + if (ndev == wl_to_prmry_ndev(wl)) { + /* find the WPSIE */ + memset(wpsie, 0, sizeof(wpsie)); + if ((wps_ie = wl_cfgp2p_find_wpsie( + (u8 *)request->ie, + request->ie_len)) != NULL) { + wpsie_len = + wps_ie->length + WPA_RSN_IE_TAG_FIXED_LEN; + memcpy(wpsie, wps_ie, wpsie_len); + } else { + wpsie_len = 0; + } + err = wl_cfgp2p_set_management_ie(wl, ndev, -1, + VNDR_IE_PRBREQ_FLAG, wpsie, wpsie_len); + if (unlikely(err)) { + goto scan_out; + } + } + } } } } else { /* scan in ibss */ @@ -1615,6 +1666,7 @@ wl_cfg80211_scan(struct wiphy *wiphy, struct net_device *ndev, WL_DBG(("Enter \n")); CHECK_SYS_UP(wl); + err = __wl_cfg80211_scan(wiphy, ndev, request, NULL); if (unlikely(err)) { WL_ERR(("scan error (%d)\n", err)); @@ -2100,9 +2152,12 @@ wl_cfg80211_connect(struct wiphy *wiphy, struct net_device *dev, size_t join_params_size; s32 err = 0; wpa_ie_fixed_t *wpa_ie; + wpa_ie_fixed_t *wps_ie; bcm_tlv_t *wpa2_ie; u8* wpaie = 0; u32 wpaie_len = 0; + u32 wpsie_len = 0; + u8 wpsie[IE_MAX_LEN]; WL_DBG(("In\n")); CHECK_SYS_UP(wl); @@ -2126,7 +2181,7 @@ wl_cfg80211_connect(struct wiphy *wiphy, struct net_device *dev, wl_cfgp2p_set_management_ie(wl, dev, wl_cfgp2p_find_idx(wl, dev), VNDR_IE_ASSOCREQ_FLAG, sme->ie, sme->ie_len); } else if (p2p_on(wl) && 
(sme->crypto.wpa_versions & NL80211_WPA_VERSION_2)) { - /* This is the connect req after WPS is done [credentials exchanged] + /* This is the connect req after WPS is done [credentials exchanged] * currently identified with WPA_VERSION_2 . * Update the previously set IEs with * the newly received IEs from Supplicant. This will remove the WPS IE from @@ -2158,8 +2213,21 @@ wl_cfg80211_connect(struct wiphy *wiphy, struct net_device *dev, ioctlbuf, sizeof(ioctlbuf)); } + /* find the WPSIE */ + memset(wpsie, 0, sizeof(wpsie)); + if ((wps_ie = wl_cfgp2p_find_wpsie((u8 *)sme->ie, + sme->ie_len)) != NULL) { + wpsie_len = wps_ie->length +WPA_RSN_IE_TAG_FIXED_LEN; + memcpy(wpsie, wps_ie, wpsie_len); + } else { + wpsie_len = 0; + } + err = wl_cfgp2p_set_management_ie(wl, dev, -1, + VNDR_IE_ASSOCREQ_FLAG, wpsie, wpsie_len); + if (unlikely(err)) { + return err; + } } - if (unlikely(!sme->ssid)) { WL_ERR(("Invalid ssid\n")); return -EOPNOTSUPP; @@ -2594,17 +2662,6 @@ wl_cfg80211_del_key(struct wiphy *wiphy, struct net_device *dev, } return err; } - -#ifdef NOT_YET - /* TODO: Removed in P2P twig, check later --lin */ - val = 0; /* assume open key. otherwise 1 */ - val = htod32(val); - err = wldev_ioctl(dev, WLC_SET_AUTH, &val, sizeof(val), false); - if (unlikely(err)) { - WL_ERR(("WLC_SET_AUTH error (%d)\n", err)); - return err; - } -#endif return err; } @@ -3145,9 +3202,6 @@ wl_cfg80211_mgmt_tx(struct wiphy *wiphy, struct net_device *dev, wl_cfgp2p_set_management_ie(wl, dev, bssidx, VNDR_IE_PRBRSP_FLAG, (u8 *)wps_ie, wpsie_len + p2pie_len); - /* remove WLC_E_PROBREQ_MSG event to prevent HOSTAPD - * from responding many probe request - */ } } cfg80211_mgmt_tx_status(dev, *cookie, buf, len, true, GFP_KERNEL); @@ -3198,7 +3252,7 @@ wl_cfg80211_mgmt_tx(struct wiphy *wiphy, struct net_device *dev, wifi_p2p_pub_act_frame_t *act_frm = (wifi_p2p_pub_act_frame_t *) (action_frame->data); /* - * To make sure to send successfully action frame, we have to turn off mpc + * To make sure to send successfully action frame, we have to turn off mpc */ if ((act_frm->subtype == P2P_PAF_GON_REQ)|| (act_frm->subtype == P2P_PAF_GON_RSP)) { @@ -3702,8 +3756,11 @@ wl_cfg80211_set_beacon(struct wiphy *wiphy, struct net_device *dev, } else { WL_DBG(("No WPSIE in beacon \n")); } - wldev_ioctl(dev, WLC_UP, &ap, sizeof(s32), false); - + err = wldev_ioctl(dev, WLC_UP, &ap, sizeof(s32), false); + if (unlikely(err)) { + WL_ERR(("WLC_UP error (%d)\n", err)); + return err; + } memset(&join_params, 0, sizeof(join_params)); /* join parameters starts with ssid */ join_params_size = sizeof(join_params.ssid); @@ -4222,7 +4279,7 @@ wl_notify_connect_status(struct wl_priv *wl, struct net_device *ndev, } else { WL_DBG(("wl_notify_connect_status : event %d status : %d \n", - ntoh32(e->event_type), ntoh32(e->status))); + ntoh32(e->event_type), ntoh32(e->status))); if (wl_is_linkup(wl, e, ndev)) { wl_link_up(wl); act = true; @@ -4859,7 +4916,11 @@ static s32 wl_init_priv_mem(struct wl_priv *wl) WL_ERR(("pmk list alloc failed\n")); goto init_priv_mem_out; } - + wl->sta_info = (void *)kzalloc(sizeof(*wl->sta_info), GFP_KERNEL); + if (unlikely(!wl->sta_info)) { + WL_ERR(("sta info alloc failed\n")); + goto init_priv_mem_out; + } return 0; init_priv_mem_out: @@ -4892,6 +4953,8 @@ static void wl_deinit_priv_mem(struct wl_priv *wl) wl->fw = NULL; kfree(wl->pmk_list); wl->pmk_list = NULL; + kfree(wl->sta_info); + wl->sta_info = NULL; if (wl->ap_info) { kfree(wl->ap_info->wpa_ie); kfree(wl->ap_info->rsn_ie); @@ -5428,6 +5491,11 @@ s32 
wl_cfg80211_attach(struct net_device *ndev, void *data) goto cfg80211_attach_out; } #endif +#if defined(COEX_DHCP) + if (wl_cfg80211_btcoex_init(wl)) + goto cfg80211_attach_out; +#endif /* COEX_DHCP */ + wlcfg_drv_priv = wl; return err; @@ -5444,6 +5512,11 @@ void wl_cfg80211_detach(void) wl = wlcfg_drv_priv; WL_TRACE(("In\n")); + +#if defined(COEX_DHCP) + wl_cfg80211_btcoex_deinit(wl); +#endif /* COEX_DHCP */ + #if defined(DHD_P2P_DEV_ADDR_FROM_SYSFS) && defined(CONFIG_SYSCTL) if (wl_sysctl_hdr) unregister_sysctl_table(wl_sysctl_hdr); @@ -6120,6 +6193,20 @@ s32 wl_cfg80211_up(void) return err; } +/* Private Event to Supplicant with indication that FW hangs */ +int wl_cfg80211_hang(struct net_device *dev, u16 reason) +{ + struct wl_priv *wl; + wl = wlcfg_drv_priv; + + WL_ERR(("In : FW crash Eventing\n")); + cfg80211_disconnected(dev, reason, NULL, 0, GFP_KERNEL); + if (wl != NULL) { + wl_link_down(wl); + } + return 0; +} + s32 wl_cfg80211_down(void) { struct wl_priv *wl; @@ -6547,3 +6634,480 @@ static int wl_setup_rfkill(struct wl_priv *wl, bool setup) err_out: return err; } + +#if defined(COEX_DHCP) +/* + * get named driver variable to uint register value and return error indication + * calling example: dev_wlc_intvar_get_reg(dev, "btc_params",66, ®_value) + */ +static int +dev_wlc_intvar_get_reg(struct net_device *dev, char *name, + uint reg, int *retval) +{ + union { + char buf[WLC_IOCTL_SMLEN]; + int val; + } var; + int error; + + bcm_mkiovar(name, (char *)(®), sizeof(reg), + (char *)(&var), sizeof(var.buf)); + error = wldev_ioctl(dev, WLC_GET_VAR, (char *)(&var), sizeof(var.buf), false); + + *retval = dtoh32(var.val); + return (error); +} + +static int +dev_wlc_bufvar_set(struct net_device *dev, char *name, char *buf, int len) +{ +#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 31) + char ioctlbuf[1024]; +#else + static char ioctlbuf[1024]; +#endif /* LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 31) */ + + bcm_mkiovar(name, buf, len, ioctlbuf, sizeof(ioctlbuf)); + + return (wldev_ioctl(dev, WLC_SET_VAR, ioctlbuf, sizeof(ioctlbuf), false)); +} +/* +get named driver variable to uint register value and return error indication +calling example: dev_wlc_intvar_set_reg(dev, "btc_params",66, value) +*/ +static int +dev_wlc_intvar_set_reg(struct net_device *dev, char *name, char *addr, char * val) +{ + char reg_addr[8]; + + memset(reg_addr, 0, sizeof(reg_addr)); + memcpy((char *)®_addr[0], (char *)addr, 4); + memcpy((char *)®_addr[4], (char *)val, 4); + + return (dev_wlc_bufvar_set(dev, name, (char *)®_addr[0], sizeof(reg_addr))); +} + +static bool btcoex_is_sco_active(struct net_device *dev) +{ + int ioc_res = 0; + bool res = FALSE; + int sco_id_cnt = 0; + int param27; + int i; + + for (i = 0; i < 12; i++) { + + ioc_res = dev_wlc_intvar_get_reg(dev, "btc_params", 27, ¶m27); + + WL_INFO(("%s, sample[%d], btc params: 27:%x\n", + __FUNCTION__, i, param27)); + + if (ioc_res < 0) { + WL_ERR(("%s ioc read btc params error\n", __FUNCTION__)); + break; + } + + if ((param27 & 0x6) == 2) { /* count both sco & esco */ + sco_id_cnt++; + } + + if (sco_id_cnt > 2) { + WL_INFO(("%s, sco/esco detected, pkt id_cnt:%d samples:%d\n", + __FUNCTION__, sco_id_cnt, i)); + res = TRUE; + break; + } + + msleep(5); + } + + return res; +} + +#if defined(BT_DHCP_eSCO_FIX) +/* Enhanced BT COEX settings for eSCO compatibility during DHCP window */ +static int set_btc_esco_params(struct net_device *dev, bool trump_sco) +{ + static bool saved_status = FALSE; + + char buf_reg50va_dhcp_on[8] = + { 50, 00, 00, 00, 0x22, 0x80, 0x00, 
0x00 }; + char buf_reg51va_dhcp_on[8] = + { 51, 00, 00, 00, 0x00, 0x00, 0x00, 0x00 }; + char buf_reg64va_dhcp_on[8] = + { 64, 00, 00, 00, 0x00, 0x00, 0x00, 0x00 }; + char buf_reg65va_dhcp_on[8] = + { 65, 00, 00, 00, 0x00, 0x00, 0x00, 0x00 }; + char buf_reg71va_dhcp_on[8] = + { 71, 00, 00, 00, 0x00, 0x00, 0x00, 0x00 }; + uint32 regaddr; + static uint32 saved_reg50; + static uint32 saved_reg51; + static uint32 saved_reg64; + static uint32 saved_reg65; + static uint32 saved_reg71; + + if (trump_sco) { + /* this should reduce eSCO agressive retransmit + * w/o breaking it + */ + + /* 1st save current */ + WL_INFO(("Do new SCO/eSCO coex algo {save &" + "override}\n")); + if ((!dev_wlc_intvar_get_reg(dev, "btc_params", 50, &saved_reg50)) && + (!dev_wlc_intvar_get_reg(dev, "btc_params", 51, &saved_reg51)) && + (!dev_wlc_intvar_get_reg(dev, "btc_params", 64, &saved_reg64)) && + (!dev_wlc_intvar_get_reg(dev, "btc_params", 65, &saved_reg65)) && + (!dev_wlc_intvar_get_reg(dev, "btc_params", 71, &saved_reg71))) { + saved_status = TRUE; + WL_INFO(("%s saved bt_params[50,51,64,65,71]:" + "0x%x 0x%x 0x%x 0x%x 0x%x\n", + __FUNCTION__, saved_reg50, saved_reg51, + saved_reg64, saved_reg65, saved_reg71)); + } else { + WL_ERR((":%s: save btc_params failed\n", + __FUNCTION__)); + saved_status = FALSE; + return -1; + } + + WL_INFO(("override with [50,51,64,65,71]:" + "0x%x 0x%x 0x%x 0x%x 0x%x\n", + *(u32 *)(buf_reg50va_dhcp_on+4), + *(u32 *)(buf_reg51va_dhcp_on+4), + *(u32 *)(buf_reg64va_dhcp_on+4), + *(u32 *)(buf_reg65va_dhcp_on+4), + *(u32 *)(buf_reg71va_dhcp_on+4))); + + dev_wlc_bufvar_set(dev, "btc_params", + (char *)&buf_reg50va_dhcp_on[0], 8); + dev_wlc_bufvar_set(dev, "btc_params", + (char *)&buf_reg51va_dhcp_on[0], 8); + dev_wlc_bufvar_set(dev, "btc_params", + (char *)&buf_reg64va_dhcp_on[0], 8); + dev_wlc_bufvar_set(dev, "btc_params", + (char *)&buf_reg65va_dhcp_on[0], 8); + dev_wlc_bufvar_set(dev, "btc_params", + (char *)&buf_reg71va_dhcp_on[0], 8); + + saved_status = TRUE; + } else if (saved_status) { + /* restore previously saved bt params */ + WL_INFO(("Do new SCO/eSCO coex algo {save &" + "override}\n")); + + regaddr = 50; + dev_wlc_intvar_set_reg(dev, "btc_params", + (char *)®addr, (char *)&saved_reg50); + regaddr = 51; + dev_wlc_intvar_set_reg(dev, "btc_params", + (char *)®addr, (char *)&saved_reg51); + regaddr = 64; + dev_wlc_intvar_set_reg(dev, "btc_params", + (char *)®addr, (char *)&saved_reg64); + regaddr = 65; + dev_wlc_intvar_set_reg(dev, "btc_params", + (char *)®addr, (char *)&saved_reg65); + regaddr = 71; + dev_wlc_intvar_set_reg(dev, "btc_params", + (char *)®addr, (char *)&saved_reg71); + + WL_INFO(("restore bt_params[50,51,64,65,71]:" + "0x%x 0x%x 0x%x 0x%x 0x%x\n", + saved_reg50, saved_reg51, saved_reg64, + saved_reg65, saved_reg71)); + + saved_status = FALSE; + } else { + WL_ERR((":%s att to restore not saved BTCOEX params\n", + __FUNCTION__)); + return -1; + } + return 0; +} +#endif /* BT_DHCP_eSCO_FIX */ + +static void +wl_cfg80211_bt_setflag(struct net_device *dev, bool set) +{ +#if defined(BT_DHCP_USE_FLAGS) + char buf_flag7_dhcp_on[8] = { 7, 00, 00, 00, 0x1, 0x0, 0x00, 0x00 }; + char buf_flag7_default[8] = { 7, 00, 00, 00, 0x0, 0x00, 0x00, 0x00}; +#endif + +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) + rtnl_lock(); +#endif + +#if defined(BT_DHCP_eSCO_FIX) + /* set = 1, save & turn on 0 - off & restore prev settings */ + set_btc_esco_params(dev, set); +#endif + +#if defined(BT_DHCP_USE_FLAGS) + WL_TRACE(("WI-FI priority boost via bt flags, set:%d\n", set)); + if (set == 
TRUE) + /* Forcing bt_flag7 */ + dev_wlc_bufvar_set(dev, "btc_flags", + (char *)&buf_flag7_dhcp_on[0], + sizeof(buf_flag7_dhcp_on)); + else + /* Restoring default bt flag7 */ + dev_wlc_bufvar_set(dev, "btc_flags", + (char *)&buf_flag7_default[0], + sizeof(buf_flag7_default)); +#endif + +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) + rtnl_unlock(); +#endif +} + +static void wl_cfg80211_bt_timerfunc(ulong data) +{ + struct btcoex_info *bt_local = (struct btcoex_info *)data; + WL_TRACE(("%s\n", __FUNCTION__)); + bt_local->timer_on = 0; + schedule_work(&bt_local->work); +} + +static void wl_cfg80211_bt_handler(struct work_struct *work) +{ + struct btcoex_info *btcx_inf; + + btcx_inf = container_of(work, struct btcoex_info, work); + + if (btcx_inf->timer_on) { + btcx_inf->timer_on = 0; + del_timer_sync(&btcx_inf->timer); + } + + switch (btcx_inf->bt_state) { + case BT_DHCP_START: + /* DHCP started + * provide OPPORTUNITY window to get DHCP address + */ + WL_TRACE(("%s bt_dhcp stm: started \n", + __FUNCTION__)); + btcx_inf->bt_state = BT_DHCP_OPPR_WIN; + mod_timer(&btcx_inf->timer, + jiffies + BT_DHCP_OPPR_WIN_TIME*HZ/1000); + btcx_inf->timer_on = 1; + break; + + case BT_DHCP_OPPR_WIN: + if (btcx_inf->dhcp_done) { + WL_TRACE(("%s DHCP Done before T1 expiration\n", + __FUNCTION__)); + goto btc_coex_idle; + } + + /* DHCP is not over yet, start lowering BT priority + * enforce btc_params + flags if necessary + */ + WL_TRACE(("%s DHCP T1:%d expired\n", __FUNCTION__, + BT_DHCP_OPPR_WIN_TIME)); + if (btcx_inf->dev) + wl_cfg80211_bt_setflag(btcx_inf->dev, TRUE); + btcx_inf->bt_state = BT_DHCP_FLAG_FORCE_TIMEOUT; + mod_timer(&btcx_inf->timer, + jiffies + BT_DHCP_FLAG_FORCE_TIME*HZ/1000); + btcx_inf->timer_on = 1; + break; + + case BT_DHCP_FLAG_FORCE_TIMEOUT: + if (btcx_inf->dhcp_done) { + WL_TRACE(("%s DHCP Done before T2 expiration\n", + __FUNCTION__)); + } else { + /* Noo dhcp during T1+T2, restore BT priority */ + WL_TRACE(("%s DHCP wait interval T2:%d" + "msec expired\n", __FUNCTION__, + BT_DHCP_FLAG_FORCE_TIME)); + } + + /* Restoring default bt priority */ + if (btcx_inf->dev) + wl_cfg80211_bt_setflag(btcx_inf->dev, FALSE); +btc_coex_idle: + btcx_inf->bt_state = BT_DHCP_IDLE; + btcx_inf->timer_on = 0; + break; + + default: + WL_ERR(("%s error g_status=%d !!!\n", __FUNCTION__, + btcx_inf->bt_state)); + if (btcx_inf->dev) + wl_cfg80211_bt_setflag(btcx_inf->dev, FALSE); + btcx_inf->bt_state = BT_DHCP_IDLE; + btcx_inf->timer_on = 0; + break; + } + + net_os_wake_unlock(btcx_inf->dev); +} + +static int wl_cfg80211_btcoex_init(struct wl_priv *wl) +{ + struct btcoex_info *btco_inf = NULL; + + btco_inf = kmalloc(sizeof(struct btcoex_info), GFP_KERNEL); + if (!btco_inf) + return -ENOMEM; + + btco_inf->bt_state = BT_DHCP_IDLE; + btco_inf->ts_dhcp_start = 0; + btco_inf->ts_dhcp_ok = 0; + /* Set up timer for BT */ + btco_inf->timer_ms = 10; + init_timer(&btco_inf->timer); + btco_inf->timer.data = (ulong)btco_inf; + btco_inf->timer.function = wl_cfg80211_bt_timerfunc; + + btco_inf->dev = wl->wdev->netdev; + + INIT_WORK(&btco_inf->work, wl_cfg80211_bt_handler); + + wl->btcoex_info = btco_inf; + return 0; +} + +static void +wl_cfg80211_btcoex_deinit(struct wl_priv *wl) +{ + if (!wl->btcoex_info) + return; + + if (!wl->btcoex_info->timer_on) { + wl->btcoex_info->timer_on = 0; + del_timer_sync(&wl->btcoex_info->timer); + } + + cancel_work_sync(&wl->btcoex_info->work); + + kfree(wl->btcoex_info); + wl->btcoex_info = NULL; +} +#endif /* OEM_ANDROID */ + +int wl_cfg80211_set_btcoex_dhcp(struct net_device *dev, 
char *command) +{ + char powermode_val = 0; + char buf_reg66va_dhcp_on[8] = { 66, 00, 00, 00, 0x10, 0x27, 0x00, 0x00 }; + char buf_reg41va_dhcp_on[8] = { 41, 00, 00, 00, 0x33, 0x00, 0x00, 0x00 }; + char buf_reg68va_dhcp_on[8] = { 68, 00, 00, 00, 0x90, 0x01, 0x00, 0x00 }; + + uint32 regaddr; + static uint32 saved_reg66; + static uint32 saved_reg41; + static uint32 saved_reg68; + static bool saved_status = FALSE; + +#ifdef COEX_DHCP + char buf_flag7_default[8] = { 7, 00, 00, 00, 0x0, 0x00, 0x00, 0x00}; + struct btcoex_info *btco_inf = wlcfg_drv_priv->btcoex_info; +#endif /* COEX_DHCP */ + + /* Figure out powermode 1 or o command */ + strncpy((char *)&powermode_val, command + strlen("BTCOEXMODE") +1, 1); + + if (strnicmp((char *)&powermode_val, "1", strlen("1")) == 0) { + + WL_TRACE(("%s: DHCP session starts\n", __FUNCTION__)); + + /* Retrieve and saved orig regs value */ + if ((saved_status == FALSE) && + (!dev_wlc_intvar_get_reg(dev, "btc_params", 66, &saved_reg66)) && + (!dev_wlc_intvar_get_reg(dev, "btc_params", 41, &saved_reg41)) && + (!dev_wlc_intvar_get_reg(dev, "btc_params", 68, &saved_reg68))) { + saved_status = TRUE; + WL_INFO(("Saved 0x%x 0x%x 0x%x\n", + saved_reg66, saved_reg41, saved_reg68)); + + /* Disable PM mode during dhpc session */ + + /* Disable PM mode during dhpc session */ +#ifdef COEX_DHCP + /* Start BT timer only for SCO connection */ + if (btcoex_is_sco_active(dev)) { + /* btc_params 66 */ + dev_wlc_bufvar_set(dev, "btc_params", + (char *)&buf_reg66va_dhcp_on[0], + sizeof(buf_reg66va_dhcp_on)); + /* btc_params 41 0x33 */ + dev_wlc_bufvar_set(dev, "btc_params", + (char *)&buf_reg41va_dhcp_on[0], + sizeof(buf_reg41va_dhcp_on)); + /* btc_params 68 0x190 */ + dev_wlc_bufvar_set(dev, "btc_params", + (char *)&buf_reg68va_dhcp_on[0], + sizeof(buf_reg68va_dhcp_on)); + saved_status = TRUE; + + btco_inf->bt_state = BT_DHCP_START; + btco_inf->timer_on = 1; + mod_timer(&btco_inf->timer, btco_inf->timer.expires); + WL_INFO(("%s enable BT DHCP Timer\n", + __FUNCTION__)); + } +#endif /* COEX_DHCP */ + } + else if (saved_status == TRUE) { + WL_ERR(("%s was called w/o DHCP OFF. 
Continue\n", __FUNCTION__)); + } + } + else if (strnicmp((char *)&powermode_val, "2", strlen("2")) == 0) { + + + /* Restoring PM mode */ + +#ifdef COEX_DHCP + /* Stop any bt timer because DHCP session is done */ + WL_INFO(("%s disable BT DHCP Timer\n", __FUNCTION__)); + if (btco_inf->timer_on) { + btco_inf->timer_on = 0; + del_timer_sync(&btco_inf->timer); + + if (btco_inf->bt_state != BT_DHCP_IDLE) { + /* need to restore original btc flags & extra btc params */ + WL_INFO(("%s bt->bt_state:%d\n", + __FUNCTION__, btco_inf->bt_state)); + /* wake up btcoex thread to restore btlags+params */ + schedule_work(&btco_inf->work); + } + } + + /* Restoring btc_flag paramter anyway */ + if (saved_status == TRUE) + dev_wlc_bufvar_set(dev, "btc_flags", + (char *)&buf_flag7_default[0], sizeof(buf_flag7_default)); +#endif /* COEX_DHCP */ + + /* Restore original values */ + if (saved_status == TRUE) { + regaddr = 66; + dev_wlc_intvar_set_reg(dev, "btc_params", + (char *)®addr, (char *)&saved_reg66); + regaddr = 41; + dev_wlc_intvar_set_reg(dev, "btc_params", + (char *)®addr, (char *)&saved_reg41); + regaddr = 68; + dev_wlc_intvar_set_reg(dev, "btc_params", + (char *)®addr, (char *)&saved_reg68); + + WL_INFO(("restore regs {66,41,68} <- 0x%x 0x%x 0x%x\n", + saved_reg66, saved_reg41, saved_reg68)); + } + saved_status = FALSE; + + } + else { + WL_ERR(("%s Unkwown yet power setting, ignored\n", + __FUNCTION__)); + } + + snprintf(command, 3, "OK"); + + return (strlen("OK")); +} diff --git a/drivers/net/wireless/bcmdhd/wl_cfg80211.h b/drivers/net/wireless/bcmdhd/wl_cfg80211.h index a563724..4159bd7 100644 --- a/drivers/net/wireless/bcmdhd/wl_cfg80211.h +++ b/drivers/net/wireless/bcmdhd/wl_cfg80211.h @@ -323,6 +323,27 @@ struct ap_info { u8 *wps_ie; bool security_mode; }; +struct btcoex_info { + struct timer_list timer; + uint32 timer_ms; + uint32 timer_on; + uint32 ts_dhcp_start; /* ms ts ecord time stats */ + uint32 ts_dhcp_ok; /* ms ts ecord time stats */ + bool dhcp_done; /* flag, indicates that host done with + * dhcp before t1/t2 expiration + */ + int bt_state; + struct work_struct work; + struct net_device *dev; +}; + +struct sta_info { + /* Structure to hold WPS IE for a STA */ + u8 probe_req_ie[IE_MAX_LEN]; + u8 assoc_req_ie[IE_MAX_LEN]; + u32 probe_req_ie_len; + u32 assoc_req_ie_len; +}; /* dongle private data of cfg80211 interface */ struct wl_priv { struct wireless_dev *wdev; /* representing wl cfg80211 device */ @@ -343,8 +364,6 @@ struct wl_priv { struct wl_cfg80211_bss_info *bss_info; /* information element object for internal purpose */ struct wl_ie ie; - u8 scan_ie_buf[2048]; - int scan_ie_len; struct ether_addr bssid; /* bssid of currently engaged network */ /* for synchronization of main event thread */ @@ -357,7 +376,7 @@ struct wl_priv { /* control firwmare and nvram paramter downloading */ struct wl_fw_ctrl *fw; struct wl_pmk_list *pmk_list; /* wpa2 pmk list */ - tsk_ctl_t event_tsk; /* task of main event handler thread */ + tsk_ctl_t event_tsk; /* task of main event handler thread */ unsigned long status; /* current dongle status */ void *pub; u32 channel; /* current channel */ @@ -385,9 +404,10 @@ struct wl_priv { u64 cache_cookie; wait_queue_head_t dongle_event_wait; struct ap_info *ap_info; + struct sta_info *sta_info; struct p2p_info *p2p; bool p2p_supported; - s8 last_eventmask[WL_EVENTING_MASK_LEN]; + struct btcoex_info *btcoex_info; }; #define wl_to_wiphy(w) (w->wdev->wiphy) @@ -507,6 +527,7 @@ extern void wl_cfg80211_release_fw(void); extern s8 *wl_cfg80211_get_fwname(void); extern 
s8 *wl_cfg80211_get_nvramname(void); extern s32 wl_cfg80211_get_p2p_dev_addr(struct net_device *net, struct ether_addr *p2pdev_addr); +extern int wl_cfg80211_hang(struct net_device *dev, u16 reason); #ifdef CONFIG_SYSCTL extern s32 wl_cfg80211_sysctl_export_devaddr(void *data); #endif diff --git a/drivers/net/wireless/bcmdhd/wl_cfgp2p.c b/drivers/net/wireless/bcmdhd/wl_cfgp2p.c index a5ad9e0..2487a67 100644 --- a/drivers/net/wireless/bcmdhd/wl_cfgp2p.c +++ b/drivers/net/wireless/bcmdhd/wl_cfgp2p.c @@ -627,16 +627,14 @@ wl_cfgp2p_set_management_ie(struct wl_priv *wl, struct net_device *ndev, s32 bss s32 ret = BCME_OK; u32 pos; u8 *ie_buf; - u8 *mgmt_ie_buf; - u32 mgmt_ie_buf_len; - u32 *mgmt_ie_len; + u8 *mgmt_ie_buf = NULL; + u32 mgmt_ie_buf_len = 0; + u32 *mgmt_ie_len = 0; u8 ie_id, ie_len; u8 delete = 0; #define IE_TYPE(type, bsstype) (wl_to_p2p_bss_saved_ie(wl, bsstype).p2p_ ## type ## _ie) #define IE_TYPE_LEN(type, bsstype) (wl_to_p2p_bss_saved_ie(wl, bsstype).p2p_ ## type ## _ie_len) - if (bssidx == -1) - return BCME_BADARG; - if (wl->p2p_supported && p2p_on(wl)) { + if (wl->p2p_supported && p2p_on(wl) && bssidx != -1) { if (bssidx == P2PAPI_BSSCFG_PRIMARY) bssidx = wl_to_p2p_bss_bssidx(wl, P2PAPI_BSSCFG_DEVICE); switch (pktflag) { @@ -671,7 +669,7 @@ wl_cfgp2p_set_management_ie(struct wl_priv *wl, struct net_device *ndev, s32 bss CFGP2P_ERR(("not suitable type\n")); return -1; } - } else { + } else if (get_mode_by_netdev(wl, ndev) == WL_MODE_AP) { switch (pktflag) { case VNDR_IE_PRBRSP_FLAG : mgmt_ie_buf = wl->ap_info->probe_res_ie; @@ -689,37 +687,63 @@ wl_cfgp2p_set_management_ie(struct wl_priv *wl, struct net_device *ndev, s32 bss CFGP2P_ERR(("not suitable type\n")); return -1; } + bssidx = 0; + } else if (bssidx == -1 && get_mode_by_netdev(wl, ndev) == WL_MODE_BSS) { + switch (pktflag) { + case VNDR_IE_PRBREQ_FLAG : + mgmt_ie_buf = wl->sta_info->probe_req_ie; + mgmt_ie_len = &wl->sta_info->probe_req_ie_len; + mgmt_ie_buf_len = sizeof(wl->sta_info->probe_req_ie); + break; + case VNDR_IE_ASSOCREQ_FLAG : + mgmt_ie_buf = wl->sta_info->assoc_req_ie; + mgmt_ie_len = &wl->sta_info->assoc_req_ie_len; + mgmt_ie_buf_len = sizeof(wl->sta_info->assoc_req_ie); + break; + default: + mgmt_ie_buf = NULL; + mgmt_ie_len = NULL; + CFGP2P_ERR(("not suitable type\n")); + return -1; + } + bssidx = 0; + } else { + CFGP2P_ERR(("not suitable type\n")); + return -1; } - /* Add if there is any extra IE */ - if (vndr_ie && vndr_ie_len) { - CFGP2P_INFO(("Request has extra IE")); - if (vndr_ie_len > mgmt_ie_buf_len) { - CFGP2P_ERR(("extra IE size too big\n")); - ret = -ENOMEM; - } else { - if (mgmt_ie_buf != NULL) { - if ((vndr_ie_len == *mgmt_ie_len) && - (memcmp(mgmt_ie_buf, vndr_ie, vndr_ie_len) == 0)) { - CFGP2P_INFO(("Previous mgmt IE is equals to current IE")); - goto exit; - } - pos = 0; - delete = 1; - ie_buf = (u8 *) mgmt_ie_buf; - while (pos < *mgmt_ie_len) { - ie_id = ie_buf[pos++]; - ie_len = ie_buf[pos++]; - CFGP2P_INFO(("DELELED ID(%d), Len(%d)," - "OUI(%02x:%02x:%02x)\n", - ie_id, ie_len, ie_buf[pos], + + if (vndr_ie_len > mgmt_ie_buf_len) { + CFGP2P_ERR(("extra IE size too big\n")); + ret = -ENOMEM; + } else { + if (mgmt_ie_buf != NULL) { + if (vndr_ie_len && (vndr_ie_len == *mgmt_ie_len) && + (memcmp(mgmt_ie_buf, vndr_ie, vndr_ie_len) == 0)) { + CFGP2P_INFO(("Previous mgmt IE is equals to current IE")); + goto exit; + } + pos = 0; + delete = 1; + ie_buf = (u8 *) mgmt_ie_buf; + while (pos < *mgmt_ie_len) { + ie_id = ie_buf[pos++]; + ie_len = ie_buf[pos++]; + if ((ie_id == DOT11_MNG_VS_ID) && + 
(wl_cfgp2p_is_wps_ie(&ie_buf[pos-2], NULL, 0) || + wl_cfgp2p_is_p2p_ie(&ie_buf[pos-2], NULL, 0))) { + CFGP2P_INFO(("DELELED ID : %d, Len : %d , OUI :" + "%02x:%02x:%02x\n", ie_id, ie_len, ie_buf[pos], ie_buf[pos+1], ie_buf[pos+2])); - ret = wl_cfgp2p_vndr_ie(ndev, bssidx, pktflag, - ie_buf+pos, VNDR_SPEC_ELEMENT_ID, - ie_buf+pos+3, ie_len-3, delete); - pos += ie_len; + ret = wl_cfgp2p_vndr_ie(ndev, bssidx, pktflag, ie_buf+pos, + VNDR_SPEC_ELEMENT_ID, ie_buf+pos+3, ie_len-3, delete); } - + pos += ie_len; } + + } + *mgmt_ie_len = 0; + /* Add if there is any extra IE */ + if (vndr_ie && vndr_ie_len) { /* save the current IE in wl struct */ memcpy(mgmt_ie_buf, vndr_ie, vndr_ie_len); *mgmt_ie_len = vndr_ie_len; @@ -741,7 +765,6 @@ wl_cfgp2p_set_management_ie(struct wl_priv *wl, struct net_device *ndev, s32 bss pos += ie_len; } } - } #undef IE_TYPE #undef IE_TYPE_LEN diff --git a/drivers/net/wireless/bcmdhd/wl_iw.c b/drivers/net/wireless/bcmdhd/wl_iw.c index 9b8184a2e..ef77cdc 100644 --- a/drivers/net/wireless/bcmdhd/wl_iw.c +++ b/drivers/net/wireless/bcmdhd/wl_iw.c @@ -1266,6 +1266,7 @@ wl_iw_set_dtim_skip( &iovbuf, sizeof(iovbuf))) >= 0) { p += snprintf(p, MAX_WX_STRING, "OK"); + net_os_set_dtim_skip(dev, bcn_li_dtim); WL_TRACE(("%s: set dtim_skip %d OK\n", __FUNCTION__, @@ -1737,6 +1738,7 @@ wl_iw_control_wl_off( sdioh_stop(NULL); #endif + net_os_set_dtim_skip(dev, 0); dhd_customer_gpio_wlan_ctrl(WLAN_RESET_OFF); @@ -2415,7 +2417,7 @@ wl_iw_get_range( list = (wl_uint32_list_t *)channels; dwrq->length = sizeof(struct iw_range); - memset(range, 0, sizeof(range)); + memset(range, 0, sizeof(*range)); range->min_nwid = range->max_nwid = 0; @@ -4472,7 +4474,7 @@ wl_iw_set_essid( g_ssid.SSID_len = htod32(g_ssid.SSID_len); - memset(join_params, 0, sizeof(join_params)); + memset(join_params, 0, sizeof(*join_params)); join_params_size = sizeof(join_params->ssid); memcpy(join_params->ssid.SSID, g_ssid.SSID, g_ssid.SSID_len); @@ -6297,6 +6299,8 @@ wl_iw_add_wps_probe_req_ie( str_ptr += WPS_PROBE_REQ_IE_CMD_LENGTH; datalen = wrqu->data.length - WPS_PROBE_REQ_IE_CMD_LENGTH; + + buflen = sizeof(vndr_ie_setbuf_t) + datalen - sizeof(vndr_ie_t); ie_setbuf = (vndr_ie_setbuf_t *)kmalloc(buflen, GFP_KERNEL); if (!ie_setbuf) { @@ -6306,16 +6310,21 @@ wl_iw_add_wps_probe_req_ie( memset(ie_setbuf, 0x00, buflen); + strncpy(ie_setbuf->cmd, "add", VNDR_IE_CMD_LEN - 1); ie_setbuf->cmd[VNDR_IE_CMD_LEN - 1] = '\0'; + iecount = htod32(1); memcpy((void *)&ie_setbuf->vndr_ie_buffer.iecount, &iecount, sizeof(int)); + pktflag = 0x10; - memcpy((void *)&ie_setbuf->vndr_ie_buffer.vndr_ie_list[0].pktflag, &pktflag, sizeof(uint32)); + memcpy((void *)&ie_setbuf->vndr_ie_buffer.vndr_ie_list[0].pktflag, + &pktflag, sizeof(uint32)); - memcpy((void *)&ie_setbuf->vndr_ie_buffer.vndr_ie_list[0].vndr_ie_data, str_ptr, datalen); + memcpy((void *)&ie_setbuf->vndr_ie_buffer.vndr_ie_list[0].vndr_ie_data, + str_ptr, datalen); total_len = strlen("vndr_ie ") + buflen; bufptr = (char *)kmalloc(total_len, GFP_KERNEL); @@ -7528,11 +7537,15 @@ wl_iw_set_priv( ret = wl_iw_set_cscan(dev, info, (union iwreq_data *)dwrq, extra); #endif #ifdef CONFIG_WPS2 - else if (strnicmp(extra, WPS_ADD_PROBE_REQ_IE_CMD, strlen(WPS_ADD_PROBE_REQ_IE_CMD)) == 0) - ret = wl_iw_add_wps_probe_req_ie(dev, info, (union iwreq_data *)dwrq, extra); - else if (strnicmp(extra, WPS_DEL_PROBE_REQ_IE_CMD, strlen(WPS_DEL_PROBE_REQ_IE_CMD)) == 0) - ret = wl_iw_del_wps_probe_req_ie(dev, info, (union iwreq_data *)dwrq, extra); -#endif + else if (strnicmp(extra, WPS_ADD_PROBE_REQ_IE_CMD, 
+ strlen(WPS_ADD_PROBE_REQ_IE_CMD)) == 0) + ret = wl_iw_add_wps_probe_req_ie(dev, info, + (union iwreq_data *)dwrq, extra); + else if (strnicmp(extra, WPS_DEL_PROBE_REQ_IE_CMD, + strlen(WPS_DEL_PROBE_REQ_IE_CMD)) == 0) + ret = wl_iw_del_wps_probe_req_ie(dev, info, + (union iwreq_data *)dwrq, extra); +#endif else if (strnicmp(extra, "POWERMODE", strlen("POWERMODE")) == 0) ret = wl_iw_set_power_mode(dev, info, (union iwreq_data *)dwrq, extra); else if (strnicmp(extra, "BTCOEXMODE", strlen("BTCOEXMODE")) == 0) @@ -7761,8 +7774,8 @@ static const struct iw_priv_args wl_iw_priv_args[] = { WL_AP_STA_LIST, - 0, - IW_PRIV_TYPE_CHAR | 0, + IW_PRIV_TYPE_CHAR | 0, + IW_PRIV_TYPE_CHAR | 1024, "AP_GET_STA_LIST" }, diff --git a/drivers/omap_hsi/hsi_driver_int.c b/drivers/omap_hsi/hsi_driver_int.c index 41c3707..52bbba1 100644 --- a/drivers/omap_hsi/hsi_driver_int.c +++ b/drivers/omap_hsi/hsi_driver_int.c @@ -43,9 +43,11 @@ bool hsi_is_channel_busy(struct hsi_channel *ch) if (ch->write_data.addr == NULL) return false; - /* Note: we do not check if there is a read pending, because incoming */ - /* data will trigger an interrupt (FIFO or DMA), and wake up the */ - /* platform, so no need to keep the clocks ON. */ + /* + * Note: we do not check if there is a read pending, because incoming + * data will trigger an interrupt (FIFO or DMA), and wake up the + * platform, so no need to keep the clocks ON. + */ return true; } @@ -373,13 +375,6 @@ static void hsi_do_channel_rx(struct hsi_channel *ch) buff_offset); } } -#if 0 - if (omap_readl(0x4A05A810)) - dev_err(hsi_ctrl->dev, - "RX BUF state is full. " - "Warning disabling interrupt %0x\n", - omap_readl(0x4A05A810)); -#endif hsi_driver_disable_read_interrupt(ch); hsi_reset_ch_read(ch); @@ -394,13 +389,6 @@ done: if (data_read) { spin_unlock(&hsi_ctrl->lock); -#if 0 - dev_warn(hsi_ctrl->dev, "Read callback %d.\n", n_ch); - if (n_ch == 0) - dev_warn(hsi_ctrl->dev, - "Read callback %d \t DATA 0x%0x .\n", - n_ch, data); -#endif (*ch->read_done) (ch->dev, 1); spin_lock(&hsi_ctrl->lock); } @@ -427,12 +415,14 @@ int hsi_do_cawake_process(struct hsi_port *pport) /* Deal with init condition */ if (unlikely(pport->cawake_status < 0)) pport->cawake_status = !cawake_status; - dev_dbg(hsi_ctrl->dev, - "Interrupts are not enabled but CAWAKE has come\n: 0x%0x.\n", - omap_readl(0x4A05880c)); - dev_dbg(hsi_ctrl->dev, - "Interrupts are not enabled but CAWAKE has come\n: 0x%0x.\n", - omap_readl(0x4A058804)); + dev_dbg(hsi_ctrl->dev, "%s: Interrupts are not enabled but CAWAKE came." + "hsi: port[%d] irq[%d] irq_en=0x%08x dma_irq_en=0x%08x\n", + __func__, pport->port_number, pport->n_irq, + hsi_inl(pport->hsi_controller->base, + HSI_SYS_MPU_ENABLE_REG(pport->port_number, + pport->n_irq)), + hsi_inl(pport->hsi_controller->base, + HSI_SYS_GDD_MPU_IRQ_ENABLE_REG)); /* Check CAWAKE line status */ if (cawake_status) { @@ -449,17 +439,13 @@ int hsi_do_cawake_process(struct hsi_port *pport) spin_lock(&hsi_ctrl->lock); } pport->cawake_status = 1; - if (omap_readl(0x4A306404) != 0x0) { - omap_writel(0x00000002, 0x4A004400); - omap_writel(0x003F0703, 0x4A306400); - omap_writel(0x003F0701, 0x4A306400); - omap_writel(0x00000003, 0x4A004400); - } + /* Force HSI to ON_ACTIVE when CAWAKE is high */ hsi_set_pm_force_hsi_on(hsi_ctrl); - /* TODO: Use omap_pm_set_max_dev_wakeup_lat() to set latency */ - /* constraint to prevent L3INIT to enter RET/OFF when CAWAKE */ - /* is high */ + /* + * TODO: Use pm_qos() to set latency constraint to prevent + * L3INIT to enter RET/OFF when CAWAKE is high. 
+ */ spin_unlock(&hsi_ctrl->lock); hsi_port_event_handler(pport, HSI_EVENT_CAWAKE_UP, NULL); @@ -487,9 +473,10 @@ int hsi_do_cawake_process(struct hsi_port *pport) /* Allow HSI HW to enter IDLE when CAWAKE is low */ hsi_set_pm_default(hsi_ctrl); - /* TODO: Use omap_pm_set_max_dev_wakeup_lat() to release */ - /* latency constraint to prevent L3INIT to enter RET/OFF when */ - /* CAWAKE is low */ + /* + * TODO: Use pm_qos() to release latency constraint to allow + * L3INIT to enter RET/OFF when CAWAKE is low + */ spin_unlock(&hsi_ctrl->lock); hsi_port_event_handler(pport, HSI_EVENT_CAWAKE_DOWN, NULL); @@ -642,16 +629,6 @@ static void do_hsi_tasklet(unsigned long hsi_port) dev_dbg(hsi_ctrl->dev, "Int Tasklet : clock_enabled=%d\n", hsi_ctrl->clock_enabled); -#if 0 - if (pport->cawake_off_event == true) - dev_info(hsi_ctrl->dev, - "Tasklet called from OFF/RET MODE THRU PAD CPU ID %d\n", - smp_processor_id()); - else - dev_info(hsi_ctrl->dev, - "Tasklet called from ACTIVE MODE CPU ID %d\n", - smp_processor_id()); -#endif spin_lock(&hsi_ctrl->lock); hsi_clocks_enable(hsi_ctrl->dev, __func__); pport->in_int_tasklet = true; @@ -668,23 +645,13 @@ static void do_hsi_tasklet(unsigned long hsi_port) static irqreturn_t hsi_mpu_handler(int irq, void *p) { struct hsi_port *pport = p; -#if 0 - printk(KERN_INFO "Tasklet called from MPU HANDLER CPU ID %d " - "\t STS 0x%0x \t ENB 0x%0x\n", smp_processor_id(), - omap_readl(0x4A058808), omap_readl(0x4A05880C)); -#endif if (shceduled_already_flag == 0) { -#if 0 - tasklet_hi_schedule(&pport->hsi_tasklet); - if (TASKLET_STATE_SCHED == pport->hsi_tasklet.state) { - printk(KERN_INFO "MPU TASKLET ALREADY SCHEDULED RETURNING\n"); - return IRQ_HANDLED; - } -#endif shceduled_already_flag = 1; tasklet_hi_schedule(&pport->hsi_tasklet); - /* Disable interrupt until Bottom Half has cleared the */ - /* IRQ status register */ + /* + * Disable interrupt until Bottom Half has cleared the + * IRQ status register + */ disable_irq_nosync(pport->irq); } return IRQ_HANDLED; diff --git a/drivers/remoteproc/Kconfig b/drivers/remoteproc/Kconfig index 069c8fd..5a6ba3a 100644 --- a/drivers/remoteproc/Kconfig +++ b/drivers/remoteproc/Kconfig @@ -41,3 +41,10 @@ config OMAP_RPRES help Say Y here if you want to use OMAP remote processor resources frame work. 
+
+config REMOTEPROC_WATCHDOG
+ bool "OMAP remoteproc watchdog timer"
+ depends on REMOTE_PROC
+ default y
+ help
+ Say Y to enable the watchdog timer for remote cores
diff --git a/drivers/remoteproc/omap_remoteproc.c b/drivers/remoteproc/omap_remoteproc.c
index c464c1d..2d2bcb6 100644
--- a/drivers/remoteproc/omap_remoteproc.c
+++ b/drivers/remoteproc/omap_remoteproc.c
@@ -39,6 +39,7 @@ struct omap_rproc_priv {
struct iommu *iommu;
int (*iommu_cb)(struct rproc *, u64, u32);
+ int (*wdt_cb)(struct rproc *);
#ifdef CONFIG_REMOTE_PROC_AUTOSUSPEND
struct omap_mbox *mbox;
void __iomem *idle;
@@ -321,7 +322,51 @@ static void _destroy_pm_flags(struct rproc *rproc)
}
}
#endif
+#ifdef CONFIG_REMOTEPROC_WATCHDOG
+static int omap_rproc_watchdog_init(struct rproc *rproc,
+ int (*callback)(struct rproc *rproc))
+{
+ struct omap_rproc_priv *rpp = rproc->priv;
+
+ rpp->wdt_cb = callback;
+ return 0;
+}
+
+static int omap_rproc_watchdog_exit(struct rproc *rproc)
+{
+ struct omap_rproc_priv *rpp = rproc->priv;
+
+ rpp->wdt_cb = NULL;
+ return 0;
+}
+
+static irqreturn_t omap_rproc_watchdog_isr(int irq, void *p)
+{
+ struct rproc *rproc = p;
+ struct omap_rproc_pdata *pdata = rproc->dev->platform_data;
+ struct omap_rproc_timers_info *timers = pdata->timers;
+ struct omap_dm_timer *timer = NULL;
+ struct omap_rproc_priv *rpp = rproc->priv;
+ int i;
+
+ for (i = 0; i < pdata->timers_cnt; i++) {
+ if (irq == omap_dm_timer_get_irq(timers[i].odt)) {
+ timer = timers[i].odt;
+ break;
+ }
+ }
+
+ if (!timer)
+ return IRQ_NONE;
+ omap_dm_timer_write_status(timer, OMAP_TIMER_INT_OVERFLOW);
+
+ if (rpp->wdt_cb)
+ rpp->wdt_cb(rproc);
+
+ return IRQ_HANDLED;
+}
+#endif
static inline int omap_rproc_start(struct rproc *rproc, u64 bootaddr)
{
struct device *dev = rproc->dev;
@@ -339,6 +384,16 @@ static inline int omap_rproc_start(struct rproc *rproc, u64 bootaddr)
goto out;
}
omap_dm_timer_set_source(timers[i].odt, OMAP_TIMER_SRC_SYS_CLK);
+#ifdef CONFIG_REMOTEPROC_WATCHDOG
+ /* GPT 9 and 11 are used as WDT */
+ if (timers[i].id == 9 || timers[i].id == 11) {
+ ret = request_irq(omap_dm_timer_get_irq(timers[i].odt),
+ omap_rproc_watchdog_isr, IRQF_DISABLED,
+ "rproc-wdt", rproc);
+ /* Clear the counter; the remote processor will set the value */
+ omap_dm_timer_set_load(timers[i].odt, 0, 0);
+ }
+#endif
}
ret = omap_device_enable(pdev);
@@ -413,6 +468,10 @@ static struct rproc_ops omap_rproc_ops = {
.set_lat = omap_rproc_set_lat,
.set_bw = omap_rproc_set_l3_bw,
.scale = omap_rproc_scale,
+#ifdef CONFIG_REMOTEPROC_WATCHDOG
+ .watchdog_init = omap_rproc_watchdog_init,
+ .watchdog_exit = omap_rproc_watchdog_exit,
+#endif
};
static int omap_rproc_probe(struct platform_device *pdev)
diff --git a/drivers/remoteproc/remoteproc.c b/drivers/remoteproc/remoteproc.c
index 4124011..2e9a6e0 100644
--- a/drivers/remoteproc/remoteproc.c
+++ b/drivers/remoteproc/remoteproc.c
@@ -55,8 +55,8 @@ static ssize_t rproc_format_trace_buf(char __user *userbuf, size_t count,
int i, w_pos;
/* Assume write_idx is the penultimate byte in the buffer trace*/
- w_idx = (int *)(buf + (size - (sizeof(u32) * 2)));
size = size - (sizeof(u32) * 2);
+ w_idx = (int *)(buf + size);
w_pos = *w_idx;
if (from_beg)
@@ -127,6 +127,10 @@ static const struct file_operations rproc_name_ops = {
DEBUGFS_READONLY_FILE(trace0, rproc->trace_buf0, rproc->trace_len0);
DEBUGFS_READONLY_FILE(trace1, rproc->trace_buf1, rproc->trace_len1);
+DEBUGFS_READONLY_FILE(trace0_last, rproc->last_trace_buf0,
+ rproc->last_trace_len0);
+DEBUGFS_READONLY_FILE(trace1_last, rproc->last_trace_buf1,
+
rproc->last_trace_len1); #define DEBUGFS_ADD(name) \ debugfs_create_file(#name, 0400, rproc->dbg_dir, \ @@ -194,12 +198,19 @@ rproc_da_to_pa(const struct rproc_mem_entry *maps, u64 da, phys_addr_t *pa) static int rproc_mmu_fault_isr(struct rproc *rproc, u64 da, u32 flags) { dev_err(rproc->dev, "%s\n", __func__); - rproc->state = RPROC_CRASHED; - schedule_work(&rproc->mmufault_work); + schedule_work(&rproc->error_work); return -EIO; } +static int rproc_watchdog_isr(struct rproc *rproc) +{ + dev_err(rproc->dev, "Enter %s\n", __func__); + schedule_work(&rproc->error_work); + + return 0; +} + static int _event_notify(struct rproc *rproc, int type, void *data) { struct blocking_notifier_head *nh; @@ -211,6 +222,12 @@ static int _event_notify(struct rproc *rproc, int type, void *data) init_completion(&rproc->error_comp); rproc->state = RPROC_CRASHED; mutex_unlock(&rproc->lock); + if (rproc->trace_buf0 && rproc->last_trace_buf0) + memcpy(rproc->last_trace_buf0, rproc->trace_buf0, + rproc->last_trace_len0); + if (rproc->trace_buf1 && rproc->last_trace_buf1) + memcpy(rproc->last_trace_buf1, rproc->trace_buf1, + rproc->last_trace_len1); #ifdef CONFIG_REMOTE_PROC_AUTOSUSPEND pm_runtime_dont_use_autosuspend(rproc->dev); #endif @@ -259,6 +276,15 @@ static void rproc_start(struct rproc *rproc, u64 bootaddr) } } + if (rproc->ops->watchdog_init) { + err = rproc->ops->watchdog_init(rproc, rproc_watchdog_isr); + if (err) { + dev_err(dev, "can't configure watchdog timer %d\n", + err); + goto unlock_mutext; + } + } + err = rproc->ops->start(rproc, bootaddr); if (err) { dev_err(dev, "can't start rproc %s: %d\n", rproc->name, err); @@ -393,9 +419,11 @@ static int rproc_handle_resources(struct rproc *rproc, struct fw_resource *rsc, /* store the da for processing at the end */ if (!trace_da0) { rproc->trace_len0 = rsc->len; + rproc->last_trace_len0 = rsc->len; trace_da0 = da; } else { rproc->trace_len1 = rsc->len; + rproc->last_trace_len1 = rsc->len; trace_da1 = da; } break; @@ -446,6 +474,9 @@ static int rproc_handle_resources(struct rproc *rproc, struct fw_resource *rsc, len -= sizeof(*rsc); } + if (ret) + goto error; + /* * post-process trace buffers, as we cannot rely on the order of the * trace section and the carveout sections. 
@@ -453,33 +484,54 @@ static int rproc_handle_resources(struct rproc *rproc, struct fw_resource *rsc, * trace buffer memory _is_ normal memory, so we cast away the * __iomem to make sparse happy */ - if (!ret && trace_da0) { + if (trace_da0) { ret = rproc_da_to_pa(rproc->memory_maps, trace_da0, &pa); - if (!ret) { - rproc->trace_buf0 = (__force void *) - ioremap_nocache(pa, rproc->trace_len0); - if (rproc->trace_buf0) - DEBUGFS_ADD(trace0); - else { - dev_err(dev, "can't ioremap trace buffer0\n"); - ret = -EIO; + if (ret) + goto error; + rproc->trace_buf0 = (__force void *) + ioremap_nocache(pa, rproc->trace_len0); + if (rproc->trace_buf0) { + DEBUGFS_ADD(trace0); + if (!rproc->last_trace_buf0) { + rproc->last_trace_buf0 = kzalloc(sizeof(u32) * + rproc->last_trace_len0, + GFP_KERNEL); + if (!rproc->last_trace_buf0) { + ret = -ENOMEM; + goto error; + } + DEBUGFS_ADD(trace0_last); } + } else { + dev_err(dev, "can't ioremap trace buffer0\n"); + ret = -EIO; + goto error; } } - if (!ret && trace_da1) { + if (trace_da1) { ret = rproc_da_to_pa(rproc->memory_maps, trace_da1, &pa); - if (!ret) { - rproc->trace_buf1 = (__force void *) - ioremap_nocache(pa, rproc->trace_len1); - if (rproc->trace_buf1) - DEBUGFS_ADD(trace1); - else { - dev_err(dev, "can't ioremap trace buffer1\n"); - ret = -EIO; + if (ret) + goto error; + rproc->trace_buf1 = (__force void *) + ioremap_nocache(pa, rproc->trace_len1); + if (rproc->trace_buf1) { + DEBUGFS_ADD(trace1); + if (!rproc->last_trace_buf1) { + rproc->last_trace_buf1 = kzalloc(sizeof(u32) * + rproc->last_trace_len1, + GFP_KERNEL); + if (!rproc->last_trace_buf1) { + ret = -ENOMEM; + goto error; + } + DEBUGFS_ADD(trace1_last); } + } else { + dev_err(dev, "can't ioremap trace buffer1\n"); + ret = -EIO; } } - +error: return ret; } @@ -638,6 +690,12 @@ static int rproc_loader(struct rproc *rproc) return 0; } +int rproc_errror_notify(struct rproc *rproc) +{ + return _event_notify(rproc, RPROC_ERROR, NULL); +} +EXPORT_SYMBOL_GPL(rproc_errror_notify); + struct rproc *rproc_get(const char *name) { struct rproc *rproc, *ret = NULL; @@ -760,6 +818,14 @@ void rproc_put(struct rproc *rproc) ret); goto out; } + if (rproc->ops->watchdog_exit) { + ret = rproc->ops->watchdog_exit(rproc); + if (ret) { + dev_err(rproc->dev, "error watchdog_exit %d\n", + ret); + goto out; + } + } if (rproc->ops->iommu_exit) { ret = rproc->ops->iommu_exit(rproc); if (ret) { @@ -784,9 +850,9 @@ out: } EXPORT_SYMBOL_GPL(rproc_put); -static void rproc_mmufault_work(struct work_struct *work) +static void rproc_error_work(struct work_struct *work) { - struct rproc *rproc = container_of(work, struct rproc, mmufault_work); + struct rproc *rproc = container_of(work, struct rproc, error_work); dev_dbg(rproc->dev, "Enter %s\n", __func__); _event_notify(rproc, RPROC_ERROR, NULL); @@ -1093,7 +1159,7 @@ int rproc_register(struct device *dev, const char *name, mutex_init(&rproc->pm_lock); #endif mutex_init(&rproc->lock); - INIT_WORK(&rproc->mmufault_work, rproc_mmufault_work); + INIT_WORK(&rproc->error_work, rproc_error_work); BLOCKING_INIT_NOTIFIER_HEAD(&rproc->nb_error); rproc->state = RPROC_OFFLINE; @@ -1160,6 +1226,8 @@ int rproc_unregister(const char *name) pm_qos_remove_request(rproc->qos_request); kfree(rproc->qos_request); + kfree(rproc->last_trace_buf0); + kfree(rproc->last_trace_buf1); kfree(rproc); return 0; diff --git a/drivers/rpmsg/rpmsg_omx.c b/drivers/rpmsg/rpmsg_omx.c index 61c2e34..34ab83a 100644 --- a/drivers/rpmsg/rpmsg_omx.c +++ b/drivers/rpmsg/rpmsg_omx.c @@ -111,7 +111,7 @@ static 
LIST_HEAD(rpmsg_omx_services_list); */ #define TILER_START 0x60000000 #define TILER_END 0x80000000 -#define ION_1D_START 0xBDF00000 +#define ION_1D_START 0xBA300000 #define ION_1D_END 0xBFD00000 #define ION_1D_VA 0x88000000 static u32 _rpmsg_pa_to_da(u32 pa) diff --git a/drivers/rtc/alarm.c b/drivers/rtc/alarm.c index e0e98dd..28b0df8 100644 --- a/drivers/rtc/alarm.c +++ b/drivers/rtc/alarm.c @@ -389,7 +389,7 @@ static int alarm_suspend(struct platform_device *pdev, pm_message_t state) hrtimer_cancel(&alarms[ANDROID_ALARM_RTC_WAKEUP].timer); hrtimer_cancel(&alarms[ - ANDROID_ALARM_ELAPSED_REALTIME_WAKEUP_MASK].timer); + ANDROID_ALARM_ELAPSED_REALTIME_WAKEUP].timer); tmp_queue = &alarms[ANDROID_ALARM_RTC_WAKEUP]; if (tmp_queue->first) diff --git a/drivers/tty/serial/omap-serial.c b/drivers/tty/serial/omap-serial.c index 1961067..34d4c91 100644 --- a/drivers/tty/serial/omap-serial.c +++ b/drivers/tty/serial/omap-serial.c @@ -49,6 +49,8 @@ #define UART_OMAP_IIR_ID 0x3e #define UART_OMAP_IIR_RX_TIMEOUT 0xc +#define UART_OMAP_TXFIFO_LVL (0x68/4) + static struct uart_omap_port *ui[OMAP_MAX_HSUART_PORTS]; /* Forward declaration of functions */ @@ -279,10 +281,10 @@ ignore_char: spin_lock(&up->port.lock); } -static void transmit_chars(struct uart_omap_port *up) +static void transmit_chars(struct uart_omap_port *up, u8 tx_fifo_lvl) { struct circ_buf *xmit = &up->port.state->xmit; - int count; + int count, i; if (up->port.x_char) { serial_out(up, UART_TX, up->port.x_char); @@ -294,14 +296,14 @@ static void transmit_chars(struct uart_omap_port *up) serial_omap_stop_tx(&up->port); return; } - count = up->port.fifosize / 4; - do { + count = up->port.fifosize - tx_fifo_lvl; + for (i = 0; i < count; i++) { serial_out(up, UART_TX, xmit->buf[xmit->tail]); xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1); up->port.icount.tx++; if (uart_circ_empty(xmit)) break; - } while (--count > 0); + } if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS) uart_write_wakeup(&up->port); @@ -422,6 +424,7 @@ static inline irqreturn_t serial_omap_irq(int irq, void *dev_id) unsigned int int_id; unsigned long flags; int ret = IRQ_HANDLED; + u8 tx_fifo_lvl; serial_omap_port_enable(up); iir = serial_in(up, UART_IIR); @@ -450,8 +453,9 @@ static inline irqreturn_t serial_omap_irq(int irq, void *dev_id) check_modem_status(up); if (int_id == UART_IIR_THRI) { - if (lsr & UART_LSR_THRE) - transmit_chars(up); + tx_fifo_lvl = serial_in(up, UART_OMAP_TXFIFO_LVL); + if (lsr & UART_LSR_THRE || tx_fifo_lvl < up->port.fifosize) + transmit_chars(up, tx_fifo_lvl); else ret = IRQ_NONE; } @@ -683,19 +687,19 @@ serial_omap_configure_xonxoff /* * IXON Flag: - * Enable XON/XOFF flow control on output. - * Transmit XON1, XOFF1 + * Flow control for OMAP.TX + * OMAP.RX should listen for XON/XOFF */ if (termios->c_iflag & IXON) - up->efr |= OMAP_UART_SW_TX; + up->efr |= OMAP_UART_SW_RX; /* * IXOFF Flag: - * Enable XON/XOFF flow control on input. - * Receiver compares XON1, XOFF1. 
+ * Flow control for OMAP.RX
+ * OMAP.TX should send XON/XOFF
*/
if (termios->c_iflag & IXOFF)
- up->efr |= OMAP_UART_SW_RX;
+ up->efr |= OMAP_UART_SW_TX;
serial_out(up, UART_EFR, up->efr | UART_EFR_ECB);
serial_out(up, UART_LCR, UART_LCR_CONF_MODE_A);
@@ -900,22 +904,27 @@ serial_omap_set_termios(struct uart_port *port, struct ktermios *termios,
/* Hardware Flow Control Configuration */
if (termios->c_cflag & CRTSCTS) {
- efr |= (UART_EFR_CTS | UART_EFR_RTS);
serial_out(up, UART_LCR, UART_LCR_CONF_MODE_A);
- up->mcr = serial_in(up, UART_MCR);
serial_out(up, UART_MCR, up->mcr | UART_MCR_TCRTLR);
serial_out(up, UART_LCR, UART_LCR_CONF_MODE_B);
up->efr = serial_in(up, UART_EFR);
serial_out(up, UART_EFR, up->efr | UART_EFR_ECB);
- serial_out(up, UART_TI752_TCR, OMAP_UART_TCR_TRIG);
- serial_out(up, UART_EFR, efr); /* Enable AUTORTS and AUTOCTS */
+
+ up->efr |= (UART_EFR_CTS | UART_EFR_RTS);
+ serial_out(up, UART_EFR, up->efr); /* Enable AUTORTS and AUTOCTS */
serial_out(up, UART_LCR, UART_LCR_CONF_MODE_A);
up->mcr |= UART_MCR_RTS;
serial_out(up, UART_MCR, up->mcr);
serial_out(up, UART_LCR, cval);
+ } else {
+ serial_out(up, UART_LCR, UART_LCR_CONF_MODE_B);
+ up->efr = serial_in(up, UART_EFR);
+ up->efr &= ~(UART_EFR_CTS | UART_EFR_RTS);
+ serial_out(up, UART_EFR, up->efr); /* Disable AUTORTS and AUTOCTS */
+ serial_out(up, UART_LCR, cval);
}
serial_omap_set_mctrl(&up->port, up->port.mctrl);
diff --git a/drivers/usb/gadget/android.c b/drivers/usb/gadget/android.c
index b13633b..b7e4029 100644
--- a/drivers/usb/gadget/android.c
+++ b/drivers/usb/gadget/android.c
@@ -87,8 +87,7 @@ struct android_usb_function {
/* Optional: called when the configuration is removed */
void (*unbind_config)(struct android_usb_function *, struct usb_configuration *);
- /* Optional: handle ctrl requests before the device is configured
- * and/or before the function is enabled */
+ /* Optional: handle ctrl requests before the device is configured */
int (*ctrlrequest)(struct android_usb_function *, struct usb_composite_dev *, const struct usb_ctrlrequest *);
@@ -1057,6 +1056,12 @@ android_setup(struct usb_gadget *gadget, const struct usb_ctrlrequest *c)
}
}
+ /* Special case the accessory function.
+ * It needs to handle control requests before it is enabled.
+ */
+ if (value < 0)
+ value = acc_ctrlrequest(cdev, c);
+
if (value < 0)
value = composite_setup(gadget, c);
diff --git a/drivers/usb/otg/otg_id.c b/drivers/usb/otg/otg_id.c
index ce22b46..8037edb 100644
--- a/drivers/usb/otg/otg_id.c
+++ b/drivers/usb/otg/otg_id.c
@@ -26,6 +26,8 @@ static struct plist_head otg_id_plist =
static struct otg_id_notifier_block *otg_id_active;
static bool otg_id_cancelling;
static bool otg_id_inited;
+static int otg_id_suspended;
+static bool otg_id_pending;
static void otg_id_cancel(void)
{
@@ -139,8 +141,65 @@ void otg_id_notify(void)
if (otg_id_cancelling)
goto out;
+ if (otg_id_suspended != 0) {
+ otg_id_pending = true;
+ goto out;
+ }
+
__otg_id_notify();
+out:
+ mutex_unlock(&otg_id_lock);
+}
+
+/**
+ * otg_id_suspend
+ *
+ * Mark the otg_id subsystem as going into suspend. From here on out,
+ * any notifications will be deferred until the last otg_id client resumes.
+ * If there is a pending notification when calling this function, it will
+ * return a negative errno and expects that the caller will abort suspend.
+ * Returns 0 on success.
+ */ +int otg_id_suspend(void) +{ + int ret = 0; + + mutex_lock(&otg_id_lock); + + /* + * if there's a pending notification, tell the caller to abort suspend + */ + if (otg_id_suspended != 0 && otg_id_pending) { + pr_info("otg_id: pending notification, should abort suspend\n"); + ret = -EBUSY; + goto out; + } + otg_id_suspended++; +out: + mutex_unlock(&otg_id_lock); + return ret; +} + +/** + * otg_id_resume + * + * Inform the otg_id subsystem that a client is resuming. If this is the + * last client to be resumed and there's a pending notification, + * otg_id_notify() is called. + */ +void otg_id_resume(void) +{ + mutex_lock(&otg_id_lock); + if (WARN(!otg_id_suspended, "unbalanced otg_id_resume\n")) + goto out; + if (--otg_id_suspended == 0) { + if (otg_id_pending) { + pr_info("otg_id: had pending notification\n"); + otg_id_pending = false; + __otg_id_notify(); + } + } out: mutex_unlock(&otg_id_lock); } diff --git a/drivers/video/hdmi_ti_4xxx_ip.c b/drivers/video/hdmi_ti_4xxx_ip.c index 5fbcd58..8cd8526 100644 --- a/drivers/video/hdmi_ti_4xxx_ip.c +++ b/drivers/video/hdmi_ti_4xxx_ip.c @@ -28,6 +28,7 @@ #include <linux/mutex.h> #include <linux/delay.h> #include <linux/string.h> +#include <linux/omapfb.h> #include "hdmi_ti_4xxx_ip.h" @@ -658,15 +659,10 @@ static void hdmi_wp_video_init_format(struct hdmi_video_format *video_fmt, { pr_debug("Enter hdmi_wp_video_init_format\n"); - video_fmt->y_res = param->timings.timings.y_res; - video_fmt->x_res = param->timings.timings.x_res; + video_fmt->y_res = param->timings.yres; + video_fmt->x_res = param->timings.xres; - timings->hbp = param->timings.timings.hbp; - timings->hfp = param->timings.timings.hfp; - timings->hsw = param->timings.timings.hsw; - timings->vbp = param->timings.timings.vbp; - timings->vfp = param->timings.timings.vfp; - timings->vsw = param->timings.timings.vsw; + omapfb_fb2dss_timings(¶m->timings, timings); } static void hdmi_wp_video_config_format(struct hdmi_ip_data *ip_data, @@ -743,9 +739,9 @@ void hdmi_ti_4xxx_basic_configure(struct hdmi_ip_data *ip_data, hdmi_wp_video_config_format(ip_data, &video_format); - video_interface.vsp = cfg->timings.vsync_pol; - video_interface.hsp = cfg->timings.hsync_pol; - video_interface.interlacing = cfg->interlace; + video_interface.vsp = !!(cfg->timings.sync & FB_SYNC_VERT_HIGH_ACT); + video_interface.hsp = !!(cfg->timings.sync & FB_SYNC_HOR_HIGH_ACT); + video_interface.interlacing = cfg->timings.vmode & FB_VMODE_INTERLACED; video_interface.tm = 1 ; /* HDMI_TIMING_MASTER_24BIT */ hdmi_wp_video_config_interface(ip_data, &video_interface); diff --git a/drivers/video/omap2/dss/dispc.c b/drivers/video/omap2/dss/dispc.c index b3d7bc8..ecbb10c 100644 --- a/drivers/video/omap2/dss/dispc.c +++ b/drivers/video/omap2/dss/dispc.c @@ -35,6 +35,7 @@ #include <linux/interrupt.h> #include <linux/platform_device.h> #include <linux/pm_runtime.h> +#include <linux/ratelimit.h> #include <plat/sram.h> #include <plat/clock.h> @@ -1977,9 +1978,10 @@ int dispc_scaling_decision(u16 width, u16 height, loop: /* err if exhausted search region */ if (x == max_x_decim && y == max_y_decim) { - DSSERR("failed to set up scaling, " + DSSERR("failed to set up scaling %u*%u to %u*%u, " "required fclk rate = %lu Hz, " - "current fclk rate = %lu Hz\n", + "current = %lu Hz\n", + width, height, out_width, out_height, fclk, fclk_max); return -EINVAL; } @@ -3516,7 +3518,7 @@ static void dispc_error_worker(struct work_struct *work) struct omap_overlay_manager *manager = NULL; bool enable = false; - DSSERR("SYNC_LOST_DIGIT, 
disabling TV\n"); + pr_err_ratelimited("SYNC_LOST_DIGIT, disabling TV\n"); for (i = 0; i < omap_dss_get_num_overlay_managers(); ++i) { struct omap_overlay_manager *mgr; diff --git a/drivers/video/omap2/dss/dsi.c b/drivers/video/omap2/dss/dsi.c index 171e6c2..73ca793 100644 --- a/drivers/video/omap2/dss/dsi.c +++ b/drivers/video/omap2/dss/dsi.c @@ -2429,7 +2429,7 @@ static int dsi_cio_init(struct omap_dss_device *dssdev) if (cpu_is_omap44xx()) { /* DDR_CLK_ALWAYS_ON */ - REG_FLD_MOD(dsidev, DSI_CLK_CTRL, 1, 13, 13); + REG_FLD_MOD(dsidev, DSI_CLK_CTRL, 0, 13, 13); /* HS_AUTO_STOP_ENABLE */ REG_FLD_MOD(dsidev, DSI_CLK_CTRL, 1, 18, 18); } @@ -3880,6 +3880,7 @@ static int dsi_video_proto_config(struct omap_dss_device *dssdev) r = FLD_MOD(r, 1, 15, 15); /* VP_VSYNC_START */ r = FLD_MOD(r, 1, 17, 17); /* VP_HSYNC_START */ r = FLD_MOD(r, 1, 19, 19); /* EOT_ENABLE */ + r = FLD_MOD(r, 1, 20, 20); /* BLANKING_MODE */ r = FLD_MOD(r, 1, 21, 21); /* HFP_BLANKING */ r = FLD_MOD(r, 1, 22, 22); /* HBP_BLANKING */ r = FLD_MOD(r, 1, 23, 23); /* HSA_BLANKING */ @@ -3998,6 +3999,7 @@ static void dsi_proto_timings(struct omap_dss_device *dssdev) unsigned ddr_clk_pre, ddr_clk_post; unsigned enter_hs_mode_lat, exit_hs_mode_lat; unsigned ths_eot; + unsigned offset_ddr_clk; u32 r; r = dsi_read_reg(dsidev, DSI_DSIPHY_CFG0); @@ -4022,9 +4024,13 @@ static void dsi_proto_timings(struct omap_dss_device *dssdev) ths_eot = DIV_ROUND_UP(4, dsi_get_num_data_lanes_dssdev(dssdev)); + /* DDR PRE & DDR POST increased to keep LP-11 under 10 usec */ + offset_ddr_clk = dssdev->clocks.dsi.offset_ddr_clk; + ddr_clk_pre = DIV_ROUND_UP(tclk_pre + tlpx + tclk_zero + tclk_prepare, - 4); - ddr_clk_post = DIV_ROUND_UP(tclk_post + ths_trail, 4) + ths_eot; + 4) + offset_ddr_clk; + ddr_clk_post = DIV_ROUND_UP(tclk_post + ths_trail, 4) + ths_eot + + offset_ddr_clk; BUG_ON(ddr_clk_pre == 0 || ddr_clk_pre > 255); BUG_ON(ddr_clk_post == 0 || ddr_clk_post > 255); diff --git a/drivers/video/omap2/dss/dss.h b/drivers/video/omap2/dss/dss.h index 14acc2b..2953339 100644 --- a/drivers/video/omap2/dss/dss.h +++ b/drivers/video/omap2/dss/dss.h @@ -513,10 +513,13 @@ void omapdss_hdmi_display_disable(struct omap_dss_device *dssdev); void omapdss_hdmi_display_set_timing(struct omap_dss_device *dssdev); int omapdss_hdmi_display_check_timing(struct omap_dss_device *dssdev, struct omap_video_timings *timings); +int omapdss_hdmi_display_set_mode(struct omap_dss_device *dssdev, + struct fb_videomode *mode); int hdmi_panel_hpd_handler(int hpd); int omapdss_hdmi_get_deepcolor(void); void omapdss_hdmi_set_deepcolor(int val); int hdmi_get_current_hpd(void); +void hdmi_get_monspecs(struct fb_monspecs *specs); u8 *hdmi_read_edid(struct omap_video_timings *); int hdmi_panel_init(void); diff --git a/drivers/video/omap2/dss/hdmi.c b/drivers/video/omap2/dss/hdmi.c index 52e123a..78d98ad 100644 --- a/drivers/video/omap2/dss/hdmi.c +++ b/drivers/video/omap2/dss/hdmi.c @@ -37,6 +37,8 @@ #include <video/omapdss.h> #include <video/hdmi_ti_4xxx_ip.h> #include <linux/gpio.h> +#include <linux/fb.h> +#include <linux/omapfb.h> #if defined(CONFIG_SND_OMAP_SOC_OMAP4_HDMI) || \ defined(CONFIG_SND_OMAP_SOC_OMAP4_HDMI_MODULE) #include <sound/soc.h> @@ -73,6 +75,7 @@ static struct { int mode; u8 edid[HDMI_EDID_MAX_LENGTH]; u8 edid_set; + bool custom_set; enum hdmi_deep_color_mode deep_color; struct hdmi_config cfg; @@ -98,77 +101,190 @@ static struct { * map it to corresponding CEA or VESA index. 
*/ -static const struct hdmi_timings cea_vesa_timings[OMAP_HDMI_TIMINGS_NB] = { - { {640, 480, 25200, 96, 16, 48, 2, 10, 33} , 0 , 0}, - { {1280, 720, 74250, 40, 440, 220, 5, 5, 20}, 1, 1}, - { {1280, 720, 74250, 40, 110, 220, 5, 5, 20}, 1, 1}, - { {720, 480, 27027, 62, 16, 60, 6, 9, 30}, 0, 0}, - { {2880, 576, 108000, 256, 48, 272, 5, 5, 39}, 0, 0}, - { {1440, 240, 27027, 124, 38, 114, 3, 4, 15}, 0, 0}, - { {1440, 288, 27000, 126, 24, 138, 3, 2, 19}, 0, 0}, - { {1920, 540, 74250, 44, 528, 148, 5, 2, 15}, 1, 1}, - { {1920, 540, 74250, 44, 88, 148, 5, 2, 15}, 1, 1}, - { {1920, 1080, 148500, 44, 88, 148, 5, 4, 36}, 1, 1}, - { {720, 576, 27000, 64, 12, 68, 5, 5, 39}, 0, 0}, - { {1440, 576, 54000, 128, 24, 136, 5, 5, 39}, 0, 0}, - { {1920, 1080, 148500, 44, 528, 148, 5, 4, 36}, 1, 1}, - { {2880, 480, 108108, 248, 64, 240, 6, 9, 30}, 0, 0}, - { {1920, 1080, 74250, 44, 638, 148, 5, 4, 36}, 1, 1}, - /* VESA From Here */ - { {640, 480, 25175, 96, 16, 48, 2 , 11, 31}, 0, 0}, - { {800, 600, 40000, 128, 40, 88, 4 , 1, 23}, 1, 1}, - { {848, 480, 33750, 112, 16, 112, 8 , 6, 23}, 1, 1}, - { {1280, 768, 79500, 128, 64, 192, 7 , 3, 20}, 1, 0}, - { {1280, 800, 83500, 128, 72, 200, 6 , 3, 22}, 1, 0}, - { {1360, 768, 85500, 112, 64, 256, 6 , 3, 18}, 1, 1}, - { {1280, 960, 108000, 112, 96, 312, 3 , 1, 36}, 1, 1}, - { {1280, 1024, 108000, 112, 48, 248, 3 , 1, 38}, 1, 1}, - { {1024, 768, 65000, 136, 24, 160, 6, 3, 29}, 0, 0}, - { {1400, 1050, 121750, 144, 88, 232, 4, 3, 32}, 1, 0}, - { {1440, 900, 106500, 152, 80, 232, 6, 3, 25}, 1, 0}, - { {1680, 1050, 146250, 176 , 104, 280, 6, 3, 30}, 1, 0}, - { {1366, 768, 85500, 143, 70, 213, 3, 3, 24}, 1, 1}, - { {1920, 1080, 148500, 44, 148, 80, 5, 4, 36}, 1, 1}, - { {1280, 768, 68250, 32, 48, 80, 7, 3, 12}, 0, 1}, - { {1400, 1050, 101000, 32, 48, 80, 4, 3, 23}, 0, 1}, - { {1680, 1050, 119000, 32, 48, 80, 6, 3, 21}, 0, 1}, - { {1280, 800, 79500, 32, 48, 80, 6, 3, 14}, 0, 1}, - { {1280, 720, 74250, 40, 110, 220, 5, 5, 20}, 1, 1} -}; - -/* - * This is a static mapping array which maps the timing values - * with corresponding CEA / VESA code - */ -static const int code_index[OMAP_HDMI_TIMINGS_NB] = { - 1, 19, 4, 2, 37, 6, 21, 20, 5, 16, 17, 29, 31, 35, 32, - /* <--15 CEA 17--> vesa*/ - 4, 9, 0xE, 0x17, 0x1C, 0x27, 0x20, 0x23, 0x10, 0x2A, - 0X2F, 0x3A, 0X51, 0X52, 0x16, 0x29, 0x39, 0x1B +struct fb_videomode cea_timings[] = { + /* 640x480 at 60.00 Hz */ + [1] = { NULL, 60, + 640, 480, 39682, 48, 16, 33, 10, 96, 2, + 0, FB_VMODE_NONINTERLACED, }, + /* 720x480 at 60.00 Hz */ + [2] = { NULL, 60, + 720, 480, 37000, 60, 16, 30, 9, 62, 6, + 0, FB_VMODE_NONINTERLACED, }, + /* 720x480 at 60.00 Hz */ + [3] = { NULL, 60, + 720, 480, 37000, 60, 16, 30, 9, 62, 6, + 0, FB_VMODE_NONINTERLACED, }, + /* 1280x720 at 60.00 Hz */ + [4] = { NULL, 60, + 1280, 720, 13468, 220, 110, 20, 5, 40, 5, + FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT, + FB_VMODE_NONINTERLACED, }, + /* 1920x540 at 60.05 Hz */ + [5] = { NULL, 60, + 1920, 1080, 13468, 148, 88, 15, 2, 44, 5, + FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT, + FB_VMODE_INTERLACED, }, + /* 1440x240 at 60.11 Hz */ + [6] = { NULL, 60, + 1440, 480, 37000, 114, 38, 15, 4, 124, 3, + 0, FB_VMODE_INTERLACED, }, + /* 1440x240 at 60.11 Hz */ + [7] = { NULL, 60, + 1440, 480, 37000, 114, 38, 15, 4, 124, 3, + 0, FB_VMODE_INTERLACED, }, + /* 1920x1080 at 60.00 Hz */ + [16] = { NULL, 60, + 1920, 1080, 6734, 148, 88, 36, 4, 44, 5, + FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT, + FB_VMODE_NONINTERLACED, }, + /* 720x576 at 50.00 Hz */ + [17] = { NULL, 50, + 720, 
576, 37037, 68, 12, 39, 5, 64, 5, + 0, FB_VMODE_NONINTERLACED, }, + /* 720x576 at 50.00 Hz */ + [18] = { NULL, 50, + 720, 576, 37037, 68, 12, 39, 5, 64, 5, + 0, FB_VMODE_NONINTERLACED, }, + /* 1280x720 at 50.00 Hz */ + [19] = { NULL, 50, + 1280, 720, 13468, 220, 440, 20, 5, 40, 5, + FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT, + FB_VMODE_NONINTERLACED, }, + /* 1920x540 at 50.04 Hz */ + [20] = { NULL, 50, + 1920, 1080, 13468, 148, 528, 15, 2, 44, 5, + FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT, + FB_VMODE_INTERLACED, }, + /* 1440x288 at 50.08 Hz */ + [21] = { NULL, 50, + 1440, 576, 37037, 138, 24, 19, 2, 126, 3, + 0, FB_VMODE_INTERLACED, }, + /* 1440x288 at 50.08 Hz */ + [22] = { NULL, 50, + 1440, 576, 37037, 138, 24, 19, 2, 126, 3, + 0, FB_VMODE_INTERLACED, }, + /* 1440x576 at 50.00 Hz */ + [29] = { NULL, 50, + 1440, 576, 18518, 136, 24, 39, 5, 128, 5, + 0, FB_VMODE_NONINTERLACED, }, + /* 1440x576 at 50.00 Hz */ + [30] = { NULL, 50, + 1440, 576, 18518, 136, 24, 39, 5, 128, 5, + 0, FB_VMODE_NONINTERLACED, }, + /* 1920x1080 at 50.00 Hz */ + [31] = { NULL, 50, + 1920, 1080, 6734, 148, 528, 36, 4, 44, 5, + FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT, + FB_VMODE_NONINTERLACED, }, + /* 1920x1080 at 24.00 Hz */ + [32] = { NULL, 24, + 1920, 1080, 13468, 148, 638, 36, 4, 44, 5, + FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT, + FB_VMODE_NONINTERLACED, }, + /* 2880x480 at 60.00 Hz */ + [35] = { NULL, 60, + 2880, 480, 9250, 240, 64, 30, 9, 248, 6, + 0, FB_VMODE_NONINTERLACED, }, + /* 2880x480 at 60.00 Hz */ + [36] = { NULL, 60, + 2880, 480, 9250, 240, 64, 30, 9, 248, 6, + 0, FB_VMODE_NONINTERLACED, }, + /* 2880x576 at 50.00 Hz */ + [37] = { NULL, 50, + 2880, 576, 9259, 272, 48, 39, 5, 256, 5, + 0, FB_VMODE_NONINTERLACED, }, + /* 2880x576 at 50.00 Hz */ + [38] = { NULL, 50, + 2880, 576, 9259, 272, 48, 39, 5, 256, 5, + 0, FB_VMODE_NONINTERLACED, }, }; - -/* - * This is reverse static mapping which maps the CEA / VESA code - * to the corresponding timing values - */ -static const int code_cea[39] = { - -1, 0, 3, 3, 2, 8, 5, 5, -1, -1, - -1, -1, -1, -1, -1, -1, 9, 10, 10, 1, - 7, 6, 6, -1, -1, -1, -1, -1, -1, 11, - 11, 12, 14, -1, -1, 13, 13, 4, 4 +struct fb_videomode vesa_timings[] = { + /* 640x480 at 60.05 Hz */ + [4] = { NULL, 60, + 640, 480, 39721, 48, 16, 31, 11, 96, 2, + 0, FB_VMODE_NONINTERLACED, }, + /* 800x600 at 60.32 Hz */ + [9] = { NULL, 60, + 800, 600, 25000, 88, 40, 23, 1, 128, 4, + FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT, + FB_VMODE_NONINTERLACED, }, + /* 848x480 at 60.00 Hz */ + [14] = { NULL, 60, + 848, 480, 29629, 112, 16, 23, 6, 112, 8, + FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT, + FB_VMODE_NONINTERLACED, }, + /* 1024x768 at 60.00 Hz */ + [16] = { NULL, 60, + 1024, 768, 15384, 160, 24, 29, 3, 136, 6, + 0, FB_VMODE_NONINTERLACED, }, + /* 1280x768 at 59.99 Hz */ + [22] = { NULL, 59, + 1280, 768, 14652, 80, 48, 12, 3, 32, 7, + FB_SYNC_HOR_HIGH_ACT, FB_VMODE_NONINTERLACED, }, + /* 1280x768 at 59.87 Hz */ + [23] = { NULL, 59, + 1280, 768, 12578, 192, 64, 20, 3, 128, 7, + FB_SYNC_VERT_HIGH_ACT, FB_VMODE_NONINTERLACED, }, + /* 1280x800 at 67.08 Hz */ + [27] = { NULL, 67, + 1280, 800, 12578, 80, 48, 14, 3, 32, 6, + FB_SYNC_HOR_HIGH_ACT, FB_VMODE_NONINTERLACED, }, + /* 1280x800 at 59.81 Hz */ + [28] = { NULL, 59, + 1280, 800, 11976, 200, 72, 22, 3, 128, 6, + FB_SYNC_VERT_HIGH_ACT, FB_VMODE_NONINTERLACED, }, + /* 1280x960 at 60.00 Hz */ + [32] = { NULL, 60, + 1280, 960, 9259, 312, 96, 36, 1, 112, 3, + FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT, + FB_VMODE_NONINTERLACED, }, + /* 
1280x1024 at 60.02 Hz */ + [35] = { NULL, 60, + 1280, 1024, 9259, 248, 48, 38, 1, 112, 3, + FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT, + FB_VMODE_NONINTERLACED, }, + /* 1360x768 at 60.02 Hz */ + [39] = { NULL, 60, + 1360, 768, 11695, 256, 64, 18, 3, 112, 6, + FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT, + FB_VMODE_NONINTERLACED, }, + /* 1400x1050 at 59.95 Hz */ + [41] = { NULL, 59, + 1400, 1050, 9900, 80, 48, 23, 3, 32, 4, + FB_SYNC_HOR_HIGH_ACT, FB_VMODE_NONINTERLACED, }, + /* 1400x1050 at 59.98 Hz */ + [42] = { NULL, 59, + 1400, 1050, 8213, 232, 88, 32, 3, 144, 4, + FB_SYNC_VERT_HIGH_ACT, FB_VMODE_NONINTERLACED, }, + /* 1440x900 at 59.89 Hz */ + [47] = { NULL, 59, + 1440, 900, 9389, 232, 80, 25, 3, 152, 6, + FB_SYNC_VERT_HIGH_ACT, FB_VMODE_NONINTERLACED, }, + /* 1680x1050 at 59.88 Hz */ + [57] = { NULL, 59, + 1680, 1050, 8403, 80, 48, 21, 3, 32, 6, + FB_SYNC_HOR_HIGH_ACT, FB_VMODE_NONINTERLACED, }, + /* 1680x1050 at 59.95 Hz */ + [58] = { NULL, 59, + 1680, 1050, 6837, 280, 104, 30, 3, 176, 6, + FB_SYNC_VERT_HIGH_ACT, FB_VMODE_NONINTERLACED, }, + /* 1366x768 at 59.79 Hz */ + [81] = { NULL, 59, + 1366, 768, 11695, 213, 70, 24, 3, 143, 3, + FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT, + FB_VMODE_NONINTERLACED, }, + /* 1920x1080 at 60.22 Hz */ + [82] = { NULL, 60, + 1920, 1080, 6734, 80, 148, 36, 4, 44, 5, + FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT, + FB_VMODE_NONINTERLACED, }, + /* 1280x720 at 60.00 Hz */ + [84] = { NULL, 60, + 1280, 720, 13468, 220, 110, 20, 5, 40, 5, + FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT, + FB_VMODE_NONINTERLACED, }, }; -static const int code_vesa[85] = { - -1, -1, -1, -1, 15, -1, -1, -1, -1, 16, - -1, -1, -1, -1, 17, -1, 23, -1, -1, -1, - -1, -1, 29, 18, -1, -1, -1, 32, 19, -1, - -1, -1, 21, -1, -1, 22, -1, -1, -1, 20, - -1, 30, 24, -1, -1, -1, -1, 25, -1, -1, - -1, -1, -1, -1, -1, -1, -1, 31, 26, -1, - -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, - -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, - -1, 27, 28, -1, 33}; - static const u8 edid_header[8] = {0x0, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x0}; static int hdmi_runtime_get(void) @@ -232,187 +348,127 @@ int hdmi_init_display(struct omap_dss_device *dssdev) return 0; } -static void copy_hdmi_to_dss_timings(struct hdmi_video_timings hdmi_timings, - struct omap_video_timings *timings) +static int relaxed_fb_mode_is_equal(const struct fb_videomode *mode1, + const struct fb_videomode *mode2) { - timings->x_res = hdmi_timings.x_res; - timings->y_res = hdmi_timings.y_res; - timings->pixel_clock = hdmi_timings.pixel_clock; - timings->hbp = hdmi_timings.hbp; - timings->hfp = hdmi_timings.hfp; - timings->hsw = hdmi_timings.hsw; - timings->vbp = hdmi_timings.vbp; - timings->vfp = hdmi_timings.vfp; - timings->vsw = hdmi_timings.vsw; + return (mode1->xres == mode2->xres && + mode1->yres == mode2->yres && + mode1->pixclock <= mode2->pixclock + 1 && + mode1->pixclock >= mode2->pixclock - 1 && + mode1->hsync_len + mode1->left_margin + mode1->right_margin == + mode2->hsync_len + mode2->left_margin + mode2->right_margin && + mode1->vsync_len + mode1->upper_margin + mode1->lower_margin == + mode2->vsync_len + mode2->upper_margin + mode2->lower_margin && + (mode1->vmode & FB_VMODE_INTERLACED) == + (mode2->vmode & FB_VMODE_INTERLACED)); } -static int get_timings_index(void) +static int hdmi_set_timings(const struct fb_videomode *vm, bool check_only) { - int code; - - if (hdmi.mode == 0) - code = code_vesa[hdmi.code]; - else - code = code_cea[hdmi.code]; - - if (code == -1) { - /* HDMI code 4 corresponds to 640 * 480 VGA */ - 
hdmi.code = 4; - /* DVI mode 1 corresponds to HDMI 0 to DVI */ - hdmi.mode = HDMI_DVI; + int i = 0; + DSSDBG("hdmi_get_code\n"); - code = code_vesa[hdmi.code]; + if (!vm->xres || !vm->yres || !vm->pixclock) + goto fail; + + for (i = 0; i < ARRAY_SIZE(cea_timings); i++) { + if (relaxed_fb_mode_is_equal(cea_timings + i, vm)) { + if (check_only) + return 1; + hdmi.cfg.cm.code = i; + hdmi.cfg.cm.mode = HDMI_HDMI; + hdmi.cfg.timings = cea_timings[hdmi.cfg.cm.code]; + goto done; + } } - return code; -} -static struct hdmi_cm hdmi_get_code(struct omap_video_timings *timing) -{ - int i = 0, code = -1, temp_vsync = 0, temp_hsync = 0; - int timing_vsync = 0, timing_hsync = 0; - struct hdmi_video_timings temp; - struct hdmi_cm cm = {-1}; - DSSDBG("hdmi_get_code\n"); - - for (i = 0; i < OMAP_HDMI_TIMINGS_NB; i++) { - temp = cea_vesa_timings[i].timings; - if ((temp.pixel_clock == timing->pixel_clock) && - (temp.x_res == timing->x_res) && - (temp.y_res == timing->y_res)) { - - temp_hsync = temp.hfp + temp.hsw + temp.hbp; - timing_hsync = timing->hfp + timing->hsw + timing->hbp; - temp_vsync = temp.vfp + temp.vsw + temp.vbp; - timing_vsync = timing->vfp + timing->vsw + timing->vbp; - - DSSDBG("temp_hsync = %d , temp_vsync = %d" - "timing_hsync = %d, timing_vsync = %d\n", - temp_hsync, temp_hsync, - timing_hsync, timing_vsync); - - if ((temp_hsync == timing_hsync) && - (temp_vsync == timing_vsync)) { - code = i; - cm.code = code_index[i]; - if (code < 14) - cm.mode = HDMI_HDMI; - else - cm.mode = HDMI_DVI; - DSSDBG("Hdmi_code = %d mode = %d\n", - cm.code, cm.mode); - break; - } + for (i = 0; i < ARRAY_SIZE(vesa_timings); i++) { + if (relaxed_fb_mode_is_equal(vesa_timings + i, vm)) { + if (check_only) + return 1; + hdmi.cfg.cm.code = i; + hdmi.cfg.cm.mode = HDMI_DVI; + hdmi.cfg.timings = vesa_timings[hdmi.cfg.cm.code]; + goto done; + } + } +#if 0 + for (i = 0; i < sizeof(cea_modes); i++) { + if (relaxed_fb_mode_is_equal(cea_modes + i, vm)) { + if (check_only) + return 1; + hdmi.cfg.cm.code = i; + hdmi.cfg.cm.mode = HDMI_HDMI; + hdmi.cfg.timings = cea_modes[hdmi.cfg.cm.code]; + goto done; } } - return cm; + for (i = 0; i < 34; i++) { + if (relaxed_fb_mode_is_equal(vesa_modes + i, vm)) { + if (check_only) + return 1; + hdmi.cfg.cm.code = i; + hdmi.cfg.cm.mode = HDMI_DVI; + hdmi.cfg.timings = vesa_modes[hdmi.cfg.cm.code]; + goto done; + } + } +#endif +fail: + if (check_only) + return 0; + hdmi.cfg.cm.code = 1; + hdmi.cfg.cm.mode = HDMI_HDMI; + hdmi.cfg.timings = cea_timings[hdmi.cfg.cm.code]; + + i = -1; +done: + + DSSDBG("%s-%d\n", hdmi.cfg.cm.mode ? 
"CEA" : "VESA", hdmi.cfg.cm.code); + return i >= 0; } -static void get_horz_vert_timing_info(int current_descriptor_addrs, u8 *edid , - struct omap_video_timings *timings) +void hdmi_get_monspecs(struct fb_monspecs *specs) { - /* X and Y resolution */ - timings->x_res = (((edid[current_descriptor_addrs + 4] & 0xF0) << 4) | - edid[current_descriptor_addrs + 2]); - timings->y_res = (((edid[current_descriptor_addrs + 7] & 0xF0) << 4) | - edid[current_descriptor_addrs + 5]); - - timings->pixel_clock = ((edid[current_descriptor_addrs + 1] << 8) | - edid[current_descriptor_addrs]); - - timings->pixel_clock = 10 * timings->pixel_clock; - - /* HORIZONTAL FRONT PORCH */ - timings->hfp = edid[current_descriptor_addrs + 8] | - ((edid[current_descriptor_addrs + 11] & 0xc0) << 2); - /* HORIZONTAL SYNC WIDTH */ - timings->hsw = edid[current_descriptor_addrs + 9] | - ((edid[current_descriptor_addrs + 11] & 0x30) << 4); - /* HORIZONTAL BACK PORCH */ - timings->hbp = (((edid[current_descriptor_addrs + 4] & 0x0F) << 8) | - edid[current_descriptor_addrs + 3]) - - (timings->hfp + timings->hsw); - /* VERTICAL FRONT PORCH */ - timings->vfp = ((edid[current_descriptor_addrs + 10] & 0xF0) >> 4) | - ((edid[current_descriptor_addrs + 11] & 0x0f) << 2); - /* VERTICAL SYNC WIDTH */ - timings->vsw = (edid[current_descriptor_addrs + 10] & 0x0F) | - ((edid[current_descriptor_addrs + 11] & 0x03) << 4); - /* VERTICAL BACK PORCH */ - timings->vbp = (((edid[current_descriptor_addrs + 7] & 0x0F) << 8) | - edid[current_descriptor_addrs + 6]) - - (timings->vfp + timings->vsw); + int i, j; + char *edid = (char *) hdmi.edid; -} + memset(specs, 0x0, sizeof(*specs)); + if (!hdmi.edid_set) + return; -/* Description : This function gets the resolution information from EDID */ -static void get_edid_timing_data(u8 *edid) -{ - u8 count; - u16 current_descriptor_addrs; - struct hdmi_cm cm; - struct omap_video_timings edid_timings; - - /* search block 0, there are 4 DTDs arranged in priority order */ - for (count = 0; count < EDID_SIZE_BLOCK0_TIMING_DESCRIPTOR; count++) { - current_descriptor_addrs = - EDID_DESCRIPTOR_BLOCK0_ADDRESS + - count * EDID_TIMING_DESCRIPTOR_SIZE; - get_horz_vert_timing_info(current_descriptor_addrs, - edid, &edid_timings); - cm = hdmi_get_code(&edid_timings); - DSSDBG("Block0[%d] value matches code = %d , mode = %d\n", - count, cm.code, cm.mode); - if (cm.code == -1) { - continue; - } else { - hdmi.code = cm.code; - hdmi.mode = cm.mode; - DSSDBG("code = %d , mode = %d\n", - hdmi.code, hdmi.mode); - return; - } - } - if (edid[0x7e] != 0x00) { - for (count = 0; count < EDID_SIZE_BLOCK1_TIMING_DESCRIPTOR; - count++) { - current_descriptor_addrs = - EDID_DESCRIPTOR_BLOCK1_ADDRESS + - count * EDID_TIMING_DESCRIPTOR_SIZE; - get_horz_vert_timing_info(current_descriptor_addrs, - edid, &edid_timings); - cm = hdmi_get_code(&edid_timings); - DSSDBG("Block1[%d] value matches code = %d, mode = %d", - count, cm.code, cm.mode); - if (cm.code == -1) { - continue; - } else { - hdmi.code = cm.code; - hdmi.mode = cm.mode; - DSSDBG("code = %d , mode = %d\n", - hdmi.code, hdmi.mode); - return; - } - } + fb_edid_to_monspecs(edid, specs); + if (specs->modedb == NULL) + return; + + for (i = 1; i <= edid[0x7e] && i * 128 < HDMI_EDID_MAX_LENGTH; i++) { + if (edid[i * 128] == 0x2) + fb_edid_add_monspecs(edid + i * 128, specs); } - DSSINFO("no valid timing found , falling back to VGA\n"); - hdmi.code = 4; /* setting default value of 640 480 VGA */ - hdmi.mode = HDMI_DVI; + /* filter out resolutions we don't support */ + for (i = j = 0; i 
< specs->modedb_len; i++) { + if (hdmi_set_timings(&specs->modedb[i], true)) + specs->modedb[j++] = specs->modedb[i]; + } + specs->modedb_len = j; } u8 *hdmi_read_edid(struct omap_video_timings *dp) { - int ret = 0, code, i; + int ret = 0, i; + + if (hdmi.edid_set) + return hdmi.edid; memset(hdmi.edid, 0, HDMI_EDID_MAX_LENGTH); - hdmi.edid_set = false; ret = read_ti_4xxx_edid(&hdmi.hdmi_data, hdmi.edid, HDMI_EDID_MAX_LENGTH); for (i = 0; i < 256; i += 16) - pr_debug("edid[%03x] = %02x %02x %02x %02x %02x %02x %02x %02x " + pr_info("edid[%03x] = %02x %02x %02x %02x %02x %02x %02x %02x " "%02x %02x %02x %02x %02x %02x %02x %02x\n", i, hdmi.edid[i], hdmi.edid[i + 1], hdmi.edid[i + 2], hdmi.edid[i + 3], hdmi.edid[i + 4], hdmi.edid[i + 5], @@ -421,44 +477,16 @@ u8 *hdmi_read_edid(struct omap_video_timings *dp) hdmi.edid[i + 12], hdmi.edid[i + 13], hdmi.edid[i + 14], hdmi.edid[i + 15]); - if (!ret) { - if (!memcmp(hdmi.edid, edid_header, sizeof(edid_header))) { - /* search for timings of default resolution */ - get_edid_timing_data(hdmi.edid); - hdmi.edid_set = true; - } - } else { + if (ret) { DSSWARN("failed to read E-EDID\n"); - ret = -EINVAL; - } - - if (!hdmi.edid_set) { - DSSINFO("fallback to VGA\n"); - hdmi.code = 4; /* setting default value of 640 480 VGA */ - hdmi.mode = HDMI_DVI; + return NULL; } - code = get_timings_index(); - - copy_hdmi_to_dss_timings(cea_vesa_timings[code].timings, dp); + if (memcmp(hdmi.edid, edid_header, sizeof(edid_header))) + return NULL; - return ret ? NULL : hdmi.edid; -} - -static void update_hdmi_timings(struct hdmi_config *cfg, - struct omap_video_timings *timings, int code) -{ - cfg->timings.timings.x_res = timings->x_res; - cfg->timings.timings.y_res = timings->y_res; - cfg->timings.timings.hbp = timings->hbp; - cfg->timings.timings.hfp = timings->hfp; - cfg->timings.timings.hsw = timings->hsw; - cfg->timings.timings.vbp = timings->vbp; - cfg->timings.timings.vfp = timings->vfp; - cfg->timings.timings.vsw = timings->vsw; - cfg->timings.timings.pixel_clock = timings->pixel_clock; - cfg->timings.vsync_pol = cea_vesa_timings[code].vsync_pol; - cfg->timings.hsync_pol = cea_vesa_timings[code].hsync_pol; + hdmi.edid_set = true; + return hdmi.edid; } static void hdmi_compute_pll(struct omap_dss_device *dssdev, int phy, @@ -503,7 +531,7 @@ static void hdmi_compute_pll(struct omap_dss_device *dssdev, int phy, static int hdmi_power_on(struct omap_dss_device *dssdev) { - int r, code = 0; + int r; struct hdmi_pll_info pll_data; struct omap_video_timings *p; unsigned long phy; @@ -522,16 +550,10 @@ static int hdmi_power_on(struct omap_dss_device *dssdev) dssdev->panel.timings.x_res, dssdev->panel.timings.y_res); - if (!hdmi.custom_set) { - DSSDBG("Read EDID as no EDID is not set on poweron\n"); + if (!hdmi.custom_set) + hdmi_set_timings(&vesa_timings[4], false); - hdmi_read_edid(p); - } - code = get_timings_index(); - copy_hdmi_to_dss_timings(cea_vesa_timings[code].timings, - &dssdev->panel.timings); - - update_hdmi_timings(&hdmi.cfg, p, code); + omapfb_fb2dss_timings(&hdmi.cfg.timings, &dssdev->panel.timings); phy = p->pixel_clock; @@ -612,7 +634,6 @@ static void hdmi_power_off(struct omap_dss_device *dssdev) hdmi_ti_4xxx_set_pll_pwr(&hdmi.hdmi_data, HDMI_PLLPWRCMD_ALLOFF); hdmi_runtime_put(); hdmi.deep_color = HDMI_DEEP_COLOR_24BIT; - hdmi.edid_set = false; } void omapdss_hdmi_set_deepcolor(int val) @@ -643,30 +664,46 @@ static irqreturn_t hpd_irq_handler(int irq, void *ptr) int omapdss_hdmi_display_check_timing(struct omap_dss_device *dssdev, struct 
omap_video_timings *timings) { - struct hdmi_cm cm; + struct fb_videomode t; - cm = hdmi_get_code(timings); - if (cm.code == -1) { - DSSERR("Invalid timing entered\n"); - return -EINVAL; - } + omapfb_dss2fb_timings(timings, &t); + /* also check interlaced timings */ + if (!hdmi_set_timings(&t, true)) { + t.yres *= 2; + t.vmode |= FB_VMODE_INTERLACED; + } + if (!hdmi_set_timings(&t, true)) + return -EINVAL; return 0; +} +int omapdss_hdmi_display_set_mode(struct omap_dss_device *dssdev, + struct fb_videomode *vm) +{ + int r1, r2; + /* turn the hdmi off and on to get new timings to use */ + omapdss_hdmi_display_disable(dssdev); + r1 = hdmi_set_timings(vm, false) ? 0 : -EINVAL; + hdmi.custom_set = 1; + hdmi.code = hdmi.cfg.cm.code; + hdmi.mode = hdmi.cfg.cm.mode; + r2 = omapdss_hdmi_display_enable(dssdev); + return r1 ? : r2; } void omapdss_hdmi_display_set_timing(struct omap_dss_device *dssdev) { - struct hdmi_cm cm; + struct fb_videomode t; - hdmi.custom_set = 1; - cm = hdmi_get_code(&dssdev->panel.timings); - /* turn the hdmi off and on to get new timings to use */ - omapdss_hdmi_display_disable(dssdev); - hdmi.code = cm.code; - hdmi.mode = cm.mode; - omapdss_hdmi_display_enable(dssdev); - hdmi.custom_set = 0; + omapfb_dss2fb_timings(&dssdev->panel.timings, &t); + /* also check interlaced timings */ + if (!hdmi_set_timings(&t, true)) { + t.yres *= 2; + t.vmode |= FB_VMODE_INTERLACED; + } + + omapdss_hdmi_display_set_mode(dssdev, &t); } int omapdss_hdmi_display_enable(struct omap_dss_device *dssdev) @@ -735,6 +772,13 @@ void omapdss_hdmi_display_disable(struct omap_dss_device *dssdev) hdmi_power_off(dssdev); + if (dssdev->state != OMAP_DSS_DISPLAY_SUSPENDED) { + /* clear EDID and mode on disable only */ + hdmi.edid_set = false; + hdmi.custom_set = 0; + pr_info("hdmi: clearing EDID info\n"); + } + regulator_disable(hdmi.hdmi_reg); regulator_put(hdmi.hdmi_reg); diff --git a/drivers/video/omap2/dss/hdmi_panel.c b/drivers/video/omap2/dss/hdmi_panel.c index aafe636..9e2cfc3 100644 --- a/drivers/video/omap2/dss/hdmi_panel.c +++ b/drivers/video/omap2/dss/hdmi_panel.c @@ -139,6 +139,8 @@ static int hdmi_panel_suspend(struct omap_dss_device *dssdev) dssdev->state = OMAP_DSS_DISPLAY_SUSPENDED; + hdmi_panel_hpd_handler(0); + omapdss_hdmi_display_disable(dssdev); err: mutex_unlock(&hdmi.hdmi_lock); @@ -194,23 +196,41 @@ static void hdmi_hotplug_detect_worker(struct work_struct *work) if (dssdev == NULL) return; + mutex_lock(&hdmi.hdmi_lock); if (state == HPD_STATE_OFF) { switch_set_state(&hdmi.hpd_switch, 0); - dssdev->driver->disable(dssdev); - return; + if (dssdev->state == OMAP_DSS_DISPLAY_ACTIVE) { + mutex_unlock(&hdmi.hdmi_lock); + dssdev->driver->disable(dssdev); + mutex_lock(&hdmi.hdmi_lock); + } + goto done; } else { if (state == HPD_STATE_START) { + mutex_unlock(&hdmi.hdmi_lock); dssdev->driver->enable(dssdev); + mutex_lock(&hdmi.hdmi_lock); + } else if (dssdev->state != OMAP_DSS_DISPLAY_ACTIVE || + hdmi.hpd_switch.state) { + /* powered down after enable - skip EDID read */ + goto done; } else if (hdmi_read_edid(&dssdev->panel.timings)) { + /* get monspecs from edid */ + hdmi_get_monspecs(&dssdev->panel.monspecs); + pr_info("panel size %d by %d\n", + dssdev->panel.monspecs.max_x, + dssdev->panel.monspecs.max_y); switch_set_state(&hdmi.hpd_switch, 1); - return; + goto done; } else if (state == HPD_STATE_EDID_TRYLAST){ pr_info("Failed to read EDID after %d times. 
Giving up.", state - HPD_STATE_START); - return; + goto done; } if (atomic_add_unless(&d->state, 1, HPD_STATE_OFF)) queue_delayed_work(my_workq, &d->dwork, msecs_to_jiffies(60)); } +done: + mutex_unlock(&hdmi.hdmi_lock); } int hdmi_panel_hpd_handler(int hpd) @@ -265,6 +285,16 @@ err: return r; } +static int hdmi_get_modedb(struct omap_dss_device *dssdev, + struct fb_videomode *modedb, int modedb_len) +{ + struct fb_monspecs *specs = &dssdev->panel.monspecs; + if (specs->modedb_len < modedb_len) + modedb_len = specs->modedb_len; + memcpy(modedb, specs->modedb, sizeof(*modedb) * modedb_len); + return modedb_len; +} + static struct omap_dss_driver hdmi_driver = { .probe = hdmi_panel_probe, .remove = hdmi_panel_remove, @@ -275,6 +305,8 @@ static struct omap_dss_driver hdmi_driver = { .get_timings = hdmi_get_timings, .set_timings = hdmi_set_timings, .check_timings = hdmi_check_timings, + .get_modedb = hdmi_get_modedb, + .set_mode = omapdss_hdmi_display_set_mode, .driver = { .name = "hdmi_panel", .owner = THIS_MODULE, diff --git a/drivers/video/omap2/dsscomp/device.c b/drivers/video/omap2/dsscomp/device.c index 30557b0..50a11a3 100644 --- a/drivers/video/omap2/dsscomp/device.c +++ b/drivers/video/omap2/dsscomp/device.c @@ -306,6 +306,10 @@ static long query_display(struct dsscomp_dev *cdev, dis->state = dev->state; dis->timings = dev->panel.timings; + /* for now LCD panels don't have width and height */ + dis->width_in_mm = dev->panel.monspecs.max_x * 10; + dis->height_in_mm = dev->panel.monspecs.max_y * 10; + /* find all overlays available for/owned by this display */ for (i = 0; i < cdev->num_ovls && dis->enabled; i++) { if (cdev->ovls[i]->manager == mgr) @@ -334,6 +338,9 @@ static long query_display(struct dsscomp_dev *cdev, } dis->mgr.ix = dis->ix; + if (dis->modedb_len && dev->driver->get_modedb) + dis->modedb_len = dev->driver->get_modedb(dev, + (struct fb_videomode *) dis->modedb, dis->modedb_len); return 0; } @@ -344,6 +351,25 @@ static long check_ovl(struct dsscomp_dev *cdev, return (1 << cdev->num_ovls) - 1; } +static long setup_display(struct dsscomp_dev *cdev, + struct dsscomp_setup_display_data *dis) +{ + struct omap_dss_device *dev; + + /* get display */ + if (dis->ix >= cdev->num_displays) + return -EINVAL; + dev = cdev->displays[dis->ix]; + if (!dev) + return -EINVAL; + + if (dev->driver->set_mode) + return dev->driver->set_mode(dev, + (struct fb_videomode *) &dis->mode); + else + return 0; +} + static void fill_cache(struct dsscomp_dev *cdev) { unsigned long i; @@ -393,6 +419,7 @@ static long comp_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) struct dsscomp_setup_dispc_data dispc; struct dsscomp_display_info dis; struct dsscomp_check_ovl_data chk; + struct dsscomp_setup_display_data sdis; } u; dsscomp_gralloc_init(cdev); @@ -418,7 +445,8 @@ static long comp_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) { r = copy_from_user(&u.dis, ptr, sizeof(u.dis)) ? : query_display(cdev, &u.dis) ? : - copy_to_user(ptr, &u.dis, sizeof(u.dis)); + copy_to_user(ptr, &u.dis, sizeof(u.dis) + + sizeof(*u.dis.modedb) * u.dis.modedb_len); break; } case DSSCOMP_CHECK_OVL: @@ -427,6 +455,11 @@ static long comp_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) check_ovl(cdev, &u.chk); break; } + case DSSCOMP_SETUP_DISPLAY: + { + r = copy_from_user(&u.sdis, ptr, sizeof(u.sdis)) ? 
: + setup_display(cdev, &u.sdis); + } default: r = -EINVAL; } diff --git a/drivers/video/omap2/omapfb/Kconfig b/drivers/video/omap2/omapfb/Kconfig index aa33386..d15486e 100644 --- a/drivers/video/omap2/omapfb/Kconfig +++ b/drivers/video/omap2/omapfb/Kconfig @@ -7,6 +7,7 @@ menuconfig FB_OMAP2 select FB_CFB_FILLRECT select FB_CFB_COPYAREA select FB_CFB_IMAGEBLIT + select FB_MODE_HELPERS help Frame buffer driver for OMAP2+ based boards. diff --git a/drivers/video/omap2/omapfb/omapfb-main.c b/drivers/video/omap2/omapfb/omapfb-main.c index 86d6bb2..24ea1e5 100644 --- a/drivers/video/omap2/omapfb/omapfb-main.c +++ b/drivers/video/omap2/omapfb/omapfb-main.c @@ -1033,6 +1033,41 @@ static int omapfb_check_var(struct fb_var_screeninfo *var, struct fb_info *fbi) return r; } +void omapfb_fb2dss_timings(struct fb_videomode *fb_timings, + struct omap_video_timings *dss_timings) +{ + dss_timings->x_res = fb_timings->xres; + dss_timings->y_res = fb_timings->yres; + if (fb_timings->vmode & FB_VMODE_INTERLACED) + dss_timings->y_res /= 2; + dss_timings->pixel_clock = fb_timings->pixclock ? + PICOS2KHZ(fb_timings->pixclock) : 0; + dss_timings->hfp = fb_timings->right_margin; + dss_timings->hbp = fb_timings->left_margin; + dss_timings->hsw = fb_timings->hsync_len; + dss_timings->vfp = fb_timings->lower_margin; + dss_timings->vbp = fb_timings->upper_margin; + dss_timings->vsw = fb_timings->vsync_len; +} +EXPORT_SYMBOL(omapfb_fb2dss_timings); + +void omapfb_dss2fb_timings(struct omap_video_timings *dss_timings, + struct fb_videomode *fb_timings) +{ + memset(fb_timings, 0, sizeof(*fb_timings)); + fb_timings->xres = dss_timings->x_res; + fb_timings->yres = dss_timings->y_res; + fb_timings->pixclock = dss_timings->pixel_clock ? + KHZ2PICOS(dss_timings->pixel_clock) : 0; + fb_timings->right_margin = dss_timings->hfp; + fb_timings->left_margin = dss_timings->hbp; + fb_timings->hsync_len = dss_timings->hsw; + fb_timings->lower_margin = dss_timings->vfp; + fb_timings->upper_margin = dss_timings->vbp; + fb_timings->vsync_len = dss_timings->vsw; +} +EXPORT_SYMBOL(omapfb_dss2fb_timings); + /* set the video mode according to info->var */ static int omapfb_set_par(struct fb_info *fbi) { diff --git a/include/linux/i2c.h b/include/linux/i2c.h index a6c652e..fd8fcf0 100644 --- a/include/linux/i2c.h +++ b/include/linux/i2c.h @@ -208,6 +208,7 @@ struct i2c_client { struct i2c_driver *driver; /* and our access routines */ struct device dev; /* the device structure */ int irq; /* irq issued by device */ + bool ext_master; /* determine if the dev has a master outside mpu */ struct list_head detected; }; #define to_i2c_client(d) container_of(d, struct i2c_client, dev) @@ -239,6 +240,7 @@ static inline void i2c_set_clientdata(struct i2c_client *dev, void *data) * @archdata: copied into i2c_client.dev.archdata * @of_node: pointer to OpenFirmware device node * @irq: stored in i2c_client.irq + * @ext_master: determine if the dev has a master outside mpu * * I2C doesn't actually support hardware probing, although controllers and * devices may be able to use I2C_SMBUS_QUICK to tell whether or not there's @@ -259,6 +261,7 @@ struct i2c_board_info { struct dev_archdata *archdata; struct device_node *of_node; int irq; + bool ext_master; }; /** @@ -370,6 +373,9 @@ struct i2c_adapter { struct mutex userspace_clients_lock; struct list_head userspace_clients; + + struct mutex ext_clients_lock; /* Lock for external clients list */ + struct list_head ext_clients; /* Clients with master from external proc */ }; #define to_i2c_adapter(d) 
container_of(d, struct i2c_adapter, dev) @@ -429,6 +435,7 @@ void i2c_unlock_adapter(struct i2c_adapter *); #if defined(CONFIG_I2C) || defined(CONFIG_I2C_MODULE) extern int i2c_add_adapter(struct i2c_adapter *); extern int i2c_del_adapter(struct i2c_adapter *); +extern void i2c_detect_ext_master(struct i2c_adapter *); extern int i2c_add_numbered_adapter(struct i2c_adapter *); extern int i2c_register_driver(struct module *, struct i2c_driver *); diff --git a/include/linux/mmc/host.h b/include/linux/mmc/host.h index 1584b52..2cfa8d0 100644 --- a/include/linux/mmc/host.h +++ b/include/linux/mmc/host.h @@ -12,6 +12,7 @@ #include <linux/leds.h> #include <linux/sched.h> +#include <linux/wakelock.h> #include <linux/mmc/core.h> #include <linux/mmc/pm.h> @@ -261,6 +262,7 @@ struct mmc_host { int claim_cnt; /* "claim" nesting count */ struct delayed_work detect; + struct wake_lock detect_wake_lock; const struct mmc_bus_ops *bus_ops; /* current bus driver */ unsigned int bus_refs; /* reference counter */ diff --git a/include/linux/omapfb.h b/include/linux/omapfb.h index c744e93..f12501d 100644 --- a/include/linux/omapfb.h +++ b/include/linux/omapfb.h @@ -260,8 +260,13 @@ extern void omapfb_reserve_sdram_memblock(void); /* helper methods that may be used by other modules */ enum omap_color_mode; +struct omap_video_timings; int omapfb_mode_to_dss_mode(struct fb_var_screeninfo *var, enum omap_color_mode *mode); +void omapfb_fb2dss_timings(struct fb_videomode *fb_timings, + struct omap_video_timings *dss_timings); +void omapfb_dss2fb_timings(struct omap_video_timings *dss_timings, + struct fb_videomode *fb_timings); #endif diff --git a/include/linux/remoteproc.h b/include/linux/remoteproc.h index 7b9c549..8076cff 100644 --- a/include/linux/remoteproc.h +++ b/include/linux/remoteproc.h @@ -153,6 +153,8 @@ struct rproc_ops { int (*set_lat)(struct rproc *rproc, long v); int (*set_bw)(struct rproc *rproc, long v); int (*scale)(struct rproc *rproc, long v); + int (*watchdog_init)(struct rproc *, int (*)(struct rproc *)); + int (*watchdog_exit)(struct rproc *); }; /* @@ -244,9 +246,11 @@ struct rproc { struct mutex lock; struct dentry *dbg_dir; char *trace_buf0, *trace_buf1; + char *last_trace_buf0, *last_trace_buf1; int trace_len0, trace_len1; + int last_trace_len0, last_trace_len1; struct completion firmware_loading_complete; - struct work_struct mmufault_work; + struct work_struct error_work; struct blocking_notifier_head nb_error; struct completion error_comp; #ifdef CONFIG_REMOTE_PROC_AUTOSUSPEND @@ -277,5 +281,6 @@ extern const struct dev_pm_ops rproc_gen_pm_ops; #define GENERIC_RPROC_PM_OPS NULL #endif int rproc_set_constraints(struct rproc *, enum rproc_constraint type, long v); +int rproc_errror_notify(struct rproc *rproc); #endif /* REMOTEPROC_H */ diff --git a/include/linux/usb/otg_id.h b/include/linux/usb/otg_id.h index 46a4463..f9f5189 100644 --- a/include/linux/usb/otg_id.h +++ b/include/linux/usb/otg_id.h @@ -52,5 +52,7 @@ int otg_id_register_notifier(struct otg_id_notifier_block *otg_id_nb); void otg_id_unregister_notifier(struct otg_id_notifier_block *otg_id_nb); void otg_id_notify(void); +int otg_id_suspend(void); +void otg_id_resume(void); #endif /* __LINUX_USB_OTG_ID_H */ diff --git a/include/sound/omap-abe-dsp.h b/include/sound/omap-abe-dsp.h index a1d969a..901a55c 100644 --- a/include/sound/omap-abe-dsp.h +++ b/include/sound/omap-abe-dsp.h @@ -12,8 +12,7 @@ #define _OMAP4_ABE_DSP_H struct omap4_abe_dsp_pdata { - /* Return context loss count due to PM states changing */ - int 
(*get_context_loss_count)(struct device *dev); + bool (*was_context_lost)(struct device *dev); int (*device_scale)(struct device *req_dev, struct device *target_dev, unsigned long rate); diff --git a/include/video/dsscomp.h b/include/video/dsscomp.h index 3ce86d4..4daf3ea 100644 --- a/include/video/dsscomp.h +++ b/include/video/dsscomp.h @@ -107,6 +107,24 @@ struct omap_dss_cpr_coefs { #endif +/* copy of fb_videomode */ +struct dsscomp_videomode { + const char *name; /* optional */ + __u32 refresh; /* optional */ + __u32 xres; + __u32 yres; + __u32 pixclock; + __u32 left_margin; + __u32 right_margin; + __u32 upper_margin; + __u32 lower_margin; + __u32 hsync_len; + __u32 vsync_len; + __u32 sync; + __u32 vmode; + __u32 flag; +}; + /* * Stereoscopic Panel types * row, column, overunder, sidebyside options @@ -528,8 +546,9 @@ struct dsscomp_wb_copy_data { /* * ioctl: DSSCOMP_QUERY_DISPLAY, struct dsscomp_display_info * - * Gets informations about the display. Fill in ix before calling - * ioctl, and rest of the fields are filled in by ioctl. + * Gets information about the display. Fill in ix and modedb_len before + * calling ioctl, and the rest of the fields are filled in by the ioctl. Up to + * modedb_len timings are retrieved in the order of display preference. * * Returns: 0 on success, non-0 error value on failure. */ @@ -543,6 +562,24 @@ struct dsscomp_display_info { struct omap_video_timings timings; struct s3d_disp_info s3d_info; /* any S3D specific information */ struct dss2_mgr_info mgr; /* manager information */ + __u16 width_in_mm; /* screen dimensions */ + __u16 height_in_mm; + + __u32 modedb_len; /* number of video timings */ + struct dsscomp_videomode modedb[]; /* display supported timings */ +}; + +/* + * ioctl: DSSCOMP_SETUP_DISPLAY, struct dsscomp_setup_display_data + * + * Sets the video mode for the display. Fill in ix and mode before + * calling ioctl; the mode is applied by the ioctl. + * + * Returns: 0 on success, non-0 error value on failure.
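
To make the two ioctls documented above concrete, here is a minimal user-space sketch (illustrative only, not taken from this patch): the /dev/dsscomp node name and the display index are assumptions, and error handling is trimmed.

    #include <fcntl.h>
    #include <stdlib.h>
    #include <string.h>
    #include <sys/ioctl.h>
    #include <unistd.h>
    #include <video/dsscomp.h>

    int main(void)
    {
        int n = 8;                              /* ask for up to 8 timings */
        int fd = open("/dev/dsscomp", O_RDWR);  /* node name is an assumption */
        struct dsscomp_display_info *dis;
        struct dsscomp_setup_display_data sdis;

        /* modedb[] is a flexible array, so allocate room for n entries. */
        dis = calloc(1, sizeof(*dis) + n * sizeof(dis->modedb[0]));
        if (fd < 0 || !dis)
            return 1;
        dis->ix = 0;                            /* display index (sysfs/display0) */
        dis->modedb_len = n;
        if (ioctl(fd, DSSCOMP_QUERY_DISPLAY, dis) || !dis->modedb_len)
            return 1;

        memset(&sdis, 0, sizeof(sdis));
        sdis.ix = dis->ix;
        sdis.mode = dis->modedb[0];             /* most preferred timing comes first */
        if (ioctl(fd, DSSCOMP_SETUP_DISPLAY, &sdis))
            return 1;

        free(dis);
        close(fd);
        return 0;
    }
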
+ */ +struct dsscomp_setup_display_data { + __u32 ix; /* display index (sysfs/display#) */ + struct dsscomp_videomode mode; /* video timings */ }; /* @@ -583,6 +620,6 @@ struct dsscomp_wait_data { #define DSSCOMP_QUERY_DISPLAY _IOWR('O', 131, struct dsscomp_display_info) #define DSSCOMP_WAIT _IOW('O', 132, struct dsscomp_wait_data) -#define DSSCOMP_SETUP_DISPC _IOW('O', 127, struct dsscomp_setup_dispc_data) - +#define DSSCOMP_SETUP_DISPC _IOW('O', 133, struct dsscomp_setup_dispc_data) +#define DSSCOMP_SETUP_DISPLAY _IOW('O', 134, struct dsscomp_setup_display_data) #endif diff --git a/include/video/hdmi_ti_4xxx_ip.h b/include/video/hdmi_ti_4xxx_ip.h index cb7f5d8..68af674 100644 --- a/include/video/hdmi_ti_4xxx_ip.h +++ b/include/video/hdmi_ti_4xxx_ip.h @@ -73,8 +73,7 @@ struct hdmi_cm { }; struct hdmi_config { - struct hdmi_timings timings; - u16 interlace; + struct fb_videomode timings; struct hdmi_cm cm; enum hdmi_deep_color_mode deep_color; }; diff --git a/include/video/omapdss.h b/include/video/omapdss.h index 4458cda..fd03280 100644 --- a/include/video/omapdss.h +++ b/include/video/omapdss.h @@ -21,6 +21,7 @@ #include <linux/list.h> #include <linux/kobject.h> #include <linux/device.h> +#include <linux/fb.h> #define DISPC_IRQ_FRAMEDONE (1 << 0) #define DISPC_IRQ_VSYNC (1 << 1) @@ -538,6 +539,7 @@ struct omap_dss_device { u16 regm_dsi; u16 lp_clk_div; + unsigned offset_ddr_clk; enum omap_dss_clk_source dsi_fclk_src; } dsi; @@ -555,6 +557,7 @@ struct omap_dss_device { int acb; /* ac-bias pin frequency */ enum omap_panel_config config; + struct fb_monspecs monspecs; } panel; struct { @@ -642,6 +645,11 @@ struct omap_dss_driver { int (*set_wss)(struct omap_dss_device *dssdev, u32 wss); u32 (*get_wss)(struct omap_dss_device *dssdev); + int (*get_modedb)(struct omap_dss_device *dssdev, + struct fb_videomode *modedb, + int modedb_len); + int (*set_mode)(struct omap_dss_device *dssdev, + struct fb_videomode *mode); /* for wrapping around state changes */ void (*disable_orig)(struct omap_dss_device *display); diff --git a/net/netfilter/Makefile b/net/netfilter/Makefile index 8b658ef..6d91717 100644 --- a/net/netfilter/Makefile +++ b/net/netfilter/Makefile @@ -95,7 +95,7 @@ obj-$(CONFIG_NETFILTER_XT_MATCH_OWNER) += xt_owner.o obj-$(CONFIG_NETFILTER_XT_MATCH_PHYSDEV) += xt_physdev.o obj-$(CONFIG_NETFILTER_XT_MATCH_PKTTYPE) += xt_pkttype.o obj-$(CONFIG_NETFILTER_XT_MATCH_POLICY) += xt_policy.o -obj-$(CONFIG_NETFILTER_XT_MATCH_QTAGUID) += xt_qtaguid.o +obj-$(CONFIG_NETFILTER_XT_MATCH_QTAGUID) += xt_qtaguid_print.o xt_qtaguid.o obj-$(CONFIG_NETFILTER_XT_MATCH_QUOTA) += xt_quota.o obj-$(CONFIG_NETFILTER_XT_MATCH_QUOTA2) += xt_quota2.o obj-$(CONFIG_NETFILTER_XT_MATCH_RATEEST) += xt_rateest.o diff --git a/net/netfilter/xt_qtaguid.c b/net/netfilter/xt_qtaguid.c index 4571e8d..3ec203e 100644 --- a/net/netfilter/xt_qtaguid.c +++ b/net/netfilter/xt_qtaguid.c @@ -8,36 +8,11 @@ * published by the Free Software Foundation. */ -/* #define DEBUG */ -/* #define IDEBUG */ -/* #define MDEBUG */ -/* #define RDEBUG */ -/* #define CDEBUG */ - -/* Iface handling */ -#ifdef IDEBUG -#define IF_DEBUG(...) pr_debug(__VA_ARGS__) -#else -#define IF_DEBUG(...) no_printk(__VA_ARGS__) -#endif -/* Iptable Matching */ -#ifdef MDEBUG -#define MT_DEBUG(...) pr_debug(__VA_ARGS__) -#else -#define MT_DEBUG(...) no_printk(__VA_ARGS__) -#endif -/* Red-black tree handling */ -#ifdef RDEBUG -#define RB_DEBUG(...) pr_debug(__VA_ARGS__) -#else -#define RB_DEBUG(...) 
no_printk(__VA_ARGS__) -#endif -/* procfs ctrl/stats handling */ -#ifdef CDEBUG -#define CT_DEBUG(...) pr_debug(__VA_ARGS__) -#else -#define CT_DEBUG(...) no_printk(__VA_ARGS__) -#endif +/* + * There are run-time debug flags enabled via the debug_mask module param, or + * via the DEFAULT_DEBUG_MASK. See xt_qtaguid_internal.h. + */ +#define DEBUG #include <linux/file.h> #include <linux/inetdevice.h> @@ -52,6 +27,9 @@ #include <net/udp.h> #include <linux/netfilter/xt_socket.h> +#include "xt_qtaguid_internal.h" +#include "xt_qtaguid_print.h" + /* * We only use the xt_socket funcs within a similar context to avoid unexpected * return values. @@ -93,201 +71,158 @@ module_param_named(ctrl_write_gid, proc_ctrl_write_gid, uint, S_IRUGO | S_IWUSR); /* + * Limit the number of active tags (via socket tags) for a given UID. + * Multiple processes could share the UID. + */ +static int max_sock_tags = DEFAULT_MAX_SOCK_TAGS; +module_param(max_sock_tags, int, S_IRUGO | S_IWUSR); + +/* * After the kernel has initiallized this module, it is still possible - * to make it passive: - * - do not register it via iptables. - * the matching code will not be invoked. - * - set passive to 0 - * the iface stats handling will not be act on notifications. + * to make it passive. + * Setting passive to Y: + * - the iface stats handling will not act on notifications. + * - iptables matches will never match. + * - ctrl commands silently succeed. + * - stats are always empty. + * This is mostly useful when a bug is suspected. */ static bool module_passive; module_param_named(passive, module_passive, bool, S_IRUGO | S_IWUSR); -/*---------------------------------------------------------------------------*/ /* - * Tags: - * - * They represent what the data usage counters will be tracked against. - * By default a tag is just based on the UID. - * The UID is used as the base for policying, and can not be ignored. - * So a tag will always at least represent a UID (uid_tag). - * - * A tag can be augmented with an "accounting tag" which is associated - * with a UID. - * User space can set the acct_tag portion of the tag which is then used - * with sockets: all data belong to that socket will be counted against the - * tag. The policing is then based on the tag's uid_tag portion, - * and stats are collected for the acct_tag portion seperately. - * - * There could be - * a: {acct_tag=1, uid_tag=10003} - * b: {acct_tag=2, uid_tag=10003} - * c: {acct_tag=3, uid_tag=10003} - * d: {acct_tag=0, uid_tag=10003} - * (a, b, and c represent tags associated with specific sockets. - * d is for the totals for that uid, including all untagged traffic. - * Typically d is used with policing/quota rules. - * - * We want tag_t big enough to distinguish uid_t and acct_tag. - * It might become a struct if needed. - * Nothing should be using it as an int. + * Control how qtaguid data is tracked per proc/uid. + * Setting tag_tracking_passive to Y: + * - don't create proc specific structs to track tags + * - don't check that active tag stats exceed some limits. + * - don't clean up socket tags on process exits. + * This is mostly useful when a bug is suspected.
*/ -typedef uint64_t tag_t; /* Only used via accessors */ +static bool qtu_proc_handling_passive; +module_param_named(tag_tracking_passive, qtu_proc_handling_passive, bool, + S_IRUGO | S_IWUSR); + +#define QTU_DEV_NAME "xt_qtaguid" + +uint debug_mask = DEFAULT_DEBUG_MASK; +module_param(debug_mask, uint, S_IRUGO | S_IWUSR); + +/*---------------------------------------------------------------------------*/ static const char *iface_stat_procdirname = "iface_stat"; static struct proc_dir_entry *iface_stat_procdir; - /* - * For now we only track 2 sets of counters. - * The default set is 0. - * Userspace can activate another set for a given uid being tracked. + * Ordering of locks: + * outer locks: + * iface_stat_list_lock + * sock_tag_list_lock + * inner locks: + * uid_tag_data_tree_lock + * tag_counter_set_list_lock + * Notice how sock_tag_list_lock is held sometimes when uid_tag_data_tree_lock + * is acquired. + * + * Call tree with all lock holders as of 2011-09-06: + * + * qtaguid_ctrl_parse() + * ctrl_cmd_delete() + * sock_tag_list_lock + * tag_counter_set_list_lock + * iface_stat_list_lock + * iface_entry->tag_stat_list_lock + * uid_tag_data_tree_lock + * ctrl_cmd_counter_set() + * tag_counter_set_list_lock + * ctrl_cmd_tag() + * sock_tag_list_lock + * get_tag_ref() + * uid_tag_data_tree_lock + * uid_tag_data_tree_lock + * ctrl_cmd_untag() + * sock_tag_list_lock + * uid_tag_data_tree_lock + * + * qtaguid_mt() + * account_for_uid() + * if_tag_stat_update() + * get_sock_stat() + * sock_tag_list_lock + * iface_entry->tag_stat_list_lock + * tag_stat_update() + * get_active_counter_set() + * tag_counter_set_list_lock + * + * iface_netdev_event_handler() + * iface_stat_create() + * iface_stat_list_lock + * iface_stat_update() + * iface_stat_list_lock + * + * iface_inet6addr_event_handler() + * iface_stat_create_ipv6() + * iface_stat_list_lock + * iface_stat_update() + * iface_stat_list_lock + * + * iface_inetaddr_event_handler() + * iface_stat_create() + * iface_stat_list_lock + * iface_stat_update() + * iface_stat_list_lock + * + * qtaguid_ctrl_proc_read() + * sock_tag_list_lock + * sock_tag_list_lock + * uid_tag_data_tree_lock + * iface_stat_list_lock + * + * qtaguid_stats_proc_read() + * iface_stat_list_lock + * iface_entry->tag_stat_list_lock + * + * qtudev_open() + * uid_tag_data_tree_lock + * + * qtud_dev_release() + * sock_tag_list_lock + * uid_tag_data_tree_lock */ -#define IFS_MAX_COUNTER_SETS 2 - -enum ifs_tx_rx { - IFS_TX, - IFS_RX, - IFS_MAX_DIRECTIONS -}; - -/* For now, TCP, UDP, the rest */ -enum ifs_proto { - IFS_TCP, - IFS_UDP, - IFS_PROTO_OTHER, - IFS_MAX_PROTOS -}; - -struct byte_packet_counters { - uint64_t bytes; - uint64_t packets; -}; - -struct data_counters { - struct byte_packet_counters bpc[IFS_MAX_COUNTER_SETS][IFS_MAX_DIRECTIONS][IFS_MAX_PROTOS]; -}; - -/* Generic tag based node used as a base for rb_tree ops. */ -struct tag_node { - struct rb_node node; - tag_t tag; -}; - -struct tag_stat { - struct tag_node tn; - struct data_counters counters; - /* - * If this tag is acct_tag based, we need to count against the - * matching parent uid_tag. 
- */ - struct data_counters *parent_counters; -}; - -struct iface_stat { - struct list_head list; - char *ifname; - uint64_t rx_bytes; - uint64_t rx_packets; - uint64_t tx_bytes; - uint64_t tx_packets; - bool active; - struct proc_dir_entry *proc_ptr; - - struct rb_root tag_stat_tree; - spinlock_t tag_stat_list_lock; -}; - static LIST_HEAD(iface_stat_list); static DEFINE_SPINLOCK(iface_stat_list_lock); -/* This is needed to create proc_dir_entries from atomic context. */ -struct iface_stat_work { - struct work_struct iface_work; - struct iface_stat *iface_entry; -}; - -/* - * Track tag that this socket is transferring data for, and not necessarily - * the uid that owns the socket. - * This is the tag against which tag_stat.counters will be billed. - */ -struct sock_tag { - struct rb_node sock_node; - struct sock *sk; /* Only used as a number, never dereferenced */ - /* The socket is needed for sockfd_put() */ - struct socket *socket; - - tag_t tag; -}; - -struct qtaguid_event_counts { - /* Various successful events */ - atomic64_t sockets_tagged; - atomic64_t sockets_untagged; - atomic64_t counter_set_changes; - atomic64_t delete_cmds; - atomic64_t iface_events; /* Number of NETDEV_* events handled */ - /* - * match_found_sk_*: numbers related to the netfilter matching - * function finding a sock for the sk_buff. - */ - atomic64_t match_found_sk; /* An sk was already in the sk_buff. */ - /* The connection tracker had the sk. */ - atomic64_t match_found_sk_in_ct; - /* - * No sk could be found. No apparent owner. Could happen with - * unsolicited traffic. - */ - atomic64_t match_found_sk_none; -}; -static struct qtaguid_event_counts qtu_events; - static struct rb_root sock_tag_tree = RB_ROOT; static DEFINE_SPINLOCK(sock_tag_list_lock); -/* Track the set active_set for the given tag. */ -struct tag_counter_set { - struct tag_node tn; - int active_set; -}; - static struct rb_root tag_counter_set_tree = RB_ROOT; static DEFINE_SPINLOCK(tag_counter_set_list_lock); -static bool qtaguid_mt(const struct sk_buff *skb, struct xt_action_param *par); +static struct rb_root uid_tag_data_tree = RB_ROOT; +static DEFINE_SPINLOCK(uid_tag_data_tree_lock); + +static struct rb_root proc_qtu_data_tree = RB_ROOT; +/* No proc_qtu_data_tree_lock; use uid_tag_data_tree_lock */ +static struct qtaguid_event_counts qtu_events; /*----------------------------------------------*/ -static inline int tag_compare(tag_t t1, tag_t t2) +static bool can_manipulate_uids(void) { - return t1 < t2 ? -1 : t1 == t2 ? 
0 : 1; + /* root pwnd */ + return unlikely(!current_fsuid()) || unlikely(!proc_ctrl_write_gid) + || in_egroup_p(proc_ctrl_write_gid); } -static inline tag_t combine_atag_with_uid(tag_t acct_tag, uid_t uid) -{ - return acct_tag | uid; -} -static inline tag_t make_tag_from_uid(uid_t uid) -{ - return uid; -} -static inline uid_t get_uid_from_tag(tag_t tag) -{ - return tag & 0xFFFFFFFFULL; -} -static inline tag_t get_utag_from_tag(tag_t tag) -{ - return tag & 0xFFFFFFFFULL; -} -static inline tag_t get_atag_from_tag(tag_t tag) +static bool can_impersonate_uid(uid_t uid) { - return tag & ~0xFFFFFFFFULL; + return uid == current_fsuid() || can_manipulate_uids(); } -static inline bool valid_atag(tag_t tag) +static bool can_read_other_uid_stats(uid_t uid) { - return !(tag & 0xFFFFFFFFULL); + /* root pwnd */ + return unlikely(!current_fsuid()) || uid == current_fsuid() + || unlikely(!proc_stats_readall_gid) + || in_egroup_p(proc_stats_readall_gid); } static inline void dc_add_byte_packets(struct data_counters *counters, int set, @@ -324,12 +259,13 @@ static struct tag_node *tag_node_tree_search(struct rb_root *root, tag_t tag) while (node) { struct tag_node *data = rb_entry(node, struct tag_node, node); - int result = tag_compare(tag, data->tag); - RB_DEBUG("qtaguid: tag_node_tree_search(): tag=0x%llx" - " (uid=%d)\n", - data->tag, - get_uid_from_tag(data->tag)); - + int result; + RB_DEBUG("qtaguid: tag_node_tree_search(0x%llx): " + " node=%p data=%p\n", tag, node, data); + result = tag_compare(tag, data->tag); + RB_DEBUG("qtaguid: tag_node_tree_search(0x%llx): " + " data.tag=0x%llx (uid=%u) res=%d\n", + tag, data->tag, get_uid_from_tag(data->tag), result); if (result < 0) node = node->rb_left; else if (result > 0) @@ -349,8 +285,8 @@ static void tag_node_tree_insert(struct tag_node *data, struct rb_root *root) struct tag_node *this = rb_entry(*new, struct tag_node, node); int result = tag_compare(data->tag, this->tag); - RB_DEBUG("qtaguid: tag_node_tree_insert(): tag=0x%llx" - " (uid=%d)\n", + RB_DEBUG("qtaguid: %s(): tag=0x%llx" + " (uid=%u)\n", __func__, this->tag, get_uid_from_tag(this->tag)); parent = *new; @@ -396,6 +332,19 @@ static struct tag_counter_set *tag_counter_set_tree_search(struct rb_root *root, } +static void tag_ref_tree_insert(struct tag_ref *data, struct rb_root *root) +{ + tag_node_tree_insert(&data->tn, root); +} + +static struct tag_ref *tag_ref_tree_search(struct rb_root *root, tag_t tag) +{ + struct tag_node *node = tag_node_tree_search(root, tag); + if (!node) + return NULL; + return rb_entry(&node->node, struct tag_ref, tn.node); +} + static struct sock_tag *sock_tag_tree_search(struct rb_root *root, const struct sock *sk) { @@ -404,10 +353,9 @@ static struct sock_tag *sock_tag_tree_search(struct rb_root *root, while (node) { struct sock_tag *data = rb_entry(node, struct sock_tag, sock_node); - ptrdiff_t result = sk - data->sk; - if (result < 0) + if (sk < data->sk) node = node->rb_left; - else if (result > 0) + else if (sk > data->sk) node = node->rb_right; else return data; @@ -423,11 +371,10 @@ static void sock_tag_tree_insert(struct sock_tag *data, struct rb_root *root) while (*new) { struct sock_tag *this = rb_entry(*new, struct sock_tag, sock_node); - ptrdiff_t result = data->sk - this->sk; parent = *new; - if (result < 0) + if (data->sk < this->sk) new = &((*new)->rb_left); - else if (result > 0) + else if (data->sk > this->sk) new = &((*new)->rb_right); else BUG(); @@ -438,6 +385,292 @@ static void sock_tag_tree_insert(struct sock_tag *data, struct rb_root *root) 
rb_insert_color(&data->sock_node, root); } +static void sock_tag_tree_erase(struct rb_root *st_to_free_tree) +{ + struct rb_node *node; + struct sock_tag *st_entry; + + node = rb_first(st_to_free_tree); + while (node) { + st_entry = rb_entry(node, struct sock_tag, sock_node); + node = rb_next(node); + CT_DEBUG("qtaguid: %s(): " + "erase st: sk=%p tag=0x%llx (uid=%u)\n", __func__, + st_entry->sk, + st_entry->tag, + get_uid_from_tag(st_entry->tag)); + rb_erase(&st_entry->sock_node, st_to_free_tree); + sockfd_put(st_entry->socket); + kfree(st_entry); + } +} + +static struct proc_qtu_data *proc_qtu_data_tree_search(struct rb_root *root, + const pid_t pid) +{ + struct rb_node *node = root->rb_node; + + while (node) { + struct proc_qtu_data *data = rb_entry(node, + struct proc_qtu_data, + node); + if (pid < data->pid) + node = node->rb_left; + else if (pid > data->pid) + node = node->rb_right; + else + return data; + } + return NULL; +} + +static void proc_qtu_data_tree_insert(struct proc_qtu_data *data, + struct rb_root *root) +{ + struct rb_node **new = &(root->rb_node), *parent = NULL; + + /* Figure out where to put new node */ + while (*new) { + struct proc_qtu_data *this = rb_entry(*new, + struct proc_qtu_data, + node); + parent = *new; + if (data->pid < this->pid) + new = &((*new)->rb_left); + else if (data->pid > this->pid) + new = &((*new)->rb_right); + else + BUG(); + } + + /* Add new node and rebalance tree. */ + rb_link_node(&data->node, parent, new); + rb_insert_color(&data->node, root); +} + +static void uid_tag_data_tree_insert(struct uid_tag_data *data, + struct rb_root *root) +{ + struct rb_node **new = &(root->rb_node), *parent = NULL; + + /* Figure out where to put new node */ + while (*new) { + struct uid_tag_data *this = rb_entry(*new, + struct uid_tag_data, + node); + parent = *new; + if (data->uid < this->uid) + new = &((*new)->rb_left); + else if (data->uid > this->uid) + new = &((*new)->rb_right); + else + BUG(); + } + + /* Add new node and rebalance tree. */ + rb_link_node(&data->node, parent, new); + rb_insert_color(&data->node, root); +} + +static struct uid_tag_data *uid_tag_data_tree_search(struct rb_root *root, + uid_t uid) +{ + struct rb_node *node = root->rb_node; + + while (node) { + struct uid_tag_data *data = rb_entry(node, + struct uid_tag_data, + node); + if (uid < data->uid) + node = node->rb_left; + else if (uid > data->uid) + node = node->rb_right; + else + return data; + } + return NULL; +} + +/* + * Allocates a new uid_tag_data struct if needed. + * Returns a pointer to the found or allocated uid_tag_data. + * Returns a PTR_ERR on failures, and lock is not held. + * If found is not NULL: + * sets *found to true if not allocated. + * sets *found to false if allocated. + */ +struct uid_tag_data *get_uid_data(uid_t uid, bool *found_res) +{ + struct uid_tag_data *utd_entry; + + /* Look for top level uid_tag_data for the UID */ + utd_entry = uid_tag_data_tree_search(&uid_tag_data_tree, uid); + DR_DEBUG("qtaguid: get_uid_data(%u) utd=%p\n", uid, utd_entry); + + if (found_res) + *found_res = utd_entry; + if (utd_entry) + return utd_entry; + + utd_entry = kzalloc(sizeof(*utd_entry), GFP_ATOMIC); + if (!utd_entry) { + pr_err("qtaguid: get_uid_data(%u): " + "tag data alloc failed\n", uid); + return ERR_PTR(-ENOMEM); + } + + utd_entry->uid = uid; + utd_entry->tag_ref_tree = RB_ROOT; + uid_tag_data_tree_insert(utd_entry, &uid_tag_data_tree); + DR_DEBUG("qtaguid: get_uid_data(%u) new utd=%p\n", uid, utd_entry); + return utd_entry; +} + +/* Never returns NULL. 
Either PTR_ERR or a valid ptr. */ +static struct tag_ref *new_tag_ref(tag_t new_tag, + struct uid_tag_data *utd_entry) +{ + struct tag_ref *tr_entry; + int res; + + if (utd_entry->num_active_tags + 1 > max_sock_tags) { + pr_info("qtaguid: new_tag_ref(0x%llx): " + "tag ref alloc quota exceeded. max=%d\n", + new_tag, max_sock_tags); + res = -EMFILE; + goto err_res; + + } + + tr_entry = kzalloc(sizeof(*tr_entry), GFP_ATOMIC); + if (!tr_entry) { + pr_err("qtaguid: new_tag_ref(0x%llx): " + "tag ref alloc failed\n", + new_tag); + res = -ENOMEM; + goto err_res; + } + tr_entry->tn.tag = new_tag; + /* tr_entry->num_sock_tags handled by caller */ + utd_entry->num_active_tags++; + tag_ref_tree_insert(tr_entry, &utd_entry->tag_ref_tree); + DR_DEBUG("qtaguid: new_tag_ref(0x%llx): " + " inserted new tag ref\n", + new_tag); + return tr_entry; + +err_res: + return ERR_PTR(res); +} + +static struct tag_ref *lookup_tag_ref(tag_t full_tag, + struct uid_tag_data **utd_res) +{ + struct uid_tag_data *utd_entry; + struct tag_ref *tr_entry; + bool found_utd; + uid_t uid = get_uid_from_tag(full_tag); + + DR_DEBUG("qtaguid: lookup_tag_ref(tag=0x%llx (uid=%u))\n", + full_tag, uid); + + utd_entry = get_uid_data(uid, &found_utd); + if (IS_ERR_OR_NULL(utd_entry)) { + if (utd_res) + *utd_res = utd_entry; + return NULL; + } + + tr_entry = tag_ref_tree_search(&utd_entry->tag_ref_tree, full_tag); + if (utd_res) + *utd_res = utd_entry; + DR_DEBUG("qtaguid: lookup_tag_ref(0x%llx) utd_entry=%p tr_entry=%p\n", + full_tag, utd_entry, tr_entry); + return tr_entry; +} + +/* Never returns NULL. Either PTR_ERR or a valid ptr. */ +static struct tag_ref *get_tag_ref(tag_t full_tag, + struct uid_tag_data **utd_res) +{ + struct uid_tag_data *utd_entry; + struct tag_ref *tr_entry; + + DR_DEBUG("qtaguid: get_tag_ref(0x%llx)\n", + full_tag); + spin_lock_bh(&uid_tag_data_tree_lock); + tr_entry = lookup_tag_ref(full_tag, &utd_entry); + BUG_ON(IS_ERR_OR_NULL(utd_entry)); + if (!tr_entry) + tr_entry = new_tag_ref(full_tag, utd_entry); + + spin_unlock_bh(&uid_tag_data_tree_lock); + if (utd_res) + *utd_res = utd_entry; + DR_DEBUG("qtaguid: get_tag_ref(0x%llx) utd=%p tr=%p\n", + full_tag, utd_entry, tr_entry); + return tr_entry; +} + +/* Checks and maybe frees the UID Tag Data entry */ +static void put_utd_entry(struct uid_tag_data *utd_entry) +{ + /* Are we done with the UID tag data entry? */ + if (RB_EMPTY_ROOT(&utd_entry->tag_ref_tree)) { + DR_DEBUG("qtaguid: %s(): " + "erase utd_entry=%p uid=%u " + "by pid=%u tgid=%u uid=%u\n", __func__, + utd_entry, utd_entry->uid, + current->pid, current->tgid, current_fsuid()); + BUG_ON(utd_entry->num_active_tags); + rb_erase(&utd_entry->node, &uid_tag_data_tree); + kfree(utd_entry); + } else { + DR_DEBUG("qtaguid: %s(): " + "utd_entry=%p still has %d tags\n", __func__, + utd_entry, utd_entry->num_active_tags); + BUG_ON(!utd_entry->num_active_tags); + } +} + +/* + * If no sock_tags are using this tag_ref, + * decrements refcount of utd_entry, removes tr_entry + * from utd_entry->tag_ref_tree and frees. 
+ */ +static void free_tag_ref_from_utd_entry(struct tag_ref *tr_entry, + struct uid_tag_data *utd_entry) +{ + DR_DEBUG("qtaguid: %s(): %p tag=0x%llx (uid=%u)\n", __func__, + tr_entry, tr_entry->tn.tag, + get_uid_from_tag(tr_entry->tn.tag)); + if (!tr_entry->num_sock_tags) { + BUG_ON(!utd_entry->num_active_tags); + utd_entry->num_active_tags--; + rb_erase(&tr_entry->tn.node, &utd_entry->tag_ref_tree); + DR_DEBUG("qtaguid: %s(): erased %p\n", __func__, tr_entry); + kfree(tr_entry); + } +} + +static void put_tag_ref_tree(tag_t full_tag, struct uid_tag_data *utd_entry) +{ + struct rb_node *node; + struct tag_ref *tr_entry; + tag_t acct_tag; + + DR_DEBUG("qtaguid: %s(tag=0x%llx (uid=%u))\n", __func__, + full_tag, get_uid_from_tag(full_tag)); + acct_tag = get_atag_from_tag(full_tag); + node = rb_first(&utd_entry->tag_ref_tree); + while (node) { + tr_entry = rb_entry(node, struct tag_ref, tn.node); + node = rb_next(node); + if (!acct_tag || tr_entry->tn.tag == full_tag) + free_tag_ref_from_utd_entry(tr_entry, utd_entry); + } +} + static int read_proc_u64(char *page, char **start, off_t off, int count, int *eof, void *data) { @@ -843,8 +1076,8 @@ static struct tag_stat *create_if_tag_stat(struct iface_stat *iface_entry, tag_t tag) { struct tag_stat *new_tag_stat_entry = NULL; - IF_DEBUG("qtaguid: iface_stat: create_if_tag_stat(): ife=%p tag=0x%llx" - " (uid=%u)\n", + IF_DEBUG("qtaguid: iface_stat: %s(): ife=%p tag=0x%llx" + " (uid=%u)\n", __func__, iface_entry, tag, get_uid_from_tag(tag)); new_tag_stat_entry = kzalloc(sizeof(*new_tag_stat_entry), GFP_ATOMIC); if (!new_tag_stat_entry) { @@ -894,9 +1127,9 @@ static void if_tag_stat_update(const char *ifname, uid_t uid, acct_tag = get_atag_from_tag(tag); uid_tag = get_utag_from_tag(tag); } else { - uid_tag = make_tag_from_uid(uid); - acct_tag = 0; + acct_tag = make_atag_from_value(0); tag = combine_atag_with_uid(acct_tag, uid); + uid_tag = make_tag_from_uid(uid); } MT_DEBUG("qtaguid: iface_stat: stat_update(): " " looking for tag=0x%llx (uid=%u) in ife=%p\n", @@ -1289,22 +1522,23 @@ static int qtaguid_ctrl_proc_read(char *page, char **num_items_returned, char *outp = page; int len; uid_t uid; - struct sock_tag *sock_tag_entry; struct rb_node *node; + struct sock_tag *sock_tag_entry; int item_index = 0; + int indent_level = 0; + long f_count; if (unlikely(module_passive)) { *eof = 1; return 0; } - /* TODO: support skipping num_items_returned on entry. 
*/ - CT_DEBUG("qtaguid: proc ctrl page=%p off=%ld char_count=%d *eof=%d\n", - page, items_to_skip, char_count, *eof); - if (*eof) return 0; + CT_DEBUG("qtaguid: proc ctrl page=%p off=%ld char_count=%d *eof=%d\n", + page, items_to_skip, char_count, *eof); + spin_lock_bh(&sock_tag_list_lock); for (node = rb_first(&sock_tag_tree); node; @@ -1313,14 +1547,21 @@ static int qtaguid_ctrl_proc_read(char *page, char **num_items_returned, continue; sock_tag_entry = rb_entry(node, struct sock_tag, sock_node); uid = get_uid_from_tag(sock_tag_entry->tag); - CT_DEBUG("qtaguid: proc_read(): sk=%p tag=0x%llx (uid=%u)\n", + CT_DEBUG("qtaguid: proc_read(): sk=%p tag=0x%llx (uid=%u) " + "pid=%u\n", sock_tag_entry->sk, sock_tag_entry->tag, - uid + uid, + sock_tag_entry->pid ); + f_count = atomic_long_read( + &sock_tag_entry->socket->file->f_count); len = snprintf(outp, char_count, - "sock=%p tag=0x%llx (uid=%u)\n", - sock_tag_entry->sk, sock_tag_entry->tag, uid); + "sock=%p tag=0x%llx (uid=%u) pid=%u " + "f_count=%lu\n", + sock_tag_entry->sk, + sock_tag_entry->tag, uid, + sock_tag_entry->pid, f_count); if (len >= char_count) { spin_unlock_bh(&sock_tag_list_lock); *outp = '\0'; @@ -1359,28 +1600,31 @@ static int qtaguid_ctrl_proc_read(char *page, char **num_items_returned, (*num_items_returned)++; } - *eof = 1; - return outp - page; -} +#ifdef CDEBUG + /* Count the following as part of the last item_index */ + if (item_index > items_to_skip) { + CT_DEBUG("qtaguid: proc ctrl state debug {\n"); + spin_lock_bh(&sock_tag_list_lock); + prdebug_sock_tag_tree(indent_level, &sock_tag_tree); + spin_unlock_bh(&sock_tag_list_lock); -static bool can_manipulate_uids(void) -{ - /* root pwnd */ - return unlikely(!current_fsuid()) || unlikely(!proc_ctrl_write_gid) - || in_egroup_p(proc_ctrl_write_gid); -} + spin_lock_bh(&uid_tag_data_tree_lock); + prdebug_uid_tag_data_tree(indent_level, &uid_tag_data_tree); + prdebug_proc_qtu_data_tree(indent_level, &proc_qtu_data_tree); + spin_unlock_bh(&uid_tag_data_tree_lock); -static bool can_impersonate_uid(uid_t uid) -{ - return uid == current_fsuid() || can_manipulate_uids(); -} + spin_lock_bh(&iface_stat_list_lock); + prdebug_iface_stat_list(indent_level, &iface_stat_list); + spin_unlock_bh(&iface_stat_list_lock); -static bool can_read_other_uid_stats(uid_t uid) -{ - /* root pwnd */ - return unlikely(!current_fsuid()) || uid == current_fsuid() - || unlikely(!proc_stats_readall_gid) - || in_egroup_p(proc_stats_readall_gid); + CT_DEBUG("qtaguid: proc ctrl state debug }\n"); + + + } +#endif + + *eof = 1; + return outp - page; } /* @@ -1401,6 +1645,8 @@ static int ctrl_cmd_delete(const char *input) struct rb_root st_to_free_tree = RB_ROOT; struct tag_stat *ts_entry; struct tag_counter_set *tcs_entry; + struct tag_ref *tr_entry; + struct uid_tag_data *utd_entry; argc = sscanf(input, "%c %llu %u", &cmd, &acct_tag, &uid); CT_DEBUG("qtaguid: ctrl_delete(%s): argc=%d cmd=%c " @@ -1419,12 +1665,17 @@ static int ctrl_cmd_delete(const char *input) uid = current_fsuid(); } else if (!can_impersonate_uid(uid)) { pr_info("qtaguid: ctrl_delete(%s): " - "insufficient priv from pid=%u uid=%u\n", - input, current->pid, current_fsuid()); + "insufficient priv from pid=%u tgid=%u uid=%u\n", + input, current->pid, current->tgid, current_fsuid()); res = -EPERM; goto err; } + tag = combine_atag_with_uid(acct_tag, uid); + CT_DEBUG("qtaguid: ctrl_delete(): " + "looking for tag=0x%llx (uid=%u)\n", + tag, uid); + /* Delete socket tags */ spin_lock_bh(&sock_tag_list_lock); node = rb_first(&sock_tag_tree); @@ 
-1435,32 +1686,25 @@ static int ctrl_cmd_delete(const char *input) if (entry_uid != uid) continue; + CT_DEBUG("qtaguid: ctrl_delete(): st tag=0x%llx (uid=%u)\n", + st_entry->tag, entry_uid); + if (!acct_tag || st_entry->tag == tag) { rb_erase(&st_entry->sock_node, &sock_tag_tree); /* Can't sockfd_put() within spinlock, do it later. */ sock_tag_tree_insert(st_entry, &st_to_free_tree); + tr_entry = lookup_tag_ref(st_entry->tag, NULL); + BUG_ON(tr_entry->num_sock_tags <= 0); + tr_entry->num_sock_tags--; } } spin_unlock_bh(&sock_tag_list_lock); - node = rb_first(&st_to_free_tree); - while (node) { - st_entry = rb_entry(node, struct sock_tag, sock_node); - node = rb_next(node); - CT_DEBUG("qtaguid: ctrl_delete(): " - "erase st: sk=%p tag=0x%llx (uid=%u)\n", - st_entry->sk, - st_entry->tag, - entry_uid); - rb_erase(&st_entry->sock_node, &st_to_free_tree); - sockfd_put(st_entry->socket); - kfree(st_entry); - } - - tag = combine_atag_with_uid(acct_tag, uid); + sock_tag_tree_erase(&st_to_free_tree); /* Delete tag counter-sets */ spin_lock_bh(&tag_counter_set_list_lock); + /* Counter sets are only on the uid tag, not full tag */ tcs_entry = tag_counter_set_tree_search(&tag_counter_set_tree, tag); if (tcs_entry) { CT_DEBUG("qtaguid: ctrl_delete(): " @@ -1485,6 +1729,11 @@ static int ctrl_cmd_delete(const char *input) ts_entry = rb_entry(node, struct tag_stat, tn.node); entry_uid = get_uid_from_tag(ts_entry->tn.tag); node = rb_next(node); + + CT_DEBUG("qtaguid: ctrl_delete(): " + "ts tag=0x%llx (uid=%u)\n", + ts_entry->tn.tag, entry_uid); + if (entry_uid != uid) continue; if (!acct_tag || ts_entry->tn.tag == tag) { @@ -1501,6 +1750,30 @@ static int ctrl_cmd_delete(const char *input) spin_unlock_bh(&iface_entry->tag_stat_list_lock); } spin_unlock_bh(&iface_stat_list_lock); + + /* Cleanup the uid_tag_data */ + spin_lock_bh(&uid_tag_data_tree_lock); + node = rb_first(&uid_tag_data_tree); + while (node) { + utd_entry = rb_entry(node, struct uid_tag_data, node); + entry_uid = utd_entry->uid; + node = rb_next(node); + + CT_DEBUG("qtaguid: ctrl_delete(): " + "utd uid=%u\n", + entry_uid); + + if (entry_uid != uid) + continue; + /* + * Go over the tag_refs, and those that don't have + * sock_tags using them are freed. + */ + put_tag_ref_tree(tag, utd_entry); + put_utd_entry(utd_entry); + } + spin_unlock_bh(&uid_tag_data_tree_lock); + atomic64_inc(&qtu_events.delete_cmds); res = 0; @@ -1533,8 +1806,8 @@ static int ctrl_cmd_counter_set(const char *input) } if (!can_manipulate_uids()) { pr_info("qtaguid: ctrl_counterset(%s): " - "insufficient priv from pid=%u uid=%u\n", - input, current->pid, current_fsuid()); + "insufficient priv from pid=%u tgid=%u uid=%u\n", + input, current->pid, current->tgid, current_fsuid()); res = -EPERM; goto err; } @@ -1572,11 +1845,14 @@ static int ctrl_cmd_tag(const char *input) char cmd; int sock_fd = 0; uid_t uid = 0; - tag_t acct_tag = 0; + tag_t acct_tag = make_atag_from_value(0); + tag_t full_tag; struct socket *el_socket; - int refcnt = -1; int res, argc; struct sock_tag *sock_tag_entry; + struct tag_ref *tag_ref_entry; + struct uid_tag_data *uid_tag_data_entry; + struct proc_qtu_data *pqd_entry; /* Unassigned args will get defaulted later. 
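
For orientation, a minimal user-space sketch of the command protocol these ctrl_cmd_* handlers parse (illustrative only; the /proc/net/xt_qtaguid/ctrl path is the module's usual control file and is an assumption here, as is the tag value, and error handling is trimmed). The /dev/xt_qtaguid misc device registered further down should be opened first so the module can track the process.

    #include <fcntl.h>
    #include <stdio.h>
    #include <string.h>
    #include <sys/socket.h>
    #include <unistd.h>

    int main(void)
    {
        /* Open the misc device first so qtaguid can track this process. */
        int qtu = open("/dev/xt_qtaguid", O_RDONLY);
        int ctrl = open("/proc/net/xt_qtaguid/ctrl", O_WRONLY);
        int sk = socket(AF_INET, SOCK_STREAM, 0);
        unsigned long long acct_tag = 1ULL << 32;   /* make_atag_from_value(1) */
        char cmd[64];

        if (qtu < 0 || ctrl < 0 || sk < 0)
            return 1;

        /* "t <sock_fd> <acct_tag> [<uid>]": tag the socket; uid defaults to the caller. */
        snprintf(cmd, sizeof(cmd), "t %d %llu", sk, acct_tag);
        write(ctrl, cmd, strlen(cmd));

        /* ... traffic on sk is now billed to {acct_tag=1, uid=caller} ... */

        /* "u <sock_fd>": untag; "d <acct_tag> <uid>" would drop the stats. */
        snprintf(cmd, sizeof(cmd), "u %d", sk);
        write(ctrl, cmd, strlen(cmd));

        close(sk);
        close(ctrl);
        close(qtu);
        return 0;
    }
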
*/ argc = sscanf(input, "%c %d %llu %u", &cmd, &sock_fd, &acct_tag, &uid); @@ -1593,44 +1869,66 @@ static int ctrl_cmd_tag(const char *input) " sock_fd=%d err=%d\n", input, sock_fd, res); goto err; } - refcnt = atomic_read(&el_socket->file->f_count); - CT_DEBUG("qtaguid: ctrl_tag(%s): socket->...->f_count=%d\n", - input, refcnt); + CT_DEBUG("qtaguid: ctrl_tag(%s): socket->...->f_count=%ld ->sk=%p\n", + input, atomic_long_read(&el_socket->file->f_count), + el_socket->sk); if (argc < 3) { - acct_tag = 0; + acct_tag = make_atag_from_value(0); } else if (!valid_atag(acct_tag)) { pr_info("qtaguid: ctrl_tag(%s): invalid tag\n", input); res = -EINVAL; goto err_put; } CT_DEBUG("qtaguid: ctrl_tag(%s): " - "uid=%u euid=%u fsuid=%u " + "pid=%u tgid=%u uid=%u euid=%u fsuid=%u " "in_group=%d in_egroup=%d\n", - input, current_uid(), current_euid(), current_fsuid(), - in_group_p(proc_stats_readall_gid), - in_egroup_p(proc_stats_readall_gid)); + input, current->pid, current->tgid, current_uid(), + current_euid(), current_fsuid(), + in_group_p(proc_ctrl_write_gid), + in_egroup_p(proc_ctrl_write_gid)); if (argc < 4) { uid = current_fsuid(); } else if (!can_impersonate_uid(uid)) { pr_info("qtaguid: ctrl_tag(%s): " - "insufficient priv from pid=%u uid=%u\n", - input, current->pid, current_fsuid()); + "insufficient priv from pid=%u tgid=%u uid=%u\n", + input, current->pid, current->tgid, current_fsuid()); res = -EPERM; goto err_put; } + full_tag = combine_atag_with_uid(acct_tag, uid); spin_lock_bh(&sock_tag_list_lock); sock_tag_entry = get_sock_stat_nl(el_socket->sk); + tag_ref_entry = get_tag_ref(full_tag, &uid_tag_data_entry); + if (IS_ERR(tag_ref_entry)) { + res = PTR_ERR(tag_ref_entry); + spin_unlock_bh(&sock_tag_list_lock); + goto err_put; + } + tag_ref_entry->num_sock_tags++; if (sock_tag_entry) { + struct tag_ref *prev_tag_ref_entry; + + CT_DEBUG("qtaguid: ctrl_tag(%s): retag for sk=%p " + "st@%p ...->f_count=%ld\n", + input, el_socket->sk, sock_tag_entry, + atomic_long_read(&el_socket->file->f_count)); /* * This is a re-tagging, so release the sock_fd that was * locked at the time of the 1st tagging. + * There is still the ref from this call's sockfd_lookup() so + * it can be done within the spinlock. */ sockfd_put(sock_tag_entry->socket); - refcnt--; - sock_tag_entry->tag = combine_atag_with_uid(acct_tag, - uid); + prev_tag_ref_entry = lookup_tag_ref(sock_tag_entry->tag, + &uid_tag_data_entry); + BUG_ON(IS_ERR_OR_NULL(prev_tag_ref_entry)); + BUG_ON(prev_tag_ref_entry->num_sock_tags <= 0); + prev_tag_ref_entry->num_sock_tags--; + sock_tag_entry->tag = full_tag; } else { + CT_DEBUG("qtaguid: ctrl_tag(%s): newtag for sk=%p\n", + input, el_socket->sk); sock_tag_entry = kzalloc(sizeof(*sock_tag_entry), GFP_ATOMIC); if (!sock_tag_entry) { @@ -1639,29 +1937,54 @@ static int ctrl_cmd_tag(const char *input) input); spin_unlock_bh(&sock_tag_list_lock); res = -ENOMEM; - goto err_put; + goto err_tag_unref_put; } sock_tag_entry->sk = el_socket->sk; sock_tag_entry->socket = el_socket; + sock_tag_entry->pid = current->tgid; sock_tag_entry->tag = combine_atag_with_uid(acct_tag, uid); + spin_lock_bh(&uid_tag_data_tree_lock); + pqd_entry = proc_qtu_data_tree_search( + &proc_qtu_data_tree, current->tgid); + /* + * TODO: remove if, and start failing. + * At first, we want to catch user-space code that is not + * opening the /dev/xt_qtaguid. + */ + WARN_ONCE(IS_ERR_OR_NULL(pqd_entry), + "qtaguid: User space forgot to open /dev/xt_qtaguid? 
" + "pid=%u tgid=%u uid=%u\n", + current->pid, current->tgid, current_fsuid()); + if (!IS_ERR_OR_NULL(pqd_entry)) { + list_add(&sock_tag_entry->list, + &pqd_entry->sock_tag_list); + } + spin_unlock_bh(&uid_tag_data_tree_lock); + sock_tag_tree_insert(sock_tag_entry, &sock_tag_tree); atomic64_inc(&qtu_events.sockets_tagged); } spin_unlock_bh(&sock_tag_list_lock); /* We keep the ref to the socket (file) until it is untagged */ - CT_DEBUG("qtaguid: ctrl_tag(%s): done. socket->...->f_count=%d\n", - input, - el_socket ? atomic_read(&el_socket->file->f_count) : -1); + CT_DEBUG("qtaguid: ctrl_tag(%s): done st@%p ...->f_count=%ld\n", + input, sock_tag_entry, + atomic_long_read(&el_socket->file->f_count)); return 0; +err_tag_unref_put: + BUG_ON(tag_ref_entry->num_sock_tags <= 0); + tag_ref_entry->num_sock_tags--; + free_tag_ref_from_utd_entry(tag_ref_entry, uid_tag_data_entry); err_put: + CT_DEBUG("qtaguid: ctrl_tag(%s): done. ...->f_count=%ld\n", + input, atomic_long_read(&el_socket->file->f_count) - 1); /* Release the sock_fd that was grabbed by sockfd_lookup(). */ sockfd_put(el_socket); - refcnt--; + return res; + err: - CT_DEBUG("qtaguid: ctrl_tag(%s): done. socket->...->f_count=%d\n", - input, refcnt); + CT_DEBUG("qtaguid: ctrl_tag(%s): done.\n", input); return res; } @@ -1670,9 +1993,11 @@ static int ctrl_cmd_untag(const char *input) char cmd; int sock_fd = 0; struct socket *el_socket; - int refcnt = -1; int res, argc; struct sock_tag *sock_tag_entry; + struct tag_ref *tag_ref_entry; + struct uid_tag_data *utd_entry; + struct proc_qtu_data *pqd_entry; argc = sscanf(input, "%c %d", &cmd, &sock_fd); CT_DEBUG("qtaguid: ctrl_untag(%s): argc=%d cmd=%c sock_fd=%d\n", @@ -1687,9 +2012,9 @@ static int ctrl_cmd_untag(const char *input) " sock_fd=%d err=%d\n", input, sock_fd, res); goto err; } - refcnt = atomic_read(&el_socket->file->f_count); - CT_DEBUG("qtaguid: ctrl_untag(%s): socket->...->f_count=%d\n", - input, refcnt); + CT_DEBUG("qtaguid: ctrl_untag(%s): socket->...->f_count=%ld ->sk=%p\n", + input, atomic_long_read(&el_socket->file->f_count), + el_socket->sk); spin_lock_bh(&sock_tag_list_lock); sock_tag_entry = get_sock_stat_nl(el_socket->sk); if (!sock_tag_entry) { @@ -1703,31 +2028,56 @@ static int ctrl_cmd_untag(const char *input) */ rb_erase(&sock_tag_entry->sock_node, &sock_tag_tree); + tag_ref_entry = lookup_tag_ref(sock_tag_entry->tag, &utd_entry); + BUG_ON(!tag_ref_entry); + BUG_ON(tag_ref_entry->num_sock_tags <= 0); + spin_lock_bh(&uid_tag_data_tree_lock); + pqd_entry = proc_qtu_data_tree_search( + &proc_qtu_data_tree, current->tgid); + /* + * TODO: remove if, and start failing. + * At first, we want to catch user-space code that is not + * opening the /dev/xt_qtaguid. + */ + WARN_ONCE(IS_ERR_OR_NULL(pqd_entry), + "qtaguid: User space forgot to open /dev/xt_qtaguid? " + "pid=%u tgid=%u uid=%u\n", + current->pid, current->tgid, current_fsuid()); + if (!IS_ERR_OR_NULL(pqd_entry)) + list_del(&sock_tag_entry->list); + spin_unlock_bh(&uid_tag_data_tree_lock); + /* + * We don't free tag_ref from the utd_entry here, + * only during a cmd_delete(). + */ + tag_ref_entry->num_sock_tags--; + spin_unlock_bh(&sock_tag_list_lock); /* * Release the sock_fd that was grabbed at tag time, * and once more for the sockfd_lookup() here. */ sockfd_put(sock_tag_entry->socket); - spin_unlock_bh(&sock_tag_list_lock); + CT_DEBUG("qtaguid: ctrl_untag(%s): done. 
st@%p ...->f_count=%ld\n", + input, sock_tag_entry, + atomic_long_read(&el_socket->file->f_count) - 1); sockfd_put(el_socket); - refcnt -= 2; + kfree(sock_tag_entry); <<<<<<< HEAD ======= atomic64_inc(&qtu_events.sockets_untagged); ->>>>>>> android-omap-3.0 - CT_DEBUG("qtaguid: ctrl_untag(%s): done. socket->...->f_count=%d\n", - input, refcnt); return 0; err_put: + CT_DEBUG("qtaguid: ctrl_untag(%s): done. socket->...->f_count=%ld\n", + input, atomic_long_read(&el_socket->file->f_count) - 1); /* Release the sock_fd that was grabbed by sockfd_lookup(). */ sockfd_put(el_socket); - refcnt--; + return res; + err: - CT_DEBUG("qtaguid: ctrl_untag(%s): done. socket->...->f_count=%d\n", - input, refcnt); + CT_DEBUG("qtaguid: ctrl_untag(%s): done.\n", input); return res; } @@ -1819,11 +2169,11 @@ static int pp_stats_line(struct proc_print_info *ppi, int cnt_set) if (!can_read_other_uid_stats(stat_uid)) { CT_DEBUG("qtaguid: stats line: " - "%s 0x%llx %u: " - "insufficient priv from pid=%u uid=%u\n", + "%s 0x%llx %u: insufficient priv " + "from pid=%u tgid=%u uid=%u\n", ppi->iface_entry->ifname, get_atag_from_tag(tag), stat_uid, - current->pid, current_fsuid()); + current->pid, current->tgid, current_fsuid()); return 0; } if (ppi->item_index++ < ppi->items_to_skip) @@ -1906,7 +2256,7 @@ static int qtaguid_stats_proc_read(char *page, char **num_items_returned, if (unlikely(module_passive)) { len = pp_stats_line(&ppi, 0); /* The header should always be shorter than the buffer. */ - WARN_ON(len >= ppi.char_count); + BUG_ON(len >= ppi.char_count); (*num_items_returned)++; *eof = 1; return len; @@ -1956,6 +2306,163 @@ static int qtaguid_stats_proc_read(char *page, char **num_items_returned, } /*------------------------------------------*/ +static int qtudev_open(struct inode *inode, struct file *file) +{ + struct uid_tag_data *utd_entry; + struct proc_qtu_data *pqd_entry; + struct proc_qtu_data *new_pqd_entry = 0; + int res; + bool utd_entry_found; + + if (unlikely(qtu_proc_handling_passive)) + return 0; + + DR_DEBUG("qtaguid: qtudev_open(): pid=%u tgid=%u uid=%u\n", + current->pid, current->tgid, current_fsuid()); + + spin_lock_bh(&uid_tag_data_tree_lock); + + /* Look for existing uid data, or alloc one. 
*/ + utd_entry = get_uid_data(current_fsuid(), &utd_entry_found); + if (IS_ERR_OR_NULL(utd_entry)) { + res = PTR_ERR(utd_entry); + goto err; + } + + /* Look for existing PID based proc_data */ + pqd_entry = proc_qtu_data_tree_search(&proc_qtu_data_tree, + current->tgid); + if (pqd_entry) { + pr_err("qtaguid: qtudev_open(): %u/%u %u " + "%s already opened\n", + current->pid, current->tgid, current_fsuid(), + QTU_DEV_NAME); + res = -EBUSY; + goto err_unlock_free_utd; + } + + new_pqd_entry = kzalloc(sizeof(*new_pqd_entry), GFP_ATOMIC); + if (!new_pqd_entry) { + pr_err("qtaguid: qtudev_open(): %u/%u %u: " + "proc data alloc failed\n", + current->pid, current->tgid, current_fsuid()); + res = -ENOMEM; + goto err_unlock_free_utd; + } + new_pqd_entry->pid = current->tgid; + INIT_LIST_HEAD(&new_pqd_entry->sock_tag_list); + new_pqd_entry->parent_tag_data = utd_entry; + + proc_qtu_data_tree_insert(new_pqd_entry, + &proc_qtu_data_tree); + + spin_unlock_bh(&uid_tag_data_tree_lock); + DR_DEBUG("qtaguid: tracking data for uid=%u\n", current_fsuid()); + file->private_data = new_pqd_entry; + return 0; + +err_unlock_free_utd: + if (!utd_entry_found) { + rb_erase(&utd_entry->node, &uid_tag_data_tree); + kfree(utd_entry); + } + spin_unlock_bh(&uid_tag_data_tree_lock); +err: + return res; +} + +static int qtudev_release(struct inode *inode, struct file *file) +{ + struct proc_qtu_data *pqd_entry = file->private_data; + struct uid_tag_data *utd_entry = pqd_entry->parent_tag_data; + struct sock_tag *st_entry; + struct rb_root st_to_free_tree = RB_ROOT; + struct list_head *entry, *next; + struct tag_ref *tr; + + if (unlikely(qtu_proc_handling_passive)) + return 0; + + /* + * Do not trust the current->pid, it might just be a kworker cleaning + * up after a dead proc. + */ + DR_DEBUG("qtaguid: qtudev_release(): " + "pid=%u tgid=%u uid=%u " + "pqd_entry=%p->pid=%u utd_entry=%p->active_tags=%d\n", + current->pid, current->tgid, pqd_entry->parent_tag_data->uid, + pqd_entry, pqd_entry->pid, utd_entry, + utd_entry->num_active_tags); + + spin_lock_bh(&sock_tag_list_lock); + spin_lock_bh(&uid_tag_data_tree_lock); + + /* + * If this proc didn't actually tag anything for itself, or has already + * willingly cleaned up itself ... + */ + put_utd_entry(utd_entry); + + list_for_each_safe(entry, next, &pqd_entry->sock_tag_list) { + st_entry = list_entry(entry, struct sock_tag, list); + DR_DEBUG("qtaguid: %s(): " + "erase sock_tag=%p->sk=%p pid=%u tgid=%u uid=%u\n", + __func__, + st_entry, st_entry->sk, + current->pid, current->tgid, + pqd_entry->parent_tag_data->uid); + + utd_entry = uid_tag_data_tree_search( + &uid_tag_data_tree, + get_uid_from_tag(st_entry->tag)); + BUG_ON(IS_ERR_OR_NULL(utd_entry)); + DR_DEBUG("qtaguid: %s(): " + "looking for tag=0x%llx in utd_entry=%p\n", __func__, + st_entry->tag, utd_entry); + tr = tag_ref_tree_search(&utd_entry->tag_ref_tree, + st_entry->tag); + BUG_ON(!tr); + BUG_ON(tr->num_sock_tags <= 0); + tr->num_sock_tags--; + free_tag_ref_from_utd_entry(tr, utd_entry); + + rb_erase(&st_entry->sock_node, &sock_tag_tree); + list_del(&st_entry->list); + /* Can't sockfd_put() within spinlock, do it later. 
*/ + sock_tag_tree_insert(st_entry, &st_to_free_tree); + + /* Do not put_utd_entry(utd_entry) someone elses utd_entry */ + } + + rb_erase(&pqd_entry->node, &proc_qtu_data_tree); + kfree(pqd_entry); + file->private_data = NULL; + + spin_unlock_bh(&uid_tag_data_tree_lock); + spin_unlock_bh(&sock_tag_list_lock); + + + sock_tag_tree_erase(&st_to_free_tree); + + + return 0; +} + +/*------------------------------------------*/ +static const struct file_operations qtudev_fops = { + .owner = THIS_MODULE, + .open = qtudev_open, + .release = qtudev_release, +}; + +static struct miscdevice qtu_device = { + .minor = MISC_DYNAMIC_MINOR, + .name = QTU_DEV_NAME, + .fops = &qtudev_fops, + /* How sad it doesn't allow for defaults: .mode = S_IRUGO | S_IWUSR */ +}; + +/*------------------------------------------*/ static int __init qtaguid_proc_register(struct proc_dir_entry **res_procdir) { int ret; @@ -2017,7 +2524,8 @@ static int __init qtaguid_mt_init(void) { if (qtaguid_proc_register(&xt_qtaguid_procdir) || iface_stat_init(xt_qtaguid_procdir) - || xt_register_match(&qtaguid_mt_reg)) + || xt_register_match(&qtaguid_mt_reg) + || misc_register(&qtu_device)) return -1; return 0; } diff --git a/net/netfilter/xt_qtaguid_internal.h b/net/netfilter/xt_qtaguid_internal.h new file mode 100644 index 0000000..752e196 --- /dev/null +++ b/net/netfilter/xt_qtaguid_internal.h @@ -0,0 +1,305 @@ +/* + * Kernel iptables module to track stats for packets based on user tags. + * + * (C) 2011 Google, Inc + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ +#ifndef __XT_QTAGUID_INTERNAL_H__ +#define __XT_QTAGUID_INTERNAL_H__ + +#include <linux/types.h> +#include <linux/rbtree.h> +#include <linux/spinlock_types.h> +#include <linux/workqueue.h> + +/* Define/comment out these *DEBUG to compile in/out the pr_debug calls. */ +/* Iface handling */ +#define IDEBUG +/* Iptable Matching. Per packet. */ +#define MDEBUG +/* Red-black tree handling. Per packet. */ +#define RDEBUG +/* procfs ctrl/stats handling */ +#define CDEBUG +/* dev and resource tracking */ +#define DDEBUG + +/* E.g (IDEBUG_MASK | CDEBUG_MASK | DDEBUG_MASK) */ +#define DEFAULT_DEBUG_MASK 0 + + +#define IDEBUG_MASK (1<<0) +#define MDEBUG_MASK (1<<1) +#define RDEBUG_MASK (1<<2) +#define CDEBUG_MASK (1<<3) +#define DDEBUG_MASK (1<<4) + +#define MSK_DEBUG(mask, ...) do { \ + if (unlikely(debug_mask & (mask))) \ + pr_debug(__VA_ARGS__); \ + } while (0) +#ifdef IDEBUG +#define IF_DEBUG(...) MSK_DEBUG(IDEBUG_MASK, __VA_ARGS__) +#else +#define IF_DEBUG(...) no_printk(__VA_ARGS__) +#endif +#ifdef MDEBUG +#define MT_DEBUG(...) MSK_DEBUG(MDEBUG_MASK, __VA_ARGS__) +#else +#define MT_DEBUG(...) no_printk(__VA_ARGS__) +#endif +#ifdef RDEBUG +#define RB_DEBUG(...) MSK_DEBUG(RDEBUG_MASK, __VA_ARGS__) +#else +#define RB_DEBUG(...) no_printk(__VA_ARGS__) +#endif +#ifdef CDEBUG +#define CT_DEBUG(...) MSK_DEBUG(CDEBUG_MASK, __VA_ARGS__) +#else +#define CT_DEBUG(...) no_printk(__VA_ARGS__) +#endif +#ifdef DDEBUG +#define DR_DEBUG(...) MSK_DEBUG(DDEBUG_MASK, __VA_ARGS__) +#else +#define DR_DEBUG(...) no_printk(__VA_ARGS__) +#endif + +extern uint debug_mask; + +/*---------------------------------------------------------------------------*/ +/* + * Tags: + * + * They represent what the data usage counters will be tracked against. + * By default a tag is just based on the UID. + * The UID is used as the base for policing, and can not be ignored. 
+ * So a tag will always at least represent a UID (uid_tag). + * + * A tag can be augmented with an "accounting tag" which is associated + * with a UID. + * User space can set the acct_tag portion of the tag which is then used + * with sockets: all data belonging to that socket will be counted against the + * tag. The policing is then based on the tag's uid_tag portion, + * and stats are collected for the acct_tag portion separately. + * + * There could be + * a: {acct_tag=1, uid_tag=10003} + * b: {acct_tag=2, uid_tag=10003} + * c: {acct_tag=3, uid_tag=10003} + * d: {acct_tag=0, uid_tag=10003} + * a, b, and c represent tags associated with specific sockets. + * d is for the totals for that uid, including all untagged traffic. + * Typically d is used with policing/quota rules. + * + * We want tag_t big enough to distinguish uid_t and acct_tag. + * It might become a struct if needed. + * Nothing should be using it as an int. + */ +typedef uint64_t tag_t; /* Only used via accessors */ + +#define TAG_UID_MASK 0xFFFFFFFFULL +#define TAG_ACCT_MASK (~0xFFFFFFFFULL) + +static inline int tag_compare(tag_t t1, tag_t t2) +{ + return t1 < t2 ? -1 : t1 == t2 ? 0 : 1; +} + +static inline tag_t combine_atag_with_uid(tag_t acct_tag, uid_t uid) +{ + return acct_tag | uid; +} +static inline tag_t make_tag_from_uid(uid_t uid) +{ + return uid; +} +static inline uid_t get_uid_from_tag(tag_t tag) +{ + return tag & TAG_UID_MASK; +} +static inline tag_t get_utag_from_tag(tag_t tag) +{ + return tag & TAG_UID_MASK; +} +static inline tag_t get_atag_from_tag(tag_t tag) +{ + return tag & TAG_ACCT_MASK; +} + +static inline bool valid_atag(tag_t tag) +{ + return !(tag & TAG_UID_MASK); +} +static inline tag_t make_atag_from_value(uint32_t value) +{ + return (uint64_t)value << 32; +} +/*---------------------------------------------------------------------------*/ + +/* + * Maximum number of socket tags that a UID is allowed to have active. + * Multiple processes belonging to the same UID contribute towards this limit. + * Special UIDs that can impersonate a UID also contribute (e.g. download + * manager, ...) + */ +#define DEFAULT_MAX_SOCK_TAGS 1024 + +/* + * For now we only track 2 sets of counters. + * The default set is 0. + * Userspace can activate another set for a given uid being tracked. + */ +#define IFS_MAX_COUNTER_SETS 2 + +enum ifs_tx_rx { + IFS_TX, + IFS_RX, + IFS_MAX_DIRECTIONS +}; + +/* For now, TCP, UDP, the rest */ +enum ifs_proto { + IFS_TCP, + IFS_UDP, + IFS_PROTO_OTHER, + IFS_MAX_PROTOS +}; + +struct byte_packet_counters { + uint64_t bytes; + uint64_t packets; +}; + +struct data_counters { + struct byte_packet_counters bpc[IFS_MAX_COUNTER_SETS][IFS_MAX_DIRECTIONS][IFS_MAX_PROTOS]; +}; + +/* Generic X based nodes used as a base for rb_tree ops */ +struct tag_node { + struct rb_node node; + tag_t tag; +}; + +struct tag_stat { + struct tag_node tn; + struct data_counters counters; + /* + * If this tag is acct_tag based, we need to count against the + * matching parent uid_tag. + */ + struct data_counters *parent_counters; +}; + +struct iface_stat { + struct list_head list; /* in iface_stat_list */ + char *ifname; + uint64_t rx_bytes; + uint64_t rx_packets; + uint64_t tx_bytes; + uint64_t tx_packets; + bool active; + struct proc_dir_entry *proc_ptr; + + struct rb_root tag_stat_tree; + spinlock_t tag_stat_list_lock; +}; + +/* This is needed to create proc_dir_entries from atomic context. 
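
A worked example of the tag layout encoded by the accessors above (standalone sketch, values illustrative): the acct_tag lives in the upper 32 bits and the uid in the lower 32 bits.

    #include <assert.h>
    #include <stdint.h>

    int main(void)
    {
        uint32_t uid = 10003;                  /* an app UID */
        uint64_t atag = (uint64_t)2 << 32;     /* make_atag_from_value(2) */
        uint64_t tag = atag | uid;             /* combine_atag_with_uid(atag, uid) */

        assert(tag == 0x0000000200002713ULL);              /* acct_tag=2, uid_tag=10003 */
        assert((uint32_t)(tag & 0xFFFFFFFFULL) == uid);    /* get_uid_from_tag() */
        assert((tag & ~0xFFFFFFFFULL) == atag);            /* get_atag_from_tag() */
        return 0;
    }
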
*/ +struct iface_stat_work { + struct work_struct iface_work; + struct iface_stat *iface_entry; +}; + +/* + * Track tag that this socket is transferring data for, and not necessarily + * the uid that owns the socket. + * This is the tag against which tag_stat.counters will be billed. + * These structs need to be looked up by sock and pid. + */ +struct sock_tag { + struct rb_node sock_node; + struct sock *sk; /* Only used as a number, never dereferenced */ + /* The socket is needed for sockfd_put() */ + struct socket *socket; + /* Used to associate with a given pid */ + struct list_head list; /* in proc_qtu_data.sock_tag_list */ + pid_t pid; + + tag_t tag; +}; + +struct qtaguid_event_counts { + /* Various successful events */ + atomic64_t sockets_tagged; + atomic64_t sockets_untagged; + atomic64_t counter_set_changes; + atomic64_t delete_cmds; + atomic64_t iface_events; /* Number of NETDEV_* events handled */ + /* + * match_found_sk_*: numbers related to the netfilter matching + * function finding a sock for the sk_buff. + */ + atomic64_t match_found_sk; /* An sk was already in the sk_buff. */ + /* The connection tracker had the sk. */ + atomic64_t match_found_sk_in_ct; + /* + * No sk could be found. No apparent owner. Could happen with + * unsolicited traffic. + */ + atomic64_t match_found_sk_none; +}; + +/* Track the set active_set for the given tag. */ +struct tag_counter_set { + struct tag_node tn; + int active_set; +}; + +/*----------------------------------------------*/ +/* + * The qtu uid data is used to track resources that are created directly or + * indirectly by processes (uid tracked). + * It is shared by the processes with the same uid. + * Some of the resource will be counted to prevent further rogue allocations, + * some will need freeing once the owner process (uid) exits. + */ +struct uid_tag_data { + struct rb_node node; + uid_t uid; + + /* + * For the uid, how many accounting tags have been set. + */ + int num_active_tags; + struct rb_root tag_ref_tree; + /* No tag_node_tree_lock; use uid_tag_data_tree_lock */ +}; + +struct tag_ref { + struct tag_node tn; + + /* + * This tracks the number of active sockets that have a tag on them + * which matches this tag_ref.tn.tag. + * A tag ref can live on after the sockets are untagged. + * A tag ref can only be removed during a tag delete command. + */ + int num_sock_tags; +}; + +struct proc_qtu_data { + struct rb_node node; + pid_t pid; + + struct uid_tag_data *parent_tag_data; + + /* Tracks the sock_tags that need freeing upon this proc's death */ + struct list_head sock_tag_list; + /* No spinlock_t sock_tag_list_lock; use the global one. */ +}; + +/*----------------------------------------------*/ +#endif /* ifndef __XT_QTAGUID_INTERNAL_H__ */ diff --git a/net/netfilter/xt_qtaguid_print.c b/net/netfilter/xt_qtaguid_print.c new file mode 100644 index 0000000..3d05447 --- /dev/null +++ b/net/netfilter/xt_qtaguid_print.c @@ -0,0 +1,397 @@ +/* + * Pretty printing Support for iptables xt_qtaguid module. + * + * (C) 2011 Google, Inc + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +/* + * There are run-time debug flags enabled via the debug_mask module param, or + * via the DEFAULT_DEBUG_MASK. See xt_qtaguid_internal.h. 
+ */ +#define DEBUG + +#include <linux/fs.h> +#include <linux/gfp.h> +#include <linux/net.h> +#include <linux/rbtree.h> +#include <linux/slab.h> +#include <linux/spinlock_types.h> + + +#include "xt_qtaguid_internal.h" +#include "xt_qtaguid_print.h" + +char *pp_tag_t(tag_t *tag) +{ + if (!tag) + return kasprintf(GFP_ATOMIC, "tag_t@null{}"); + return kasprintf(GFP_ATOMIC, + "tag_t@%p{tag=0x%llx, uid=%u}", + tag, *tag, get_uid_from_tag(*tag)); +} + +char *pp_data_counters(struct data_counters *dc, bool showValues) +{ + if (!dc) + return kasprintf(GFP_ATOMIC, "data_counters@null{}"); + if (showValues) + return kasprintf( + GFP_ATOMIC, "data_counters@%p{" + "set0{" + "rx{" + "tcp{b=%llu, p=%llu}, " + "udp{b=%llu, p=%llu}," + "other{b=%llu, p=%llu}}, " + "tx{" + "tcp{b=%llu, p=%llu}, " + "udp{b=%llu, p=%llu}," + "other{b=%llu, p=%llu}}}, " + "set1{" + "rx{" + "tcp{b=%llu, p=%llu}, " + "udp{b=%llu, p=%llu}," + "other{b=%llu, p=%llu}}, " + "tx{" + "tcp{b=%llu, p=%llu}, " + "udp{b=%llu, p=%llu}," + "other{b=%llu, p=%llu}}}}", + dc, + dc->bpc[0][IFS_RX][IFS_TCP].bytes, + dc->bpc[0][IFS_RX][IFS_TCP].packets, + dc->bpc[0][IFS_RX][IFS_UDP].bytes, + dc->bpc[0][IFS_RX][IFS_UDP].packets, + dc->bpc[0][IFS_RX][IFS_PROTO_OTHER].bytes, + dc->bpc[0][IFS_RX][IFS_PROTO_OTHER].packets, + dc->bpc[0][IFS_TX][IFS_TCP].bytes, + dc->bpc[0][IFS_TX][IFS_TCP].packets, + dc->bpc[0][IFS_TX][IFS_UDP].bytes, + dc->bpc[0][IFS_TX][IFS_UDP].packets, + dc->bpc[0][IFS_TX][IFS_PROTO_OTHER].bytes, + dc->bpc[0][IFS_TX][IFS_PROTO_OTHER].packets, + dc->bpc[1][IFS_RX][IFS_TCP].bytes, + dc->bpc[1][IFS_RX][IFS_TCP].packets, + dc->bpc[1][IFS_RX][IFS_UDP].bytes, + dc->bpc[1][IFS_RX][IFS_UDP].packets, + dc->bpc[1][IFS_RX][IFS_PROTO_OTHER].bytes, + dc->bpc[1][IFS_RX][IFS_PROTO_OTHER].packets, + dc->bpc[1][IFS_TX][IFS_TCP].bytes, + dc->bpc[1][IFS_TX][IFS_TCP].packets, + dc->bpc[1][IFS_TX][IFS_UDP].bytes, + dc->bpc[1][IFS_TX][IFS_UDP].packets, + dc->bpc[1][IFS_TX][IFS_PROTO_OTHER].bytes, + dc->bpc[1][IFS_TX][IFS_PROTO_OTHER].packets); + else + return kasprintf(GFP_ATOMIC, "data_counters@%p{...}", dc); +} + +char *pp_tag_node(struct tag_node *tn) +{ + char *tag_str; + char *res; + + if (!tn) + return kasprintf(GFP_ATOMIC, "tag_node@null{}"); + tag_str = pp_tag_t(&tn->tag); + res = kasprintf(GFP_ATOMIC, + "tag_node@%p{tag=%s}", + tn, tag_str); + kfree(tag_str); + return res; +} + +char *pp_tag_ref(struct tag_ref *tr) +{ + char *tn_str; + char *res; + + if (!tr) + return kasprintf(GFP_ATOMIC, "tag_ref@null{}"); + tn_str = pp_tag_node(&tr->tn); + res = kasprintf(GFP_ATOMIC, + "tag_ref@%p{%s, num_sock_tags=%d}", + tr, tn_str, tr->num_sock_tags); + kfree(tn_str); + return res; +} + +char *pp_tag_stat(struct tag_stat *ts) +{ + char *tn_str; + char *counters_str; + char *parent_counters_str; + char *res; + + if (!ts) + return kasprintf(GFP_ATOMIC, "tag_stat@null{}"); + tn_str = pp_tag_node(&ts->tn); + counters_str = pp_data_counters(&ts->counters, true); + parent_counters_str = pp_data_counters(ts->parent_counters, false); + res = kasprintf(GFP_ATOMIC, + "tag_stat@%p{%s, counters=%s, parent_counters=%s}", + ts, tn_str, counters_str, parent_counters_str); + kfree(tn_str); + kfree(counters_str); + kfree(parent_counters_str); + return res; +} + +char *pp_iface_stat(struct iface_stat *is) +{ + if (!is) + return kasprintf(GFP_ATOMIC, "iface_stat@null{}"); + return kasprintf(GFP_ATOMIC, "iface_stat@%p{" + "list=list_head{...}, " + "ifname=%s, " + "rx_bytes=%llu, " + "rx_packets=%llu, " + "tx_bytes=%llu, " + "tx_packets=%llu, " + "active=%d, " + 
"proc_ptr=%p, " + "tag_stat_tree=rb_root{...}}", + is, + is->ifname, + is->rx_bytes, + is->rx_packets, + is->tx_bytes, + is->tx_packets, + is->active, + is->proc_ptr); +} + +char *pp_sock_tag(struct sock_tag *st) +{ + char *tag_str; + char *res; + + if (!st) + return kasprintf(GFP_ATOMIC, "sock_tag@null{}"); + tag_str = pp_tag_t(&st->tag); + res = kasprintf(GFP_ATOMIC, "sock_tag@%p{" + "sock_node=rb_node{...}, " + "sk=%p socket=%p (f_count=%lu), list=list_head{...}, " + "pid=%u, tag=%s}", + st, st->sk, st->socket, atomic_long_read( + &st->socket->file->f_count), + st->pid, tag_str); + kfree(tag_str); + return res; +} + +char *pp_uid_tag_data(struct uid_tag_data *utd) +{ + char *res; + + if (!utd) + return kasprintf(GFP_ATOMIC, "uid_tag_data@null{}"); + res = kasprintf(GFP_ATOMIC, "uid_tag_data@%p{" + "uid=%u, num_active_acct_tags=%d, " + "tag_node_tree=rb_root{...}, " + "proc_qtu_data_tree=rb_root{...}}", + utd, utd->uid, + utd->num_active_tags); + return res; +} + +char *pp_proc_qtu_data(struct proc_qtu_data *pqd) +{ + char *parent_tag_data_str; + char *res; + + if (!pqd) + return kasprintf(GFP_ATOMIC, "proc_qtu_data@null{}"); + parent_tag_data_str = pp_uid_tag_data(pqd->parent_tag_data); + res = kasprintf(GFP_ATOMIC, "proc_qtu_data@%p{" + "node=rb_node{...}, pid=%u, " + "parent_tag_data=%s, " + "sock_tag_list=list_head{...}}", + pqd, pqd->pid, parent_tag_data_str + ); + kfree(parent_tag_data_str); + return res; +} + +/*------------------------------------------*/ +void prdebug_sock_tag_tree(int indent_level, + struct rb_root *sock_tag_tree) +{ + struct rb_node *node; + struct sock_tag *sock_tag_entry; + char *str; + + str = "sock_tag_tree=rb_root{"; + CT_DEBUG("%*d: %s\n", indent_level*2, indent_level, str); + indent_level++; + for (node = rb_first(sock_tag_tree); + node; + node = rb_next(node)) { + sock_tag_entry = rb_entry(node, struct sock_tag, sock_node); + str = pp_sock_tag(sock_tag_entry); + CT_DEBUG("%*d: %s,\n", indent_level*2, indent_level, str); + kfree(str); + } + indent_level--; + str = "}"; + CT_DEBUG("%*d: %s\n", indent_level*2, indent_level, str); +} + +void prdebug_sock_tag_list(int indent_level, + struct list_head *sock_tag_list) +{ + struct sock_tag *sock_tag_entry; + char *str; + + str = "sock_tag_list=list_head{"; + CT_DEBUG("%*d: %s\n", indent_level*2, indent_level, str); + indent_level++; + list_for_each_entry(sock_tag_entry, sock_tag_list, list) { + str = pp_sock_tag(sock_tag_entry); + CT_DEBUG("%*d: %s,\n", indent_level*2, indent_level, str); + kfree(str); + } + indent_level--; + str = "}"; + CT_DEBUG("%*d: %s\n", indent_level*2, indent_level, str); +} + +void prdebug_proc_qtu_data_tree(int indent_level, + struct rb_root *proc_qtu_data_tree) +{ + char *str; + struct rb_node *node; + struct proc_qtu_data *proc_qtu_data_entry; + + str = "proc_qtu_data_tree=rb_root{"; + CT_DEBUG("%*d: %s\n", indent_level*2, indent_level, str); + indent_level++; + for (node = rb_first(proc_qtu_data_tree); + node; + node = rb_next(node)) { + proc_qtu_data_entry = rb_entry(node, + struct proc_qtu_data, + node); + str = pp_proc_qtu_data(proc_qtu_data_entry); + CT_DEBUG("%*d: %s,\n", indent_level*2, indent_level, + str); + kfree(str); + indent_level++; + prdebug_sock_tag_list(indent_level, + &proc_qtu_data_entry->sock_tag_list); + indent_level--; + + } + indent_level--; + str = "}"; + CT_DEBUG("%*d: %s\n", indent_level*2, indent_level, str); +} + +void prdebug_tag_ref_tree(int indent_level, struct rb_root *tag_ref_tree) +{ + char *str; + struct rb_node *node; + struct tag_ref 
*tag_ref_entry; + + str = "tag_ref_tree{"; + CT_DEBUG("%*d: %s\n", indent_level*2, indent_level, str); + indent_level++; + for (node = rb_first(tag_ref_tree); + node; + node = rb_next(node)) { + tag_ref_entry = rb_entry(node, + struct tag_ref, + tn.node); + str = pp_tag_ref(tag_ref_entry); + CT_DEBUG("%*d: %s,\n", indent_level*2, indent_level, + str); + kfree(str); + } + indent_level--; + str = "}"; + CT_DEBUG("%*d: %s\n", indent_level*2, indent_level, str); +} + +void prdebug_uid_tag_data_tree(int indent_level, + struct rb_root *uid_tag_data_tree) +{ + char *str; + struct rb_node *node; + struct uid_tag_data *uid_tag_data_entry; + + str = "uid_tag_data_tree=rb_root{"; + CT_DEBUG("%*d: %s\n", indent_level*2, indent_level, str); + indent_level++; + for (node = rb_first(uid_tag_data_tree); + node; + node = rb_next(node)) { + uid_tag_data_entry = rb_entry(node, struct uid_tag_data, + node); + str = pp_uid_tag_data(uid_tag_data_entry); + CT_DEBUG("%*d: %s,\n", indent_level*2, indent_level, str); + kfree(str); + if (!RB_EMPTY_ROOT(&uid_tag_data_entry->tag_ref_tree)) { + indent_level++; + prdebug_tag_ref_tree(indent_level, + &uid_tag_data_entry->tag_ref_tree); + indent_level--; + } + } + indent_level--; + str = "}"; + CT_DEBUG("%*d: %s\n", indent_level*2, indent_level, str); +} + +void prdebug_tag_stat_tree(int indent_level, + struct rb_root *tag_stat_tree) +{ + char *str; + struct rb_node *node; + struct tag_stat *ts_entry; + + str = "tag_stat_tree{"; + CT_DEBUG("%*d: %s\n", indent_level*2, indent_level, str); + indent_level++; + for (node = rb_first(tag_stat_tree); + node; + node = rb_next(node)) { + ts_entry = rb_entry(node, struct tag_stat, tn.node); + str = pp_tag_stat(ts_entry); + CT_DEBUG("%*d: %s\n", indent_level*2, indent_level, + str); + kfree(str); + } + indent_level--; + str = "}"; + CT_DEBUG("%*d: %s\n", indent_level*2, indent_level, str); +} + +void prdebug_iface_stat_list(int indent_level, + struct list_head *iface_stat_list) +{ + char *str; + struct iface_stat *iface_entry; + + str = "iface_stat_list=list_head{"; + CT_DEBUG("%*d: %s\n", indent_level*2, indent_level, str); + indent_level++; + list_for_each_entry(iface_entry, iface_stat_list, list) { + str = pp_iface_stat(iface_entry); + CT_DEBUG("%*d: %s\n", indent_level*2, indent_level, str); + kfree(str); + + spin_lock_bh(&iface_entry->tag_stat_list_lock); + if (!RB_EMPTY_ROOT(&iface_entry->tag_stat_tree)) { + indent_level++; + prdebug_tag_stat_tree(indent_level, + &iface_entry->tag_stat_tree); + indent_level--; + } + spin_unlock_bh(&iface_entry->tag_stat_list_lock); + } + indent_level--; + str = "}"; + CT_DEBUG("%*d: %s\n", indent_level*2, indent_level, str); +} diff --git a/net/netfilter/xt_qtaguid_print.h b/net/netfilter/xt_qtaguid_print.h new file mode 100644 index 0000000..e26020c --- /dev/null +++ b/net/netfilter/xt_qtaguid_print.h @@ -0,0 +1,39 @@ +/* + * Pretty printing Support for iptables xt_qtaguid module. + * + * (C) 2011 Google, Inc + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
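All of the pp_*() helpers in xt_qtaguid_print.c share one contract: they return a string allocated with kasprintf(GFP_ATOMIC) (NULL if the allocation fails) and the caller owns and must kfree() it, which is exactly how the prdebug_*() walkers above consume them via CT_DEBUG. A minimal caller sketch (the function name is hypothetical, and pr_debug stands in for the module's CT_DEBUG macro):

	/* Sketch only: consuming a pp_*() string. */
	static void debug_one_sock_tag(struct sock_tag *st)
	{
		char *str = pp_sock_tag(st);	/* kasprintf(GFP_ATOMIC)-backed, may be NULL */

		pr_debug("qtaguid: %s\n", str);	/* printk emits "(null)" for a NULL %s */
		kfree(str);			/* kfree(NULL) is a no-op */
	}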
+ */ +#ifndef __XT_QTAGUID_PRINT_H__ +#define __XT_QTAGUID_PRINT_H__ + +#include "xt_qtaguid_internal.h" + +char *pp_tag_t(tag_t *tag); +char *pp_data_counters(struct data_counters *dc, bool showValues); +char *pp_tag_node(struct tag_node *tn); +char *pp_tag_ref(struct tag_ref *tr); +char *pp_tag_stat(struct tag_stat *ts); +char *pp_iface_stat(struct iface_stat *is); +char *pp_sock_tag(struct sock_tag *st); +char *pp_uid_tag_data(struct uid_tag_data *qtd); +char *pp_proc_qtu_data(struct proc_qtu_data *pqd); + +/*------------------------------------------*/ +void prdebug_sock_tag_list(int indent_level, + struct list_head *sock_tag_list); +void prdebug_sock_tag_tree(int indent_level, + struct rb_root *sock_tag_tree); +void prdebug_proc_qtu_data_tree(int indent_level, + struct rb_root *proc_qtu_data_tree); +void prdebug_tag_ref_tree(int indent_level, struct rb_root *tag_ref_tree); +void prdebug_uid_tag_data_tree(int indent_level, + struct rb_root *uid_tag_data_tree); +void prdebug_tag_stat_tree(int indent_level, + struct rb_root *tag_stat_tree); +void prdebug_iface_stat_list(int indent_level, + struct list_head *iface_stat_list); +#endif /* ifndef __XT_QTAGUID_PRINT_H__ */ diff --git a/net/wireless/scan.c b/net/wireless/scan.c index ae0c225..cbbc927 100644 --- a/net/wireless/scan.c +++ b/net/wireless/scan.c @@ -17,7 +17,7 @@ #include "nl80211.h" #include "wext-compat.h" -#define IEEE80211_SCAN_RESULT_EXPIRE (15 * HZ) +#define IEEE80211_SCAN_RESULT_EXPIRE (3 * HZ) void ___cfg80211_scan_done(struct cfg80211_registered_device *rdev, bool leak) { diff --git a/sound/soc/codecs/twl6040.c b/sound/soc/codecs/twl6040.c index f217421..b46321c 100644 --- a/sound/soc/codecs/twl6040.c +++ b/sound/soc/codecs/twl6040.c @@ -1121,7 +1121,7 @@ static const struct snd_kcontrol_new ep_driver_switch_controls = /* Headset power mode */ static const char *twl6040_headset_power_texts[] = { - "Low-Power", "High-Perfomance", + "Low-Power", "High-Performance", }; static const struct soc_enum twl6040_headset_power_enum = diff --git a/sound/soc/omap/omap-abe-dsp.c b/sound/soc/omap/omap-abe-dsp.c index 940c42c..bb46477 100644 --- a/sound/soc/omap/omap-abe-dsp.c +++ b/sound/soc/omap/omap-abe-dsp.c @@ -140,7 +140,6 @@ struct abe_data { int opp_req_count; u16 router[16]; - int loss_count; struct snd_pcm_substream *ping_pong_substream; int first_irq; @@ -2007,8 +2006,6 @@ static int aess_set_runtime_opp_level(struct abe_data *abe) static int aess_save_context(struct abe_data *abe) { - struct omap4_abe_dsp_pdata *pdata = abe->abe_pdata; - /* TODO: Find a better way to save/retore gains after OFF mode */ abe_mute_gain(MIXSDT, MIX_SDT_INPUT_UP_MIXER); @@ -2039,9 +2036,8 @@ static int aess_save_context(struct abe_data *abe) abe_mute_gain(GAINS_DMIC3, GAIN_RIGHT_OFFSET); abe_mute_gain(GAINS_AMIC, GAIN_LEFT_OFFSET); abe_mute_gain(GAINS_AMIC, GAIN_RIGHT_OFFSET); - - if (pdata->get_context_loss_count) - abe->loss_count = pdata->get_context_loss_count(abe->dev); + abe_mute_gain(GAINS_BTUL, GAIN_LEFT_OFFSET); + abe_mute_gain(GAINS_BTUL, GAIN_RIGHT_OFFSET); return 0; } @@ -2049,7 +2045,7 @@ static int aess_save_context(struct abe_data *abe) static int aess_restore_context(struct abe_data *abe) { struct omap4_abe_dsp_pdata *pdata = abe->abe_pdata; - int i, loss_count = 0, ret; + int i, ret; if (pdata && pdata->device_scale) { ret = pdata->device_scale(the_abe->dev, the_abe->dev, @@ -2060,10 +2056,7 @@ static int aess_restore_context(struct abe_data *abe) } } - if (pdata->get_context_loss_count) - loss_count = 
pdata->get_context_loss_count(abe->dev); - - if (loss_count != the_abe->loss_count) + if (pdata->was_context_lost && pdata->was_context_lost(abe->dev)) abe_reload_fw(abe->firmware); /* TODO: Find a better way to save/retore gains after dor OFF mode */ @@ -2095,6 +2088,8 @@ static int aess_restore_context(struct abe_data *abe) abe_unmute_gain(GAINS_DMIC3, GAIN_RIGHT_OFFSET); abe_unmute_gain(GAINS_AMIC, GAIN_LEFT_OFFSET); abe_unmute_gain(GAINS_AMIC, GAIN_RIGHT_OFFSET); + abe_unmute_gain(GAINS_BTUL, GAIN_LEFT_OFFSET); + abe_unmute_gain(GAINS_BTUL, GAIN_RIGHT_OFFSET); abe_set_router_configuration(UPROUTE, 0, (u32 *)abe->router); @@ -2344,7 +2339,6 @@ static int abe_add_widgets(struct snd_soc_platform *platform) static int abe_suspend(struct snd_soc_dai *dai) { struct abe_data *abe = the_abe; - struct omap4_abe_dsp_pdata *pdata = abe->abe_pdata; int ret = 0; dev_dbg(dai->dev, "%s: %s active %d\n", @@ -2363,7 +2357,11 @@ static int abe_suspend(struct snd_soc_dai *dai) case OMAP_ABE_DAI_PDM_DL1: case OMAP_ABE_DAI_PDM_DL2: case OMAP_ABE_DAI_PDM_VIB: + break; case OMAP_ABE_DAI_BT_VX: + abe_mute_gain(GAINS_BTUL, GAIN_LEFT_OFFSET); + abe_mute_gain(GAINS_BTUL, GAIN_RIGHT_OFFSET); + break; case OMAP_ABE_DAI_MM_FM: case OMAP_ABE_DAI_MODEM: break; @@ -2386,9 +2384,6 @@ static int abe_suspend(struct snd_soc_dai *dai) goto out; } - if (pdata->get_context_loss_count) - abe->loss_count = pdata->get_context_loss_count(abe->dev); - out: pm_runtime_put_sync(abe->dev); return ret; @@ -2398,7 +2393,7 @@ static int abe_resume(struct snd_soc_dai *dai) { struct abe_data *abe = the_abe; struct omap4_abe_dsp_pdata *pdata = abe->abe_pdata; - int i, loss_count = 0, ret = 0; + int i, ret = 0; dev_dbg(dai->dev, "%s: %s active %d\n", __func__, dai->name, dai->active); @@ -2406,8 +2401,9 @@ static int abe_resume(struct snd_soc_dai *dai) if (!dai->active) return 0; - if (pdata->get_context_loss_count) - loss_count = pdata->get_context_loss_count(abe->dev); + /* context retained, no need to restore */ + if (pdata->was_context_lost && !pdata->was_context_lost(abe->dev)) + return 0; pm_runtime_get_sync(abe->dev); @@ -2420,8 +2416,7 @@ static int abe_resume(struct snd_soc_dai *dai) } } - if (loss_count != abe->loss_count) - abe_reload_fw(abe->firmware); + abe_reload_fw(abe->firmware); switch (dai->id) { case OMAP_ABE_DAI_PDM_UL: @@ -2431,7 +2426,11 @@ static int abe_resume(struct snd_soc_dai *dai) case OMAP_ABE_DAI_PDM_DL1: case OMAP_ABE_DAI_PDM_DL2: case OMAP_ABE_DAI_PDM_VIB: + break; case OMAP_ABE_DAI_BT_VX: + abe_unmute_gain(GAINS_BTUL, GAIN_LEFT_OFFSET); + abe_unmute_gain(GAINS_BTUL, GAIN_RIGHT_OFFSET); + break; case OMAP_ABE_DAI_MM_FM: case OMAP_ABE_DAI_MODEM: break; @@ -2459,6 +2458,8 @@ static int abe_resume(struct snd_soc_dai *dai) for (i = 0; i < abe->hdr.num_equ; i++) abe_dsp_set_equalizer(i, abe->equ_profile[i]); + for (i = 0; i < ABE_NUM_MONO_MIXERS; i++) + abe_dsp_set_mono_mixer(MIX_DL1_MONO + i, abe->mono_mix[i]); out: pm_runtime_put_sync(abe->dev); return ret; diff --git a/sound/soc/omap/omap-mcpdm.c b/sound/soc/omap/omap-mcpdm.c index ea2081f..81e6866 100644 --- a/sound/soc/omap/omap-mcpdm.c +++ b/sound/soc/omap/omap-mcpdm.c @@ -43,6 +43,7 @@ #include <plat/dma.h> #include <plat/omap_hwmod.h> +#include <plat/mcpdm.h> #include "omap-mcpdm.h" #include "omap-pcm.h" #if defined(CONFIG_SND_OMAP_SOC_ABE_DSP) ||\ @@ -72,6 +73,8 @@ struct omap_mcpdm { struct omap_abe_port *dl_port; struct omap_abe_port *ul_port; + u32 *reg_cache; + /* channel data */ u32 dn_channels; u32 up_channels; @@ -116,6 +119,17 @@ static inline 
int omap_mcpdm_read(struct omap_mcpdm *mcpdm, u16 reg) return __raw_readl(mcpdm->io_base + reg); } +static inline void omap_mcpdm_write_cache(struct omap_mcpdm *mcpdm, + u16 reg, u32 val) +{ + mcpdm->reg_cache[reg / sizeof(u32)] = val; +} + +static inline int omap_mcpdm_read_cache(struct omap_mcpdm *mcpdm, u16 reg) +{ + return mcpdm->reg_cache[reg / sizeof(u32)]; +} + #ifdef DEBUG static void omap_mcpdm_reg_dump(struct omap_mcpdm *mcpdm) { @@ -536,6 +550,68 @@ static struct snd_soc_dai_ops omap_mcpdm_dai_ops = { .trigger = omap_mcpdm_dai_trigger, }; +#ifdef CONFIG_PM +static int omap_mcpdm_suspend(struct snd_soc_dai *dai) +{ + struct omap_mcpdm *mcpdm = snd_soc_dai_get_drvdata(dai); + + /* save context only if we are streaming */ + if (!mcpdm->active) + return 0; + + omap_mcpdm_write_cache(mcpdm, MCPDM_DN_OFFSET, + omap_mcpdm_read(mcpdm, MCPDM_DN_OFFSET)); + omap_mcpdm_write_cache(mcpdm, MCPDM_IRQENABLE_SET, + omap_mcpdm_read(mcpdm, MCPDM_IRQENABLE_SET)); + omap_mcpdm_write_cache(mcpdm, MCPDM_DMAENABLE_SET, + omap_mcpdm_read(mcpdm, MCPDM_DMAENABLE_SET)); + omap_mcpdm_write_cache(mcpdm, MCPDM_FIFO_CTRL_DN, + omap_mcpdm_read(mcpdm, MCPDM_FIFO_CTRL_DN)); + omap_mcpdm_write_cache(mcpdm, MCPDM_FIFO_CTRL_UP, + omap_mcpdm_read(mcpdm, MCPDM_FIFO_CTRL_UP)); + omap_mcpdm_write_cache(mcpdm, MCPDM_CTRL, + omap_mcpdm_read(mcpdm, MCPDM_CTRL)); + + pm_runtime_put_sync(mcpdm->dev); + + return 0; +} + +static int omap_mcpdm_resume(struct snd_soc_dai *dai) +{ + struct omap_mcpdm *mcpdm = snd_soc_dai_get_drvdata(dai); + struct omap_mcpdm_platform_data *pdata = mcpdm->pdata; + + /* restore context only if we were streaming */ + if (!mcpdm->active) + return 0; + + if (!pdata->was_context_lost(mcpdm->dev)) + return 0; + + pm_runtime_get_sync(mcpdm->dev); + + /* restore from reg cache */ + omap_mcpdm_write(mcpdm, MCPDM_DN_OFFSET, + omap_mcpdm_read_cache(mcpdm, MCPDM_DN_OFFSET)); + omap_mcpdm_write(mcpdm, MCPDM_IRQENABLE_SET, + omap_mcpdm_read_cache(mcpdm, MCPDM_IRQENABLE_SET)); + omap_mcpdm_write(mcpdm, MCPDM_DMAENABLE_SET, + omap_mcpdm_read_cache(mcpdm, MCPDM_DMAENABLE_SET)); + omap_mcpdm_write(mcpdm, MCPDM_FIFO_CTRL_DN, + omap_mcpdm_read_cache(mcpdm, MCPDM_FIFO_CTRL_DN)); + omap_mcpdm_write(mcpdm, MCPDM_FIFO_CTRL_UP, + omap_mcpdm_read_cache(mcpdm, MCPDM_FIFO_CTRL_UP)); + omap_mcpdm_write(mcpdm, MCPDM_CTRL, + omap_mcpdm_read_cache(mcpdm, MCPDM_CTRL)); + + return 0; +} +#else +#define omap_mcpdm_suspend NULL +#define omap_mcpdm_resume NULL +#endif + static int omap_mcpdm_probe(struct snd_soc_dai *dai) { struct omap_mcpdm *mcpdm = snd_soc_dai_get_drvdata(dai); @@ -577,6 +653,8 @@ static struct snd_soc_dai_driver omap_mcpdm_dai[] = { .remove = omap_mcpdm_remove, .probe_order = SND_SOC_COMP_ORDER_LATE, .remove_order = SND_SOC_COMP_ORDER_EARLY, + .suspend = omap_mcpdm_suspend, + .resume = omap_mcpdm_resume, .playback = { .channels_min = 1, .channels_max = 4, @@ -590,6 +668,8 @@ static struct snd_soc_dai_driver omap_mcpdm_dai[] = { .id = MCPDM_LEGACY_DAI_UL1, .probe_order = SND_SOC_COMP_ORDER_LATE, .remove_order = SND_SOC_COMP_ORDER_EARLY, + .suspend = omap_mcpdm_suspend, + .resume = omap_mcpdm_resume, .capture = { .channels_min = 1, .channels_max = 2, @@ -605,6 +685,8 @@ static struct snd_soc_dai_driver omap_mcpdm_dai[] = { .id = MCPDM_ABE_DAI_DL1, .probe_order = SND_SOC_COMP_ORDER_LATE, .remove_order = SND_SOC_COMP_ORDER_EARLY, + .suspend = omap_mcpdm_suspend, + .resume = omap_mcpdm_resume, .playback = { .channels_min = 1, .channels_max = 2, @@ -618,6 +700,8 @@ static struct snd_soc_dai_driver omap_mcpdm_dai[] = { 
.id = MCPDM_ABE_DAI_DL2, .probe_order = SND_SOC_COMP_ORDER_LATE, .remove_order = SND_SOC_COMP_ORDER_EARLY, + .suspend = omap_mcpdm_suspend, + .resume = omap_mcpdm_resume, .playback = { .channels_min = 1, .channels_max = 2, @@ -631,6 +715,8 @@ static struct snd_soc_dai_driver omap_mcpdm_dai[] = { .id = MCPDM_ABE_DAI_VIB, .probe_order = SND_SOC_COMP_ORDER_LATE, .remove_order = SND_SOC_COMP_ORDER_EARLY, + .suspend = omap_mcpdm_suspend, + .resume = omap_mcpdm_resume, .playback = { .channels_min = 1, .channels_max = 2, @@ -644,6 +730,8 @@ static struct snd_soc_dai_driver omap_mcpdm_dai[] = { .id = MCPDM_ABE_DAI_UL1, .probe_order = SND_SOC_COMP_ORDER_LATE, .remove_order = SND_SOC_COMP_ORDER_EARLY, + .suspend = omap_mcpdm_suspend, + .resume = omap_mcpdm_resume, .capture = { .channels_min = 1, .channels_max = 2, @@ -657,6 +745,7 @@ static struct snd_soc_dai_driver omap_mcpdm_dai[] = { static __devinit int asoc_mcpdm_probe(struct platform_device *pdev) { + struct omap_mcpdm_platform_data *pdata = pdev->dev.platform_data; struct omap_mcpdm *mcpdm; struct resource *res; int ret = 0, err; @@ -681,6 +770,12 @@ static __devinit int asoc_mcpdm_probe(struct platform_device *pdev) goto err_iomap; } + mcpdm->reg_cache = kzalloc(resource_size(res), GFP_KERNEL); + if (!mcpdm->reg_cache) { + ret = -ENOMEM; + goto err_cache; + } + mcpdm->irq = platform_get_irq(pdev, 0); if (mcpdm->irq < 0) { ret = mcpdm->irq; @@ -688,6 +783,7 @@ static __devinit int asoc_mcpdm_probe(struct platform_device *pdev) } mcpdm->dev = &pdev->dev; + mcpdm->pdata = pdata; /* DL1 and DL2 DC offset values will be different for each device */ mcpdm->dl1_offset = DN_OFST_MAX >> 1; @@ -732,6 +828,8 @@ err_ul: omap_abe_port_mgr_put(mcpdm->abe); #endif err_irq: + kfree(mcpdm->reg_cache); +err_cache: iounmap(mcpdm->io_base); err_iomap: release_mem_region(res->start, resource_size(res)); @@ -759,7 +857,7 @@ static int __devexit asoc_mcpdm_remove(struct platform_device *pdev) res = platform_get_resource(pdev, IORESOURCE_MEM, 0); iounmap(mcpdm->io_base); - + kfree(mcpdm->reg_cache); kfree(mcpdm); return 0; } |
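The ABE DSP and McPDM hunks above both drop the get_context_loss_count()/loss_count bookkeeping in favour of the platform-provided was_context_lost() callback. McPDM additionally keeps a reg_cache sized by resource_size(res) and indexed by register offset / sizeof(u32), so resume only rewrites the six cached registers when the power domain really lost context. Condensed, the shared resume-time decision looks roughly like the sketch below; the pdata type and restore_hw_state() are hypothetical stand-ins, not literal excerpts of either driver.

	/* Sketch of the resume path after this patch: restore only on real context loss. */
	static int resume_sketch(struct device *dev, struct sketch_pdata *pdata)
	{
		/* Context retained across suspend: nothing to redo. */
		if (pdata->was_context_lost && !pdata->was_context_lost(dev))
			return 0;

		/*
		 * Context was lost: ABE reloads its firmware and re-applies
		 * gains/equalizers, McPDM rewrites the registers it cached
		 * in suspend.
		 */
		restore_hw_state(dev);
		return 0;
	}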