From 6dbb6ee090e810be337945a7a64d647549b70328 Mon Sep 17 00:00:00 2001 From: Ulf Hansson Date: Mon, 7 Jan 2013 15:30:44 +0100 Subject: ARM: 7617/1: mmc: mmci: Support for DDR mode Add support for DDR mode which may be used for the ux500v2 variant. Corresponding capabilities to enable the DDR support must be set in the platform struct to enable the functionality. Signed-off-by: Ulf Hansson Signed-off-by: Russell King diff --git a/drivers/mmc/host/mmci.c b/drivers/mmc/host/mmci.c index 1507723..fba5107 100644 --- a/drivers/mmc/host/mmci.c +++ b/drivers/mmc/host/mmci.c @@ -202,6 +202,9 @@ static void mmci_set_clkreg(struct mmci_host *host, unsigned int desired) if (host->mmc->ios.bus_width == MMC_BUS_WIDTH_8) clk |= MCI_ST_8BIT_BUS; + if (host->mmc->ios.timing == MMC_TIMING_UHS_DDR50) + clk |= MCI_ST_UX500_NEG_EDGE; + mmci_write_clkreg(host, clk); } @@ -680,6 +683,9 @@ static void mmci_start_data(struct mmci_host *host, struct mmc_data *data) mmci_write_clkreg(host, clk); } + if (host->mmc->ios.timing == MMC_TIMING_UHS_DDR50) + datactrl |= MCI_ST_DPSM_DDRMODE; + /* * Attempt to use DMA operation mode, if this * should fail, fall back to PIO mode -- cgit v0.10.2 From 70be208f0bd75eb81264f681e36485d0617d612f Mon Sep 17 00:00:00 2001 From: Ulf Hansson Date: Mon, 7 Jan 2013 15:35:06 +0100 Subject: ARM: 7618/1: mmc: mmci: Support MMC_PM_KEEP_POWER Add MMC_PM_KEEP_POWER to pm_caps so SDIO clients are able to use this option to prevent power off in suspend. Signed-off-by: Ulf Hansson Signed-off-by: Russell King diff --git a/drivers/mmc/host/mmci.c b/drivers/mmc/host/mmci.c index fba5107..442b011 100644 --- a/drivers/mmc/host/mmci.c +++ b/drivers/mmc/host/mmci.c @@ -20,6 +20,7 @@ #include #include #include +#include #include #include #include @@ -1416,6 +1417,9 @@ static int mmci_probe(struct amba_device *dev, mmc->caps = plat->capabilities; mmc->caps2 = plat->capabilities2; + /* We support these PM capabilities. */ + mmc->pm_caps = MMC_PM_KEEP_POWER; + /* * We can do SGIO */ -- cgit v0.10.2 From 599c1d5c750ddf528c7c6d3cdc466708f0502e66 Mon Sep 17 00:00:00 2001 From: Ulf Hansson Date: Mon, 7 Jan 2013 16:22:50 +0100 Subject: ARM: 7620/1: mmc: mmci: Convert to use mmc_regulator_get_supply By using the mmc_regulator_get_supply API we are able to do some cleanups of the regulator code. Additionally let the regulator API handle the error printing. Cc: Chris Ball Signed-off-by: Ulf Hansson Signed-off-by: Russell King diff --git a/drivers/mmc/host/mmci.c b/drivers/mmc/host/mmci.c index 442b011..3094ea3e 100644 --- a/drivers/mmc/host/mmci.c +++ b/drivers/mmc/host/mmci.c @@ -1093,7 +1093,6 @@ static void mmci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios) struct variant_data *variant = host->variant; u32 pwr = 0; unsigned long flags; - int ret; pm_runtime_get_sync(mmc_dev(mmc)); @@ -1103,23 +1102,13 @@ static void mmci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios) switch (ios->power_mode) { case MMC_POWER_OFF: - if (host->vcc) - ret = mmc_regulator_set_ocr(mmc, host->vcc, 0); + if (!IS_ERR(mmc->supply.vmmc)) + mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, 0); break; case MMC_POWER_UP: - if (host->vcc) { - ret = mmc_regulator_set_ocr(mmc, host->vcc, ios->vdd); - if (ret) { - dev_err(mmc_dev(mmc), "unable to set OCR\n"); - /* - * The .set_ios() function in the mmc_host_ops - * struct return void, and failing to set the - * power should be rare so we print an error - * and return here. 
- */ - goto out; - } - } + if (!IS_ERR(mmc->supply.vmmc)) + mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, ios->vdd); + /* * The ST Micro variant doesn't have the PL180s MCI_PWR_UP * and instead uses MCI_PWR_ON so apply whatever value is @@ -1168,7 +1157,6 @@ static void mmci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios) spin_unlock_irqrestore(&host->lock, flags); - out: pm_runtime_mark_last_busy(mmc_dev(mmc)); pm_runtime_put_autosuspend(mmc_dev(mmc)); } @@ -1391,29 +1379,13 @@ static int mmci_probe(struct amba_device *dev, } else dev_warn(&dev->dev, "could not get default pinstate\n"); -#ifdef CONFIG_REGULATOR - /* If we're using the regulator framework, try to fetch a regulator */ - host->vcc = regulator_get(&dev->dev, "vmmc"); - if (IS_ERR(host->vcc)) - host->vcc = NULL; - else { - int mask = mmc_regulator_get_ocrmask(host->vcc); - - if (mask < 0) - dev_err(&dev->dev, "error getting OCR mask (%d)\n", - mask); - else { - host->mmc->ocr_avail = (u32) mask; - if (plat->ocr_mask) - dev_warn(&dev->dev, - "Provided ocr_mask/setpower will not be used " - "(using regulator instead)\n"); - } - } -#endif - /* Fall back to platform data if no regulator is found */ - if (host->vcc == NULL) + /* Get regulators and the supported OCR mask */ + mmc_regulator_get_supply(mmc); + if (!mmc->ocr_avail) mmc->ocr_avail = plat->ocr_mask; + else if (plat->ocr_mask) + dev_warn(mmc_dev(mmc), "Platform OCR mask is ignored\n"); + mmc->caps = plat->capabilities; mmc->caps2 = plat->capabilities2; @@ -1595,10 +1567,6 @@ static int mmci_remove(struct amba_device *dev) clk_disable_unprepare(host->clk); clk_put(host->clk); - if (host->vcc) - mmc_regulator_set_ocr(mmc, host->vcc, 0); - regulator_put(host->vcc); - mmc_free_host(mmc); amba_release_regions(dev); diff --git a/drivers/mmc/host/mmci.h b/drivers/mmc/host/mmci.h index d34d8c0..a22213e 100644 --- a/drivers/mmc/host/mmci.h +++ b/drivers/mmc/host/mmci.h @@ -193,7 +193,6 @@ struct mmci_host { /* pio stuff */ struct sg_mapping_iter sg_miter; unsigned int size; - struct regulator *vcc; /* pinctrl handles */ struct pinctrl *pinctrl; -- cgit v0.10.2 From 638591cd7b601d403ed703d55062b48c32ea8cfb Mon Sep 17 00:00:00 2001 From: Dave Martin Date: Thu, 10 Jan 2013 12:20:15 +0100 Subject: ARM: 7626/1: arm/crypto: Make asm SHA-1 and AES code Thumb-2 compatible This patch fixes aes-armv4.S and sha1-armv4-large.S to work natively in Thumb. This allows ARM/Thumb interworking workarounds to be removed. I also take the opportunity to convert some explicit assembler directives for exported functions to the standard ENTRY()/ENDPROC(). For the code itself: * In sha1_block_data_order, use of TEQ with sp is deprecated in ARMv7 and not supported in Thumb. For the branches back to .L_00_15 and .L_40_59, the TEQ is converted to a CMP, under the assumption that clobbering the C flag here will not cause incorrect behaviour. For the first branch back to .L_20_39_or_60_79 the C flag is important, so sp is moved temporarily into another register so that TEQ can be used for the comparison. * In the AES code, most forms of register-indexed addressing with shifts and rotates are not permitted for loads and stores in Thumb, so the address calculation is done using a separate instruction for the Thumb case. The resulting code is unlikely to be optimally scheduled, but it should not have a large impact given the overall size of the code. I haven't run any benchmarks. 
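For reference, the ARM()/THUMB() annotations appearing in the diffs below are provided by asm/unified.h; a simplified sketch of how they select per-ISA instructions (CONFIG_THUMB2_KERNEL selects a Thumb-2 kernel build):

	#ifdef CONFIG_THUMB2_KERNEL
	#define ARM(x...)
	#define THUMB(x...)	x
	#else
	#define ARM(x...)	x
	#define THUMB(x...)
	#endif

With these macros, a line such as ARM( ldrb r1,[r10,r1,lsr#24] ) assembles only in ARM builds, while its THUMB() alternative assembles only in Thumb-2 builds, so one source file serves both instruction sets.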
Signed-off-by: Dave Martin Tested-by: David McCullough (ARM only) Acked-by: David McCullough Acked-by: Nicolas Pitre Signed-off-by: Russell King diff --git a/arch/arm/crypto/aes-armv4.S b/arch/arm/crypto/aes-armv4.S index e59b1d5..19d6cd6 100644 --- a/arch/arm/crypto/aes-armv4.S +++ b/arch/arm/crypto/aes-armv4.S @@ -34,8 +34,9 @@ @ A little glue here to select the correct code below for the ARM CPU @ that is being targetted. +#include + .text -.code 32 .type AES_Te,%object .align 5 @@ -145,10 +146,8 @@ AES_Te: @ void AES_encrypt(const unsigned char *in, unsigned char *out, @ const AES_KEY *key) { -.global AES_encrypt -.type AES_encrypt,%function .align 5 -AES_encrypt: +ENTRY(AES_encrypt) sub r3,pc,#8 @ AES_encrypt stmdb sp!,{r1,r4-r12,lr} mov r12,r0 @ inp @@ -239,15 +238,8 @@ AES_encrypt: strb r6,[r12,#14] strb r3,[r12,#15] #endif -#if __ARM_ARCH__>=5 ldmia sp!,{r4-r12,pc} -#else - ldmia sp!,{r4-r12,lr} - tst lr,#1 - moveq pc,lr @ be binary compatible with V4, yet - .word 0xe12fff1e @ interoperable with Thumb ISA:-) -#endif -.size AES_encrypt,.-AES_encrypt +ENDPROC(AES_encrypt) .type _armv4_AES_encrypt,%function .align 2 @@ -386,10 +378,8 @@ _armv4_AES_encrypt: ldr pc,[sp],#4 @ pop and return .size _armv4_AES_encrypt,.-_armv4_AES_encrypt -.global private_AES_set_encrypt_key -.type private_AES_set_encrypt_key,%function .align 5 -private_AES_set_encrypt_key: +ENTRY(private_AES_set_encrypt_key) _armv4_AES_set_encrypt_key: sub r3,pc,#8 @ AES_set_encrypt_key teq r0,#0 @@ -658,15 +648,11 @@ _armv4_AES_set_encrypt_key: .Ldone: mov r0,#0 ldmia sp!,{r4-r12,lr} -.Labrt: tst lr,#1 - moveq pc,lr @ be binary compatible with V4, yet - .word 0xe12fff1e @ interoperable with Thumb ISA:-) -.size private_AES_set_encrypt_key,.-private_AES_set_encrypt_key +.Labrt: mov pc,lr +ENDPROC(private_AES_set_encrypt_key) -.global private_AES_set_decrypt_key -.type private_AES_set_decrypt_key,%function .align 5 -private_AES_set_decrypt_key: +ENTRY(private_AES_set_decrypt_key) str lr,[sp,#-4]! 
@ push lr #if 0 @ kernel does both of these in setkey so optimise this bit out by @@ -748,15 +734,8 @@ private_AES_set_decrypt_key: bne .Lmix mov r0,#0 -#if __ARM_ARCH__>=5 ldmia sp!,{r4-r12,pc} -#else - ldmia sp!,{r4-r12,lr} - tst lr,#1 - moveq pc,lr @ be binary compatible with V4, yet - .word 0xe12fff1e @ interoperable with Thumb ISA:-) -#endif -.size private_AES_set_decrypt_key,.-private_AES_set_decrypt_key +ENDPROC(private_AES_set_decrypt_key) .type AES_Td,%object .align 5 @@ -862,10 +841,8 @@ AES_Td: @ void AES_decrypt(const unsigned char *in, unsigned char *out, @ const AES_KEY *key) { -.global AES_decrypt -.type AES_decrypt,%function .align 5 -AES_decrypt: +ENTRY(AES_decrypt) sub r3,pc,#8 @ AES_decrypt stmdb sp!,{r1,r4-r12,lr} mov r12,r0 @ inp @@ -956,15 +933,8 @@ AES_decrypt: strb r6,[r12,#14] strb r3,[r12,#15] #endif -#if __ARM_ARCH__>=5 ldmia sp!,{r4-r12,pc} -#else - ldmia sp!,{r4-r12,lr} - tst lr,#1 - moveq pc,lr @ be binary compatible with V4, yet - .word 0xe12fff1e @ interoperable with Thumb ISA:-) -#endif -.size AES_decrypt,.-AES_decrypt +ENDPROC(AES_decrypt) .type _armv4_AES_decrypt,%function .align 2 @@ -1064,7 +1034,9 @@ _armv4_AES_decrypt: and r9,lr,r1,lsr#8 ldrb r7,[r10,r7] @ Td4[s1>>0] - ldrb r1,[r10,r1,lsr#24] @ Td4[s1>>24] + ARM( ldrb r1,[r10,r1,lsr#24] ) @ Td4[s1>>24] + THUMB( add r1,r10,r1,lsr#24 ) @ Td4[s1>>24] + THUMB( ldrb r1,[r1] ) ldrb r8,[r10,r8] @ Td4[s1>>16] eor r0,r7,r0,lsl#24 ldrb r9,[r10,r9] @ Td4[s1>>8] @@ -1077,7 +1049,9 @@ _armv4_AES_decrypt: ldrb r8,[r10,r8] @ Td4[s2>>0] and r9,lr,r2,lsr#16 - ldrb r2,[r10,r2,lsr#24] @ Td4[s2>>24] + ARM( ldrb r2,[r10,r2,lsr#24] ) @ Td4[s2>>24] + THUMB( add r2,r10,r2,lsr#24 ) @ Td4[s2>>24] + THUMB( ldrb r2,[r2] ) eor r0,r0,r7,lsl#8 ldrb r9,[r10,r9] @ Td4[s2>>16] eor r1,r8,r1,lsl#16 @@ -1090,7 +1064,9 @@ _armv4_AES_decrypt: and r9,lr,r3 @ i2 ldrb r9,[r10,r9] @ Td4[s3>>0] - ldrb r3,[r10,r3,lsr#24] @ Td4[s3>>24] + ARM( ldrb r3,[r10,r3,lsr#24] ) @ Td4[s3>>24] + THUMB( add r3,r10,r3,lsr#24 ) @ Td4[s3>>24] + THUMB( ldrb r3,[r3] ) eor r0,r0,r7,lsl#16 ldr r7,[r11,#0] eor r1,r1,r8,lsl#8 diff --git a/arch/arm/crypto/sha1-armv4-large.S b/arch/arm/crypto/sha1-armv4-large.S index 7050ab1..92c6eed 100644 --- a/arch/arm/crypto/sha1-armv4-large.S +++ b/arch/arm/crypto/sha1-armv4-large.S @@ -51,13 +51,12 @@ @ Profiler-assisted and platform-specific optimization resulted in 10% @ improvement on Cortex A8 core and 12.2 cycles per byte. -.text +#include -.global sha1_block_data_order -.type sha1_block_data_order,%function +.text .align 2 -sha1_block_data_order: +ENTRY(sha1_block_data_order) stmdb sp!,{r4-r12,lr} add r2,r1,r2,lsl#6 @ r2 to point at the end of r1 ldmia r0,{r3,r4,r5,r6,r7} @@ -194,7 +193,7 @@ sha1_block_data_order: eor r10,r10,r7,ror#2 @ F_00_19(B,C,D) str r9,[r14,#-4]! 
add r3,r3,r10 @ E+=F_00_19(B,C,D) - teq r14,sp + cmp r14,sp bne .L_00_15 @ [((11+4)*5+2)*3] #if __ARM_ARCH__<7 ldrb r10,[r1,#2] @@ -374,7 +373,9 @@ sha1_block_data_order: @ F_xx_xx add r3,r3,r9 @ E+=X[i] add r3,r3,r10 @ E+=F_20_39(B,C,D) - teq r14,sp @ preserve carry + ARM( teq r14,sp ) @ preserve carry + THUMB( mov r11,sp ) + THUMB( teq r14,r11 ) @ preserve carry bne .L_20_39_or_60_79 @ [+((12+3)*5+2)*4] bcs .L_done @ [+((12+3)*5+2)*4], spare 300 bytes @@ -466,7 +467,7 @@ sha1_block_data_order: add r3,r3,r9 @ E+=X[i] add r3,r3,r10 @ E+=F_40_59(B,C,D) add r3,r3,r11,ror#2 - teq r14,sp + cmp r14,sp bne .L_40_59 @ [+((12+5)*5+2)*4] ldr r8,.LK_60_79 @@ -485,19 +486,12 @@ sha1_block_data_order: teq r1,r2 bne .Lloop @ [+18], total 1307 -#if __ARM_ARCH__>=5 ldmia sp!,{r4-r12,pc} -#else - ldmia sp!,{r4-r12,lr} - tst lr,#1 - moveq pc,lr @ be binary compatible with V4, yet - .word 0xe12fff1e @ interoperable with Thumb ISA:-) -#endif .align 2 .LK_00_19: .word 0x5a827999 .LK_20_39: .word 0x6ed9eba1 .LK_40_59: .word 0x8f1bbcdc .LK_60_79: .word 0xca62c1d6 -.size sha1_block_data_order,.-sha1_block_data_order +ENDPROC(sha1_block_data_order) .asciz "SHA1 block transform for ARMv4, CRYPTOGAMS by " .align 2 -- cgit v0.10.2 From 0a301110b7bd33ef10164c184fe2c1d8c4c3ab6b Mon Sep 17 00:00:00 2001 From: Russell King Date: Mon, 14 Jan 2013 15:54:28 +0000 Subject: ARM: smp: remove wrapper functions Remove some silly wrapper functions which aren't really required: platform_smp_prepare_cpus platform_secondary_init platform_cpu_die This simplifies the code. Signed-off-by: Russell King diff --git a/arch/arm/kernel/smp.c b/arch/arm/kernel/smp.c index 84f4cbf..365c8d9 100644 --- a/arch/arm/kernel/smp.c +++ b/arch/arm/kernel/smp.c @@ -125,18 +125,6 @@ void __init smp_init_cpus(void) smp_ops.smp_init_cpus(); } -static void __init platform_smp_prepare_cpus(unsigned int max_cpus) -{ - if (smp_ops.smp_prepare_cpus) - smp_ops.smp_prepare_cpus(max_cpus); -} - -static void __cpuinit platform_secondary_init(unsigned int cpu) -{ - if (smp_ops.smp_secondary_init) - smp_ops.smp_secondary_init(cpu); -} - int __cpuinit boot_secondary(unsigned int cpu, struct task_struct *idle) { if (smp_ops.smp_boot_secondary) @@ -154,12 +142,6 @@ static int platform_cpu_kill(unsigned int cpu) return 1; } -static void platform_cpu_die(unsigned int cpu) -{ - if (smp_ops.cpu_die) - smp_ops.cpu_die(cpu); -} - static int platform_cpu_disable(unsigned int cpu) { if (smp_ops.cpu_disable) @@ -257,7 +239,8 @@ void __ref cpu_die(void) * actual CPU shutdown procedure is at least platform (if not * CPU) specific. */ - platform_cpu_die(cpu); + if (smp_ops.cpu_die) + smp_ops.cpu_die(cpu); /* * Do not return to the idle loop - jump back to the secondary @@ -324,7 +307,8 @@ asmlinkage void __cpuinit secondary_start_kernel(void) /* * Give the platform a chance to do its own initialisation. */ - platform_secondary_init(cpu); + if (smp_ops.smp_secondary_init) + smp_ops.smp_secondary_init(cpu); notify_cpu_starting(cpu); @@ -399,8 +383,8 @@ void __init smp_prepare_cpus(unsigned int max_cpus) /* * Initialise the present map, which describes the set of CPUs * actually populated at the present time. A platform should - * re-initialize the map in platform_smp_prepare_cpus() if - * present != possible (e.g. physical hotplug). + * re-initialize the map in the platforms smp_prepare_cpus() + * if present != possible (e.g. physical hotplug). 
*/
	init_cpu_present(cpu_possible_mask);

	/*
	 * Initialise the SCU if there are more than one CPU
	 * and let them know where to start.
	 */
-	platform_smp_prepare_cpus(max_cpus);
+	if (smp_ops.smp_prepare_cpus)
+		smp_ops.smp_prepare_cpus(max_cpus);
 	}
 }
-- cgit v0.10.2 From 8259293aee1d44d7ebf0c644ef48db835653ebc3 Mon Sep 17 00:00:00 2001 From: Ulf Hansson Date: Wed, 9 Jan 2013 11:15:26 +0100 Subject: ARM: 7622/1: mmc: mmci: Gate the clock in runtime suspend to save power The amba bus already performs the same action for the apb_pclk. So here we just make sure the clock to the card is gated as well to save more power. At runtime resume we will thus restore the clock again. Signed-off-by: Ulf Hansson Acked-by: Kevin Liu Signed-off-by: Russell King diff --git a/drivers/mmc/host/mmci.c b/drivers/mmc/host/mmci.c index 3094ea3e..b43e1ab 100644 --- a/drivers/mmc/host/mmci.c +++ b/drivers/mmc/host/mmci.c @@ -1614,8 +1614,37 @@ static int mmci_resume(struct device *dev)
 }
 #endif

+#ifdef CONFIG_PM_RUNTIME
+static int mmci_runtime_suspend(struct device *dev)
+{
+	struct amba_device *adev = to_amba_device(dev);
+	struct mmc_host *mmc = amba_get_drvdata(adev);
+
+	if (mmc) {
+		struct mmci_host *host = mmc_priv(mmc);
+		clk_disable_unprepare(host->clk);
+	}
+
+	return 0;
+}
+
+static int mmci_runtime_resume(struct device *dev)
+{
+	struct amba_device *adev = to_amba_device(dev);
+	struct mmc_host *mmc = amba_get_drvdata(adev);
+
+	if (mmc) {
+		struct mmci_host *host = mmc_priv(mmc);
+		clk_prepare_enable(host->clk);
+	}
+
+	return 0;
+}
+#endif
+
 static const struct dev_pm_ops mmci_dev_pm_ops = {
 	SET_SYSTEM_SLEEP_PM_OPS(mmci_suspend, mmci_resume)
+	SET_RUNTIME_PM_OPS(mmci_runtime_suspend, mmci_runtime_resume, NULL)
 };

 static struct amba_id mmci_ids[] = {
-- cgit v0.10.2 From f4670daef5e81aa312f5a18f571dbd8169ce079e Mon Sep 17 00:00:00 2001 From: Ulf Hansson Date: Wed, 9 Jan 2013 17:19:54 +0100 Subject: ARM: 7623/1: mmc: mmci: Fixup clock gating when freq is 0 for ST-variants In the ST Micro variant, the MMCICLOCK register must not be used to gate the clock. Instead, gate the clock via the MMCIPOWER register, by clearing the PWR_ON bit.
Signed-off-by: Johan Rudholm Signed-off-by: Ulf Hansson Signed-off-by: Russell King diff --git a/drivers/mmc/host/mmci.c b/drivers/mmc/host/mmci.c index b43e1ab..04efcdf 100644 --- a/drivers/mmc/host/mmci.c +++ b/drivers/mmc/host/mmci.c @@ -60,6 +60,7 @@ static unsigned int fmax = 515633;
 * @blksz_datactrl16: true if Block size is at b16..b30 position in datactrl register
 * @pwrreg_powerup: power up value for MMCIPOWER register
 * @signal_direction: input/out direction of bus signals can be indicated
+ * @pwrreg_clkgate: MMCIPOWER register must be used to gate the clock
 */
 struct variant_data {
 	unsigned int		clkreg;
@@ -72,6 +73,7 @@ struct variant_data {
 	bool			blksz_datactrl16;
 	u32			pwrreg_powerup;
 	bool			signal_direction;
+	bool			pwrreg_clkgate;
 };

 static struct variant_data variant_arm = {
@@ -96,6 +98,7 @@ static struct variant_data variant_u300 = {
 	.sdio			= true,
 	.pwrreg_powerup		= MCI_PWR_ON,
 	.signal_direction	= true,
+	.pwrreg_clkgate		= true,
 };

 static struct variant_data variant_nomadik = {
@@ -107,6 +110,7 @@
 	.st_clkdiv		= true,
 	.pwrreg_powerup		= MCI_PWR_ON,
 	.signal_direction	= true,
+	.pwrreg_clkgate		= true,
 };

 static struct variant_data variant_ux500 = {
@@ -119,6 +123,7 @@
 	.st_clkdiv		= true,
 	.pwrreg_powerup		= MCI_PWR_ON,
 	.signal_direction	= true,
+	.pwrreg_clkgate		= true,
 };

 static struct variant_data variant_ux500v2 = {
@@ -132,6 +137,7 @@
 	.blksz_datactrl16	= true,
 	.pwrreg_powerup		= MCI_PWR_ON,
 	.signal_direction	= true,
+	.pwrreg_clkgate		= true,
 };

 /*
@@ -1150,6 +1156,13 @@ static void mmci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
 		}
 	}

+	/*
+	 * If clock = 0 and the variant requires the MMCIPOWER to be used for
+	 * gating the clock, the MCI_PWR_ON bit is cleared.
+	 */
+	if (!ios->clock && variant->pwrreg_clkgate)
+		pwr &= ~MCI_PWR_ON;
+
 	spin_lock_irqsave(&host->lock, flags);

 	mmci_set_clkreg(host, ios->clock);
-- cgit v0.10.2 From 3a37298ada749897ec2c5c1cdcd5932218eb2928 Mon Sep 17 00:00:00 2001 From: Pawel Moll Date: Thu, 24 Jan 2013 14:12:45 +0100 Subject: ARM: 7631/1: mmc: mmci: Add new VE MMCI variant The Versatile Express IOFPGA as shipped on VECD 5.0 (bitfiles v108/208 and v116/216) contains a modified version of the PL180 MMCI, with PeriphID Configuration value changed to 0x2. This version adds an optional "hardware flow control" feature. When enabled, the MMC card clock will be automatically disabled when the FIFO is about to over/underflow and re-enabled once the host has retrieved some data. This makes the controller immune to over/underrun errors caused by big interrupt handling latencies. This patch adds the relevant device variant to the driver.
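For context, the new variant below is selected through the AMBA bus id/mask tables; a minimal sketch of the matching rule (the helper name is invented for illustration):

	/* A device matches a table entry when its masked peripheral ID
	 * equals the entry's id. */
	static int amba_id_matches(u32 periphid, u32 id, u32 mask)
	{
		return (periphid & mask) == id;
	}

So an IOFPGA MMCI cell whose PeriphID reads 0x02041180 is claimed by the 0x02041180/0xff0fffff entry added below, while other PL180 revisions continue to match the existing entries.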
Signed-off-by: Pawel Moll Reviewed-by: Linus Walleij Acked-by: Ulf Hansson Signed-off-by: Russell King diff --git a/drivers/mmc/host/mmci.c b/drivers/mmc/host/mmci.c index 04efcdf..dee9b73 100644 --- a/drivers/mmc/host/mmci.c +++ b/drivers/mmc/host/mmci.c @@ -90,6 +90,14 @@ static struct variant_data variant_arm_extended_fifo = {
 	.pwrreg_powerup		= MCI_PWR_UP,
 };

+static struct variant_data variant_arm_extended_fifo_hwfc = {
+	.fifosize		= 128 * 4,
+	.fifohalfsize		= 64 * 4,
+	.clkreg_enable		= MCI_ARM_HWFCEN,
+	.datalength_bits	= 16,
+	.pwrreg_powerup		= MCI_PWR_UP,
+};
+
 static struct variant_data variant_u300 = {
 	.fifosize		= 16 * 4,
 	.fifohalfsize		= 8 * 4,
@@ -1672,6 +1680,11 @@ static struct amba_id mmci_ids[] = {
 		.data	= &variant_arm_extended_fifo,
 	},
 	{
+		.id	= 0x02041180,
+		.mask	= 0xff0fffff,
+		.data	= &variant_arm_extended_fifo_hwfc,
+	},
+	{
 		.id	= 0x00041181,
 		.mask	= 0x000fffff,
 		.data	= &variant_arm,
diff --git a/drivers/mmc/host/mmci.h b/drivers/mmc/host/mmci.h index a22213e..1f33ad5 100644 --- a/drivers/mmc/host/mmci.h +++ b/drivers/mmc/host/mmci.h @@ -28,6 +28,8 @@
 #define MCI_ST_UX500_NEG_EDGE	(1 << 13)
 #define MCI_ST_UX500_HWFCEN	(1 << 14)
 #define MCI_ST_UX500_CLK_INV	(1 << 15)
+/* Modified PL180 on Versatile Express platform */
+#define MCI_ARM_HWFCEN		(1 << 12)

 #define MMCIARGUMENT		0x008
 #define MMCICOMMAND		0x00c
-- cgit v0.10.2 From 20e260b6f4f717c100620122f626a2c06a4cfd72 Mon Sep 17 00:00:00 2001 From: Will Deacon Date: Thu, 24 Jan 2013 14:47:38 +0100 Subject: ARM: 7632/1: spinlock: avoid exclusive accesses on unlock() path When unlocking a spinlock, all we need to do is increment the owner field of the lock. Since only one CPU can be performing an unlock() operation for a given lock, this doesn't need to be exclusive. This patch simplifies arch_spin_unlock to use non-exclusive accesses when updating the owner field of the lock. Signed-off-by: Will Deacon Signed-off-by: Russell King diff --git a/arch/arm/include/asm/spinlock.h b/arch/arm/include/asm/spinlock.h index b4ca707..6220e9f 100644 --- a/arch/arm/include/asm/spinlock.h +++ b/arch/arm/include/asm/spinlock.h @@ -119,22 +119,8 @@ static inline int arch_spin_trylock(arch_spinlock_t *lock)

 static inline void arch_spin_unlock(arch_spinlock_t *lock)
 {
-	unsigned long tmp;
-	u32 slock;
-
 	smp_mb();
-
-	__asm__ __volatile__(
-"	mov	%1, #1\n"
-"1:	ldrex	%0, [%2]\n"
-"	uadd16	%0, %0, %1\n"
-"	strex	%1, %0, [%2]\n"
-"	teq	%1, #0\n"
-"	bne	1b"
-	: "=&r" (slock), "=&r" (tmp)
-	: "r" (&lock->slock)
-	: "cc");
-
+	lock->tickets.owner++;
 	dsb_sev();
 }
-- cgit v0.10.2 From 653a761e4ba70bf12174dedcd5fbbf54f431abb3 Mon Sep 17 00:00:00 2001 From: Ulf Hansson Date: Mon, 21 Jan 2013 21:29:34 +0100 Subject: ARM: 7630/1: mmc: mmci: Fixup and cleanup code for DMA handling The cookie is now used to indicate if dma_unmap_sg shall be done in post_request. At DMA errors, the DMA job is immediately not only terminated but also unmapped. To indicate that this has been done, the cookie is reset to zero. post_request will thus only do dma_unmap_sg for requests which have a cookie not set to zero. Some corresponding duplicated code could then be removed, and a bug where the same DMA job could be terminated twice at DMA errors has also been fixed.
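The cookie convention above can be summarised in miniature; a toy model (plain C, names invented, not kernel code) of the invariant that post_request relies on:

	/* host_cookie != 0 means "buffer is DMA-mapped, post_req must unmap". */
	struct toy_data { int host_cookie; int mapped; };

	static void toy_pre_req(struct toy_data *d, int cookie)
	{
		d->mapped = 1;			/* dma_map_sg() in the real driver */
		d->host_cookie = cookie;	/* non-zero: cleanup is pending */
	}

	static void toy_dma_error(struct toy_data *d)
	{
		d->mapped = 0;			/* terminated and unmapped at once */
		d->host_cookie = 0;		/* nothing left for post_req to undo */
	}

	static void toy_post_req(struct toy_data *d)
	{
		if (d->host_cookie) {		/* skipped if the error path cleaned up */
			d->mapped = 0;
			d->host_cookie = 0;
		}
	}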
Signed-off-by: Ulf Hansson Signed-off-by: Per Forlin Acked-by: Linus Walleij Signed-off-by: Russell King diff --git a/drivers/mmc/host/mmci.c b/drivers/mmc/host/mmci.c index dee9b73..372e921 100644 --- a/drivers/mmc/host/mmci.c +++ b/drivers/mmc/host/mmci.c @@ -149,6 +149,24 @@ static struct variant_data variant_ux500v2 = { }; /* + * Validate mmc prerequisites + */ +static int mmci_validate_data(struct mmci_host *host, + struct mmc_data *data) +{ + if (!data) + return 0; + + if (!is_power_of_2(data->blksz)) { + dev_err(mmc_dev(host->mmc), + "unsupported block size (%d bytes)\n", data->blksz); + return -EINVAL; + } + + return 0; +} + +/* * This must be called with host->lock held */ static void mmci_write_clkreg(struct mmci_host *host, u32 clk) @@ -370,10 +388,33 @@ static inline void mmci_dma_release(struct mmci_host *host) host->dma_rx_channel = host->dma_tx_channel = NULL; } +static void mmci_dma_data_error(struct mmci_host *host) +{ + dev_err(mmc_dev(host->mmc), "error during DMA transfer!\n"); + dmaengine_terminate_all(host->dma_current); + host->dma_current = NULL; + host->dma_desc_current = NULL; + host->data->host_cookie = 0; +} + static void mmci_dma_unmap(struct mmci_host *host, struct mmc_data *data) { - struct dma_chan *chan = host->dma_current; + struct dma_chan *chan; enum dma_data_direction dir; + + if (data->flags & MMC_DATA_READ) { + dir = DMA_FROM_DEVICE; + chan = host->dma_rx_channel; + } else { + dir = DMA_TO_DEVICE; + chan = host->dma_tx_channel; + } + + dma_unmap_sg(chan->device->dev, data->sg, data->sg_len, dir); +} + +static void mmci_dma_finalize(struct mmci_host *host, struct mmc_data *data) +{ u32 status; int i; @@ -392,19 +433,13 @@ static void mmci_dma_unmap(struct mmci_host *host, struct mmc_data *data) * contiguous buffers. On TX, we'll get a FIFO underrun error. */ if (status & MCI_RXDATAAVLBLMASK) { - dmaengine_terminate_all(chan); + mmci_dma_data_error(host); if (!data->error) data->error = -EIO; } - if (data->flags & MMC_DATA_WRITE) { - dir = DMA_TO_DEVICE; - } else { - dir = DMA_FROM_DEVICE; - } - if (!data->host_cookie) - dma_unmap_sg(chan->device->dev, data->sg, data->sg_len, dir); + mmci_dma_unmap(host, data); /* * Use of DMA with scatter-gather is impossible. @@ -414,16 +449,15 @@ static void mmci_dma_unmap(struct mmci_host *host, struct mmc_data *data) dev_err(mmc_dev(host->mmc), "buggy DMA detected. 
Taking evasive action.\n"); mmci_dma_release(host); } -} -static void mmci_dma_data_error(struct mmci_host *host) -{ - dev_err(mmc_dev(host->mmc), "error during DMA transfer!\n"); - dmaengine_terminate_all(host->dma_current); + host->dma_current = NULL; + host->dma_desc_current = NULL; } -static int mmci_dma_prep_data(struct mmci_host *host, struct mmc_data *data, - struct mmci_host_next *next) +/* prepares DMA channel and DMA descriptor, returns non-zero on failure */ +static int __mmci_dma_prep_data(struct mmci_host *host, struct mmc_data *data, + struct dma_chan **dma_chan, + struct dma_async_tx_descriptor **dma_desc) { struct variant_data *variant = host->variant; struct dma_slave_config conf = { @@ -441,16 +475,6 @@ static int mmci_dma_prep_data(struct mmci_host *host, struct mmc_data *data, enum dma_data_direction buffer_dirn; int nr_sg; - /* Check if next job is already prepared */ - if (data->host_cookie && !next && - host->dma_current && host->dma_desc_current) - return 0; - - if (!next) { - host->dma_current = NULL; - host->dma_desc_current = NULL; - } - if (data->flags & MMC_DATA_READ) { conf.direction = DMA_DEV_TO_MEM; buffer_dirn = DMA_FROM_DEVICE; @@ -480,29 +504,41 @@ static int mmci_dma_prep_data(struct mmci_host *host, struct mmc_data *data, if (!desc) goto unmap_exit; - if (next) { - next->dma_chan = chan; - next->dma_desc = desc; - } else { - host->dma_current = chan; - host->dma_desc_current = desc; - } + *dma_chan = chan; + *dma_desc = desc; return 0; unmap_exit: - if (!next) - dmaengine_terminate_all(chan); dma_unmap_sg(device->dev, data->sg, data->sg_len, buffer_dirn); return -ENOMEM; } +static inline int mmci_dma_prep_data(struct mmci_host *host, + struct mmc_data *data) +{ + /* Check if next job is already prepared. */ + if (host->dma_current && host->dma_desc_current) + return 0; + + /* No job were prepared thus do it now. 
*/ + return __mmci_dma_prep_data(host, data, &host->dma_current, + &host->dma_desc_current); +} + +static inline int mmci_dma_prep_next(struct mmci_host *host, + struct mmc_data *data) +{ + struct mmci_host_next *nd = &host->next_data; + return __mmci_dma_prep_data(host, data, &nd->dma_chan, &nd->dma_desc); +} + static int mmci_dma_start_data(struct mmci_host *host, unsigned int datactrl) { int ret; struct mmc_data *data = host->data; - ret = mmci_dma_prep_data(host, host->data, NULL); + ret = mmci_dma_prep_data(host, host->data); if (ret) return ret; @@ -532,19 +568,11 @@ static void mmci_get_next_data(struct mmci_host *host, struct mmc_data *data) { struct mmci_host_next *next = &host->next_data; - if (data->host_cookie && data->host_cookie != next->cookie) { - pr_warning("[%s] invalid cookie: data->host_cookie %d" - " host->next_data.cookie %d\n", - __func__, data->host_cookie, host->next_data.cookie); - data->host_cookie = 0; - } - - if (!data->host_cookie) - return; + WARN_ON(data->host_cookie && data->host_cookie != next->cookie); + WARN_ON(!data->host_cookie && (next->dma_desc || next->dma_chan)); host->dma_desc_current = next->dma_desc; host->dma_current = next->dma_chan; - next->dma_desc = NULL; next->dma_chan = NULL; } @@ -559,19 +587,13 @@ static void mmci_pre_request(struct mmc_host *mmc, struct mmc_request *mrq, if (!data) return; - if (data->host_cookie) { - data->host_cookie = 0; + BUG_ON(data->host_cookie); + + if (mmci_validate_data(host, data)) return; - } - /* if config for dma */ - if (((data->flags & MMC_DATA_WRITE) && host->dma_tx_channel) || - ((data->flags & MMC_DATA_READ) && host->dma_rx_channel)) { - if (mmci_dma_prep_data(host, data, nd)) - data->host_cookie = 0; - else - data->host_cookie = ++nd->cookie < 0 ? 1 : nd->cookie; - } + if (!mmci_dma_prep_next(host, data)) + data->host_cookie = ++nd->cookie < 0 ? 1 : nd->cookie; } static void mmci_post_request(struct mmc_host *mmc, struct mmc_request *mrq, @@ -579,29 +601,23 @@ static void mmci_post_request(struct mmc_host *mmc, struct mmc_request *mrq, { struct mmci_host *host = mmc_priv(mmc); struct mmc_data *data = mrq->data; - struct dma_chan *chan; - enum dma_data_direction dir; - if (!data) + if (!data || !data->host_cookie) return; - if (data->flags & MMC_DATA_READ) { - dir = DMA_FROM_DEVICE; - chan = host->dma_rx_channel; - } else { - dir = DMA_TO_DEVICE; - chan = host->dma_tx_channel; - } + mmci_dma_unmap(host, data); + if (err) { + struct mmci_host_next *next = &host->next_data; + struct dma_chan *chan; + if (data->flags & MMC_DATA_READ) + chan = host->dma_rx_channel; + else + chan = host->dma_tx_channel; + dmaengine_terminate_all(chan); - /* if config for dma */ - if (chan) { - if (err) - dmaengine_terminate_all(chan); - if (data->host_cookie) - dma_unmap_sg(mmc_dev(host->mmc), data->sg, - data->sg_len, dir); - mrq->data->host_cookie = 0; + next->dma_desc = NULL; + next->dma_chan = NULL; } } @@ -622,6 +638,11 @@ static inline void mmci_dma_unmap(struct mmci_host *host, struct mmc_data *data) { } +static inline void mmci_dma_finalize(struct mmci_host *host, + struct mmc_data *data) +{ +} + static inline void mmci_dma_data_error(struct mmci_host *host) { } @@ -772,8 +793,10 @@ mmci_data_irq(struct mmci_host *host, struct mmc_data *data, u32 remain, success; /* Terminate the DMA transfer */ - if (dma_inprogress(host)) + if (dma_inprogress(host)) { mmci_dma_data_error(host); + mmci_dma_unmap(host, data); + } /* * Calculate how far we are into the transfer. 
Note that @@ -812,7 +835,7 @@ mmci_data_irq(struct mmci_host *host, struct mmc_data *data,

 	if (status & MCI_DATAEND || data->error) {
 		if (dma_inprogress(host))
-			mmci_dma_unmap(host, data);
+			mmci_dma_finalize(host, data);
 		mmci_stop_data(host);

 		if (!data->error)
@@ -849,8 +872,10 @@ mmci_cmd_irq(struct mmci_host *host, struct mmc_command *cmd,
 	if (!cmd->data || cmd->error) {
 		if (host->data) {
 			/* Terminate the DMA transfer */
-			if (dma_inprogress(host))
+			if (dma_inprogress(host)) {
 				mmci_dma_data_error(host);
+				mmci_dma_unmap(host, host->data);
+			}
 			mmci_stop_data(host);
 		}
 		mmci_request_end(host, cmd->mrq);
@@ -1076,10 +1101,8 @@ static void mmci_request(struct mmc_host *mmc, struct mmc_request *mrq)

 	WARN_ON(host->mrq != NULL);

-	if (mrq->data && !is_power_of_2(mrq->data->blksz)) {
-		dev_err(mmc_dev(mmc), "unsupported block size (%d bytes)\n",
-			mrq->data->blksz);
-		mrq->cmd->error = -EINVAL;
+	mrq->cmd->error = mmci_validate_data(host, mrq->data);
+	if (mrq->cmd->error) {
 		mmc_request_done(mmc, mrq);
 		return;
 	}
-- cgit v0.10.2 From 4e79a62d84b9a3b84c609d73382415c71d911a4c Mon Sep 17 00:00:00 2001 From: Barry Song Date: Mon, 4 Feb 2013 07:46:57 +0100 Subject: ARM: 7639/1: cache-l2x0: add missed dummy outer_resume entry Commit 91c2ebb90b1890a (ARM: 7114/1: cache-l2x0: add resume entry for l2 in secure mode) added a resume entry for l2 in secure mode, but it missed the dummy entry when CONFIG_CACHE_L2X0 is not set. (Commit text edited by rmk.) Signed-off-by: Barry Song Signed-off-by: Russell King diff --git a/arch/arm/include/asm/outercache.h b/arch/arm/include/asm/outercache.h index 53426c6..12f71a1 100644 --- a/arch/arm/include/asm/outercache.h +++ b/arch/arm/include/asm/outercache.h @@ -92,6 +92,7 @@ static inline void outer_flush_range(phys_addr_t start, phys_addr_t end)
 static inline void outer_flush_all(void) { }
 static inline void outer_inv_all(void) { }
 static inline void outer_disable(void) { }
+static inline void outer_resume(void) { }

 #endif
-- cgit v0.10.2 From e3e92a7be6936dff1de80e66b0b683d54e9e02d8 Mon Sep 17 00:00:00 2001 From: Linus Walleij Date: Mon, 28 Jan 2013 21:58:22 +0100 Subject: ARM: 7635/1: versatile: fix the PCI IRQ regression The PCI IRQs were regressing due to two things: - The PCI glue layer was using a hard-coded IRQ 27 offset. This caused the immediate regression. - The SIC IRQ mask was inverted (i.e. a bit was indeed set to one for each valid IRQ on the SIC, but accidentally inverted in the init call). This has been around forever, but we have been saved by some other forgiving code that would reserve IRQ descriptors in this range, as the versatile is non-sparse. When the IRQs were bumped up 32 steps so as to avoid using IRQ zero and avoid touching the 16 legacy IRQs, things broke. Introduce an explicit valid mask for the IRQs that are active on the PIC/SIC, and pass that. Use the BIT() macro from linux/bitops.h to make sure we hit the right bits, readily defined in the platform headers.
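For reference, a minimal sketch of the macro in question (BIT() as provided by linux/bitops.h at the time of this series):

	#define BIT(nr)	(1UL << (nr))

so the PIC_VALID_ALL mask in the diff below is simply a word with one bit set for each valid SIC interrupt number.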
Reported-by: Tetsuo Handa Signed-off-by: Linus Walleij Signed-off-by: Russell King diff --git a/arch/arm/mach-versatile/core.c b/arch/arm/mach-versatile/core.c index 5d59294..a78827b 100644 --- a/arch/arm/mach-versatile/core.c +++ b/arch/arm/mach-versatile/core.c @@ -36,6 +36,7 @@
 #include
 #include
 #include
+#include
 #include
 #include
@@ -65,16 +66,28 @@
 #define VA_VIC_BASE		__io_address(VERSATILE_VIC_BASE)
 #define VA_SIC_BASE		__io_address(VERSATILE_SIC_BASE)

+/* These PIC IRQs are valid in each configuration */
+#define PIC_VALID_ALL	BIT(SIC_INT_KMI0) | BIT(SIC_INT_KMI1) | \
+			BIT(SIC_INT_SCI3) | BIT(SIC_INT_UART3) | \
+			BIT(SIC_INT_CLCD) | BIT(SIC_INT_TOUCH) | \
+			BIT(SIC_INT_KEYPAD) | BIT(SIC_INT_DoC) | \
+			BIT(SIC_INT_USB) | BIT(SIC_INT_PCI0) | \
+			BIT(SIC_INT_PCI1) | BIT(SIC_INT_PCI2) | \
+			BIT(SIC_INT_PCI3)
 #if 1
 #define IRQ_MMCI0A	IRQ_VICSOURCE22
 #define IRQ_AACI	IRQ_VICSOURCE24
 #define IRQ_ETH		IRQ_VICSOURCE25
 #define PIC_MASK	0xFFD00000
+#define PIC_VALID	PIC_VALID_ALL
 #else
 #define IRQ_MMCI0A	IRQ_SIC_MMCI0A
 #define IRQ_AACI	IRQ_SIC_AACI
 #define IRQ_ETH		IRQ_SIC_ETH
 #define PIC_MASK	0
+#define PIC_VALID	PIC_VALID_ALL | BIT(SIC_INT_MMCI0A) | \
+			BIT(SIC_INT_MMCI1A) | BIT(SIC_INT_AACI) | \
+			BIT(SIC_INT_ETH)
 #endif

 /* Lookup table for finding a DT node that represents the vic instance */
@@ -102,7 +115,7 @@ void __init versatile_init_irq(void)
 					  VERSATILE_SIC_BASE);

 	fpga_irq_init(VA_SIC_BASE, "SIC", IRQ_SIC_START,
-		IRQ_VICSOURCE31, ~PIC_MASK, np);
+		IRQ_VICSOURCE31, PIC_VALID, np);

 	/*
 	 * Interrupts on secondary controller from 0 to 8 are routed to
diff --git a/arch/arm/mach-versatile/pci.c b/arch/arm/mach-versatile/pci.c index 2f84f40..e92e5e0 100644 --- a/arch/arm/mach-versatile/pci.c +++ b/arch/arm/mach-versatile/pci.c @@ -23,6 +23,7 @@
 #include
 #include
+#include
 #include
 #include
@@ -327,12 +328,12 @@ static int __init versatile_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
 	int irq;

 	/* slot,  pin,	irq
-	 *  24     1     27
-	 *  25     1     28
-	 *  26     1     29
-	 *  27     1     30
+	 *  24     1     IRQ_SIC_PCI0
+	 *  25     1     IRQ_SIC_PCI1
+	 *  26     1     IRQ_SIC_PCI2
+	 *  27     1     IRQ_SIC_PCI3
 	 */
-	irq = 27 + ((slot - 24 + pin - 1) & 3);
+	irq = IRQ_SIC_PCI0 + ((slot - 24 + pin - 1) & 3);

 	return irq;
 }
-- cgit v0.10.2 From 7c4e9ced424be4d36df6a3e3825763e97ee97607 Mon Sep 17 00:00:00 2001 From: Joonsoo Kim Date: Sat, 9 Feb 2013 05:52:45 +0100 Subject: ARM: 7643/1: sched: correct update_sched_clock() If we want to load epoch_cyc and epoch_ns atomically, we should update epoch_cyc_copy first of all. This notifies the reader that an update is in progress. If we update epoch_cyc first, as in the current implementation, there is a subtle error case. Look at the example below. cyc = 9 ns = 900 cyc_copy = 9 == CASE 1 == write cyc = 10 read cyc = 10 read ns = 900 write ns = 1000 write cyc_copy = 10 read cyc_copy = 10 output = (10, 900) == CASE 2 == read cyc = 9 write cyc = 10 write ns = 1000 read ns = 1000 read cyc_copy = 9 write cyc_copy = 10 output = (9, 1000) If an atomic read is ensured, the output should be (9, 900) or (10, 1000). But the outputs in the example cases are not. So, change the update sequence in order to correct this problem. Cc: Signed-off-by: Joonsoo Kim Signed-off-by: Russell King diff --git a/arch/arm/kernel/sched_clock.c b/arch/arm/kernel/sched_clock.c index fc6692e..bd6f56b 100644 --- a/arch/arm/kernel/sched_clock.c +++ b/arch/arm/kernel/sched_clock.c @@ -93,11 +93,11 @@ static void notrace update_sched_clock(void)
 	 * detectable in cyc_to_fixed_sched_clock().
*/ raw_local_irq_save(flags); - cd.epoch_cyc = cyc; + cd.epoch_cyc_copy = cyc; smp_wmb(); cd.epoch_ns = ns; smp_wmb(); - cd.epoch_cyc_copy = cyc; + cd.epoch_cyc = cyc; raw_local_irq_restore(flags); } -- cgit v0.10.2 From 5d1c20bce5bc1a7a94bca1324451532c28c89e36 Mon Sep 17 00:00:00 2001 From: Will Deacon Date: Thu, 31 Jan 2013 19:19:30 +0100 Subject: ARM: 7637/1: memory: use SZ_ constants for defining the virtual memory layout Parts of the virtual memory layout (mainly the modules area) are described using open-coded immediate values. Use the SZ_ definitions from linux/sizes.h instead to make the code clearer. Acked-by: Nicolas Pitre Signed-off-by: Will Deacon Signed-off-by: Russell King diff --git a/arch/arm/include/asm/memory.h b/arch/arm/include/asm/memory.h index 73cf03a..924320f 100644 --- a/arch/arm/include/asm/memory.h +++ b/arch/arm/include/asm/memory.h @@ -36,23 +36,23 @@ * TASK_UNMAPPED_BASE - the lower boundary of the mmap VM area */ #define PAGE_OFFSET UL(CONFIG_PAGE_OFFSET) -#define TASK_SIZE (UL(CONFIG_PAGE_OFFSET) - UL(0x01000000)) +#define TASK_SIZE (UL(CONFIG_PAGE_OFFSET) - UL(SZ_16M)) #define TASK_UNMAPPED_BASE (UL(CONFIG_PAGE_OFFSET) / 3) /* * The maximum size of a 26-bit user space task. */ -#define TASK_SIZE_26 UL(0x04000000) +#define TASK_SIZE_26 (UL(1) << 26) /* * The module space lives between the addresses given by TASK_SIZE * and PAGE_OFFSET - it must be within 32MB of the kernel text. */ #ifndef CONFIG_THUMB2_KERNEL -#define MODULES_VADDR (PAGE_OFFSET - 16*1024*1024) +#define MODULES_VADDR (PAGE_OFFSET - SZ_16M) #else /* smaller range for Thumb-2 symbols relocation (2^24)*/ -#define MODULES_VADDR (PAGE_OFFSET - 8*1024*1024) +#define MODULES_VADDR (PAGE_OFFSET - SZ_8M) #endif #if TASK_SIZE > MODULES_VADDR -- cgit v0.10.2 From 1b4304e5ca4e7a52369cd5c381ba5527aaa5082e Mon Sep 17 00:00:00 2001 From: Russell King Date: Mon, 11 Feb 2013 15:26:27 +0000 Subject: MAINTAINERS: add maintainer entry for AMBA serial drivers Add the proper maintainer entry for the AMBA serial drivers. Signed-off-by: Russell King diff --git a/MAINTAINERS b/MAINTAINERS index 915564e..d2a7cd4 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -673,6 +673,12 @@ ARM PRIMECELL MMCI PL180/1 DRIVER S: Orphan F: drivers/mmc/host/mmci.* +ARM PRIMECELL UART PL010 AND PL011 DRIVERS +M: Russell King +S: Maintained +F: drivers/tty/serial/amba-pl01*.c +F: include/linux/amba/serial.h + ARM PRIMECELL BUS SUPPORT M: Russell King S: Maintained -- cgit v0.10.2 From 2f748aaad7f4575bc5377072fc9d5707e48e6733 Mon Sep 17 00:00:00 2001 From: Russell King Date: Mon, 11 Feb 2013 15:28:05 +0000 Subject: MAINTAINERS: add additional file for MMCI driver The MMCI driver has an additional file in include/linux/amba. Include this file in its maintainer entry. 
Signed-off-by: Russell King diff --git a/MAINTAINERS b/MAINTAINERS index d2a7cd4..914e236 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -672,6 +672,7 @@ F: include/linux/amba/kmi.h ARM PRIMECELL MMCI PL180/1 DRIVER S: Orphan F: drivers/mmc/host/mmci.* +F: include/linux/amba/mmci.h ARM PRIMECELL UART PL010 AND PL011 DRIVERS M: Russell King -- cgit v0.10.2 From 08a5c9a2bb5d572b3bde189ab80f8b6ee71a1b30 Mon Sep 17 00:00:00 2001 From: Russell King Date: Mon, 11 Feb 2013 15:28:51 +0000 Subject: MAINTAINERS: Re-assert MMCI driver maintainer status Signed-off-by: Russell King diff --git a/MAINTAINERS b/MAINTAINERS index 914e236..c6c1973 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -670,7 +670,8 @@ F: drivers/input/serio/ambakmi.* F: include/linux/amba/kmi.h ARM PRIMECELL MMCI PL180/1 DRIVER -S: Orphan +M: Russell King +S: Maintained F: drivers/mmc/host/mmci.* F: include/linux/amba/mmci.h -- cgit v0.10.2 From 48dc369d21b41f6400d15cdaf9411e2e6fd62323 Mon Sep 17 00:00:00 2001 From: Joonsoo Kim Date: Sat, 9 Feb 2013 06:28:04 +0100 Subject: ARM: 7644/1: vmregion: remove vmregion code entirely Now, there is no user for vmregion. So remove it. Acked-by: Nicolas Pitre Tested-by: Santosh Shilimkar Signed-off-by: Joonsoo Kim Signed-off-by: Russell King diff --git a/arch/arm/mm/Makefile b/arch/arm/mm/Makefile index 8a9c4cb..4e333fa 100644 --- a/arch/arm/mm/Makefile +++ b/arch/arm/mm/Makefile @@ -6,7 +6,7 @@ obj-y := dma-mapping.o extable.o fault.o init.o \ iomap.o obj-$(CONFIG_MMU) += fault-armv.o flush.o idmap.o ioremap.o \ - mmap.o pgd.o mmu.o vmregion.o + mmap.o pgd.o mmu.o ifneq ($(CONFIG_MMU),y) obj-y += nommu.o diff --git a/arch/arm/mm/vmregion.c b/arch/arm/mm/vmregion.c deleted file mode 100644 index a631016..0000000 --- a/arch/arm/mm/vmregion.c +++ /dev/null @@ -1,205 +0,0 @@ -#include -#include -#include -#include -#include -#include - -#include "vmregion.h" - -/* - * VM region handling support. - * - * This should become something generic, handling VM region allocations for - * vmalloc and similar (ioremap, module space, etc). - * - * I envisage vmalloc()'s supporting vm_struct becoming: - * - * struct vm_struct { - * struct vmregion region; - * unsigned long flags; - * struct page **pages; - * unsigned int nr_pages; - * unsigned long phys_addr; - * }; - * - * get_vm_area() would then call vmregion_alloc with an appropriate - * struct vmregion head (eg): - * - * struct vmregion vmalloc_head = { - * .vm_list = LIST_HEAD_INIT(vmalloc_head.vm_list), - * .vm_start = VMALLOC_START, - * .vm_end = VMALLOC_END, - * }; - * - * However, vmalloc_head.vm_start is variable (typically, it is dependent on - * the amount of RAM found at boot time.) I would imagine that get_vm_area() - * would have to initialise this each time prior to calling vmregion_alloc(). 
- */ - -struct arm_vmregion * -arm_vmregion_alloc(struct arm_vmregion_head *head, size_t align, - size_t size, gfp_t gfp, const void *caller) -{ - unsigned long start = head->vm_start, addr = head->vm_end; - unsigned long flags; - struct arm_vmregion *c, *new; - - if (head->vm_end - head->vm_start < size) { - printk(KERN_WARNING "%s: allocation too big (requested %#x)\n", - __func__, size); - goto out; - } - - new = kmalloc(sizeof(struct arm_vmregion), gfp); - if (!new) - goto out; - - new->caller = caller; - - spin_lock_irqsave(&head->vm_lock, flags); - - addr = rounddown(addr - size, align); - list_for_each_entry_reverse(c, &head->vm_list, vm_list) { - if (addr >= c->vm_end) - goto found; - addr = rounddown(c->vm_start - size, align); - if (addr < start) - goto nospc; - } - - found: - /* - * Insert this entry after the one we found. - */ - list_add(&new->vm_list, &c->vm_list); - new->vm_start = addr; - new->vm_end = addr + size; - new->vm_active = 1; - - spin_unlock_irqrestore(&head->vm_lock, flags); - return new; - - nospc: - spin_unlock_irqrestore(&head->vm_lock, flags); - kfree(new); - out: - return NULL; -} - -static struct arm_vmregion *__arm_vmregion_find(struct arm_vmregion_head *head, unsigned long addr) -{ - struct arm_vmregion *c; - - list_for_each_entry(c, &head->vm_list, vm_list) { - if (c->vm_active && c->vm_start == addr) - goto out; - } - c = NULL; - out: - return c; -} - -struct arm_vmregion *arm_vmregion_find(struct arm_vmregion_head *head, unsigned long addr) -{ - struct arm_vmregion *c; - unsigned long flags; - - spin_lock_irqsave(&head->vm_lock, flags); - c = __arm_vmregion_find(head, addr); - spin_unlock_irqrestore(&head->vm_lock, flags); - return c; -} - -struct arm_vmregion *arm_vmregion_find_remove(struct arm_vmregion_head *head, unsigned long addr) -{ - struct arm_vmregion *c; - unsigned long flags; - - spin_lock_irqsave(&head->vm_lock, flags); - c = __arm_vmregion_find(head, addr); - if (c) - c->vm_active = 0; - spin_unlock_irqrestore(&head->vm_lock, flags); - return c; -} - -void arm_vmregion_free(struct arm_vmregion_head *head, struct arm_vmregion *c) -{ - unsigned long flags; - - spin_lock_irqsave(&head->vm_lock, flags); - list_del(&c->vm_list); - spin_unlock_irqrestore(&head->vm_lock, flags); - - kfree(c); -} - -#ifdef CONFIG_PROC_FS -static int arm_vmregion_show(struct seq_file *m, void *p) -{ - struct arm_vmregion *c = list_entry(p, struct arm_vmregion, vm_list); - - seq_printf(m, "0x%08lx-0x%08lx %7lu", c->vm_start, c->vm_end, - c->vm_end - c->vm_start); - if (c->caller) - seq_printf(m, " %pS", (void *)c->caller); - seq_putc(m, '\n'); - return 0; -} - -static void *arm_vmregion_start(struct seq_file *m, loff_t *pos) -{ - struct arm_vmregion_head *h = m->private; - spin_lock_irq(&h->vm_lock); - return seq_list_start(&h->vm_list, *pos); -} - -static void *arm_vmregion_next(struct seq_file *m, void *p, loff_t *pos) -{ - struct arm_vmregion_head *h = m->private; - return seq_list_next(p, &h->vm_list, pos); -} - -static void arm_vmregion_stop(struct seq_file *m, void *p) -{ - struct arm_vmregion_head *h = m->private; - spin_unlock_irq(&h->vm_lock); -} - -static const struct seq_operations arm_vmregion_ops = { - .start = arm_vmregion_start, - .stop = arm_vmregion_stop, - .next = arm_vmregion_next, - .show = arm_vmregion_show, -}; - -static int arm_vmregion_open(struct inode *inode, struct file *file) -{ - struct arm_vmregion_head *h = PDE(inode)->data; - int ret = seq_open(file, &arm_vmregion_ops); - if (!ret) { - struct seq_file *m = file->private_data; - 
m->private = h;
-	}
-	return ret;
-}
-
-static const struct file_operations arm_vmregion_fops = {
-	.open = arm_vmregion_open,
-	.read = seq_read,
-	.llseek = seq_lseek,
-	.release = seq_release,
-};
-
-int arm_vmregion_create_proc(const char *path, struct arm_vmregion_head *h)
-{
-	proc_create_data(path, S_IRUSR, NULL, &arm_vmregion_fops, h);
-	return 0;
-}
-#else
-int arm_vmregion_create_proc(const char *path, struct arm_vmregion_head *h)
-{
-	return 0;
-}
-#endif
diff --git a/arch/arm/mm/vmregion.h b/arch/arm/mm/vmregion.h deleted file mode 100644 index 0f5a5f2..0000000 --- a/arch/arm/mm/vmregion.h +++ /dev/null @@ -1,31 +0,0 @@
-#ifndef VMREGION_H
-#define VMREGION_H
-
-#include
-#include
-
-struct page;
-
-struct arm_vmregion_head {
-	spinlock_t	vm_lock;
-	struct list_head vm_list;
-	unsigned long	vm_start;
-	unsigned long	vm_end;
-};
-
-struct arm_vmregion {
-	struct list_head	vm_list;
-	unsigned long		vm_start;
-	unsigned long		vm_end;
-	int			vm_active;
-	const void		*caller;
-};
-
-struct arm_vmregion *arm_vmregion_alloc(struct arm_vmregion_head *, size_t, size_t, gfp_t, const void *);
-struct arm_vmregion *arm_vmregion_find(struct arm_vmregion_head *, unsigned long);
-struct arm_vmregion *arm_vmregion_find_remove(struct arm_vmregion_head *, unsigned long);
-void arm_vmregion_free(struct arm_vmregion_head *, struct arm_vmregion *);
-
-int arm_vmregion_create_proc(const char *, struct arm_vmregion_head *);
-
-#endif
-- cgit v0.10.2 From ed8fd2186a4e4f3b98434093b56f9b793d48443e Mon Sep 17 00:00:00 2001 From: Joonsoo Kim Date: Sat, 9 Feb 2013 06:28:05 +0100 Subject: ARM: 7645/1: ioremap: introduce an infrastructure for static mapped area In the current implementation, we use an ARM-specific flag, VM_ARM_STATIC_MAPPING, to distinguish ARM-specific static mapped areas. The purpose of a static mapped area is to be re-used when the entire physical address range of an ioremap request can be covered by it. This implementation causes needless overhead in some cases. For example, assume that there is only one static mapped area and vmlist has 300 areas. Every time we call ioremap, we check all 300 areas to decide whether one matches. Moreover, even if there is no static mapped area at all, we still check all 300 areas on every ioremap call. If we construct an extra list for static mapped areas, we can eliminate the above-mentioned overhead. With an extra list, if there is one static mapped area, we check just that one area and proceed to the next operation quickly. In fact, this is not a critical problem, because ioremap is not frequently used, but reducing the overhead is still worthwhile. Another reason for doing this work is to remove the architecture dependency on the vmalloc layer: vmlist and vmlist_lock are internal data structures of the vmalloc layer. Some code for debugging and statistics inevitably uses them, but it is preferable that they are used as little as possible outside of vmalloc.c. Now, introduce an ARM-specific infrastructure for static mapped areas. The following patch will use it to resolve the above-mentioned problem.
Reviewed-by: Nicolas Pitre Tested-by: Santosh Shilimkar Signed-off-by: Joonsoo Kim Signed-off-by: Russell King diff --git a/arch/arm/mm/ioremap.c b/arch/arm/mm/ioremap.c index 88fd86c..904c15e 100644 --- a/arch/arm/mm/ioremap.c +++ b/arch/arm/mm/ioremap.c @@ -39,6 +39,70 @@
 #include

 #include "mm.h"
+
+LIST_HEAD(static_vmlist);
+
+static struct static_vm *find_static_vm_paddr(phys_addr_t paddr,
+			size_t size, unsigned int mtype)
+{
+	struct static_vm *svm;
+	struct vm_struct *vm;
+
+	list_for_each_entry(svm, &static_vmlist, list) {
+		vm = &svm->vm;
+		if (!(vm->flags & VM_ARM_STATIC_MAPPING))
+			continue;
+		if ((vm->flags & VM_ARM_MTYPE_MASK) != VM_ARM_MTYPE(mtype))
+			continue;
+
+		if (vm->phys_addr > paddr ||
+			paddr + size - 1 > vm->phys_addr + vm->size - 1)
+			continue;
+
+		return svm;
+	}
+
+	return NULL;
+}
+
+struct static_vm *find_static_vm_vaddr(void *vaddr)
+{
+	struct static_vm *svm;
+	struct vm_struct *vm;
+
+	list_for_each_entry(svm, &static_vmlist, list) {
+		vm = &svm->vm;
+
+		/* static_vmlist is ascending order */
+		if (vm->addr > vaddr)
+			break;
+
+		if (vm->addr <= vaddr && vm->addr + vm->size > vaddr)
+			return svm;
+	}
+
+	return NULL;
+}
+
+void __init add_static_vm_early(struct static_vm *svm)
+{
+	struct static_vm *curr_svm;
+	struct vm_struct *vm;
+	void *vaddr;
+
+	vm = &svm->vm;
+	vm_area_add_early(vm);
+	vaddr = vm->addr;
+
+	list_for_each_entry(curr_svm, &static_vmlist, list) {
+		vm = &curr_svm->vm;
+
+		if (vm->addr > vaddr)
+			break;
+	}
+	list_add_tail(&svm->list, &curr_svm->list);
+}
+
 int ioremap_page(unsigned long virt, unsigned long phys,
 		 const struct mem_type *mtype)
 {
diff --git a/arch/arm/mm/mm.h b/arch/arm/mm/mm.h index a8ee92d..d5a4e9a 100644 --- a/arch/arm/mm/mm.h +++ b/arch/arm/mm/mm.h @@ -1,4 +1,6 @@
 #ifdef CONFIG_MMU
+#include
+#include

 /* the upper-most page table pointer */
 extern pmd_t *top_pmd;
@@ -65,6 +67,16 @@ extern void __flush_dcache_page(struct address_space *mapping, struct page *page

 /* consistent regions used by dma_alloc_attrs() */
 #define VM_ARM_DMA_CONSISTENT	0x20000000
+
+struct static_vm {
+	struct vm_struct vm;
+	struct list_head list;
+};
+
+extern struct list_head static_vmlist;
+extern struct static_vm *find_static_vm_vaddr(void *vaddr);
+extern __init void add_static_vm_early(struct static_vm *svm);
+
 #endif

 #ifdef CONFIG_ZONE_DMA
-- cgit v0.10.2 From 101eeda38c0ab8a4f916176e325d9e036d981a24 Mon Sep 17 00:00:00 2001 From: Joonsoo Kim Date: Sat, 9 Feb 2013 06:28:06 +0100 Subject: ARM: 7646/1: mm: use static_vm for managing static mapped areas A static mapped area is ARM-specific, so it is better not to use the generic vmalloc data structures, vmlist and vmlist_lock, for managing it; doing so also causes some needless overhead that is worth reducing. Now we have the newly introduced static_vm infrastructure. With it, we don't need to iterate over all mapped areas; instead, we just iterate over the static mapped areas, which reduces the overhead of finding a matching area. And the architecture dependency on the vmalloc layer is removed, which will help the maintainability of the vmalloc layer.
Reviewed-by: Nicolas Pitre Acked-by: Rob Herring Tested-by: Santosh Shilimkar Signed-off-by: Joonsoo Kim Signed-off-by: Russell King diff --git a/arch/arm/mm/ioremap.c b/arch/arm/mm/ioremap.c index 904c15e..04d9006 100644 --- a/arch/arm/mm/ioremap.c +++ b/arch/arm/mm/ioremap.c @@ -261,13 +261,14 @@ void __iomem * __arm_ioremap_pfn_caller(unsigned long pfn, const struct mem_type *type; int err; unsigned long addr; - struct vm_struct * area; + struct vm_struct *area; + phys_addr_t paddr = __pfn_to_phys(pfn); #ifndef CONFIG_ARM_LPAE /* * High mappings must be supersection aligned */ - if (pfn >= 0x100000 && (__pfn_to_phys(pfn) & ~SUPERSECTION_MASK)) + if (pfn >= 0x100000 && (paddr & ~SUPERSECTION_MASK)) return NULL; #endif @@ -283,24 +284,16 @@ void __iomem * __arm_ioremap_pfn_caller(unsigned long pfn, /* * Try to reuse one of the static mapping whenever possible. */ - read_lock(&vmlist_lock); - for (area = vmlist; area; area = area->next) { - if (!size || (sizeof(phys_addr_t) == 4 && pfn >= 0x100000)) - break; - if (!(area->flags & VM_ARM_STATIC_MAPPING)) - continue; - if ((area->flags & VM_ARM_MTYPE_MASK) != VM_ARM_MTYPE(mtype)) - continue; - if (__phys_to_pfn(area->phys_addr) > pfn || - __pfn_to_phys(pfn) + size-1 > area->phys_addr + area->size-1) - continue; - /* we can drop the lock here as we know *area is static */ - read_unlock(&vmlist_lock); - addr = (unsigned long)area->addr; - addr += __pfn_to_phys(pfn) - area->phys_addr; - return (void __iomem *) (offset + addr); + if (size && !(sizeof(phys_addr_t) == 4 && pfn >= 0x100000)) { + struct static_vm *svm; + + svm = find_static_vm_paddr(paddr, size, mtype); + if (svm) { + addr = (unsigned long)svm->vm.addr; + addr += paddr - svm->vm.phys_addr; + return (void __iomem *) (offset + addr); + } } - read_unlock(&vmlist_lock); /* * Don't allow RAM to be mapped - this causes problems with ARMv6+ @@ -312,21 +305,21 @@ void __iomem * __arm_ioremap_pfn_caller(unsigned long pfn, if (!area) return NULL; addr = (unsigned long)area->addr; - area->phys_addr = __pfn_to_phys(pfn); + area->phys_addr = paddr; #if !defined(CONFIG_SMP) && !defined(CONFIG_ARM_LPAE) if (DOMAIN_IO == 0 && (((cpu_architecture() >= CPU_ARCH_ARMv6) && (get_cr() & CR_XP)) || cpu_is_xsc3()) && pfn >= 0x100000 && - !((__pfn_to_phys(pfn) | size | addr) & ~SUPERSECTION_MASK)) { + !((paddr | size | addr) & ~SUPERSECTION_MASK)) { area->flags |= VM_ARM_SECTION_MAPPING; err = remap_area_supersections(addr, pfn, size, type); - } else if (!((__pfn_to_phys(pfn) | size | addr) & ~PMD_MASK)) { + } else if (!((paddr | size | addr) & ~PMD_MASK)) { area->flags |= VM_ARM_SECTION_MAPPING; err = remap_area_sections(addr, pfn, size, type); } else #endif - err = ioremap_page_range(addr, addr + size, __pfn_to_phys(pfn), + err = ioremap_page_range(addr, addr + size, paddr, __pgprot(type->prot_pte)); if (err) { @@ -410,34 +403,28 @@ __arm_ioremap_exec(unsigned long phys_addr, size_t size, bool cached) void __iounmap(volatile void __iomem *io_addr) { void *addr = (void *)(PAGE_MASK & (unsigned long)io_addr); - struct vm_struct *vm; + struct static_vm *svm; + + /* If this is a static mapping, we must leave it alone */ + svm = find_static_vm_vaddr(addr); + if (svm) + return; - read_lock(&vmlist_lock); - for (vm = vmlist; vm; vm = vm->next) { - if (vm->addr > addr) - break; - if (!(vm->flags & VM_IOREMAP)) - continue; - /* If this is a static mapping we must leave it alone */ - if ((vm->flags & VM_ARM_STATIC_MAPPING) && - (vm->addr <= addr) && (vm->addr + vm->size > addr)) { - read_unlock(&vmlist_lock); - 
return; - } #if !defined(CONFIG_SMP) && !defined(CONFIG_ARM_LPAE) + { + struct vm_struct *vm; + + vm = find_vm_area(addr); + /* * If this is a section based mapping we need to handle it * specially as the VM subsystem does not know how to handle * such a beast. */ - if ((vm->addr == addr) && - (vm->flags & VM_ARM_SECTION_MAPPING)) { + if (vm && (vm->flags & VM_ARM_SECTION_MAPPING)) unmap_area_sections((unsigned long)vm->addr, vm->size); - break; - } -#endif } - read_unlock(&vmlist_lock); +#endif vunmap(addr); } diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c index 9f06102..a35b314 100644 --- a/arch/arm/mm/mmu.c +++ b/arch/arm/mm/mmu.c @@ -757,21 +757,24 @@ void __init iotable_init(struct map_desc *io_desc, int nr) { struct map_desc *md; struct vm_struct *vm; + struct static_vm *svm; if (!nr) return; - vm = early_alloc_aligned(sizeof(*vm) * nr, __alignof__(*vm)); + svm = early_alloc_aligned(sizeof(*svm) * nr, __alignof__(*svm)); for (md = io_desc; nr; md++, nr--) { create_mapping(md); + + vm = &svm->vm; vm->addr = (void *)(md->virtual & PAGE_MASK); vm->size = PAGE_ALIGN(md->length + (md->virtual & ~PAGE_MASK)); vm->phys_addr = __pfn_to_phys(md->pfn); vm->flags = VM_IOREMAP | VM_ARM_STATIC_MAPPING; vm->flags |= VM_ARM_MTYPE(md->type); vm->caller = iotable_init; - vm_area_add_early(vm++); + add_static_vm_early(svm++); } } @@ -779,13 +782,16 @@ void __init vm_reserve_area_early(unsigned long addr, unsigned long size, void *caller) { struct vm_struct *vm; + struct static_vm *svm; + + svm = early_alloc_aligned(sizeof(*svm), __alignof__(*svm)); - vm = early_alloc_aligned(sizeof(*vm), __alignof__(*vm)); + vm = &svm->vm; vm->addr = (void *)addr; vm->size = size; vm->flags = VM_IOREMAP | VM_ARM_EMPTY_MAPPING; vm->caller = caller; - vm_area_add_early(vm); + add_static_vm_early(svm); } #ifndef CONFIG_ARM_LPAE @@ -810,14 +816,13 @@ static void __init pmd_empty_section_gap(unsigned long addr) static void __init fill_pmd_gaps(void) { + struct static_vm *svm; struct vm_struct *vm; unsigned long addr, next = 0; pmd_t *pmd; - /* we're still single threaded hence no lock needed here */ - for (vm = vmlist; vm; vm = vm->next) { - if (!(vm->flags & (VM_ARM_STATIC_MAPPING | VM_ARM_EMPTY_MAPPING))) - continue; + list_for_each_entry(svm, &static_vmlist, list) { + vm = &svm->vm; addr = (unsigned long)vm->addr; if (addr < next) continue; @@ -859,17 +864,12 @@ static void __init pci_reserve_io(void) { struct vm_struct *vm; unsigned long addr; + struct static_vm *svm; - /* we're still single threaded hence no lock needed here */ - for (vm = vmlist; vm; vm = vm->next) { - if (!(vm->flags & VM_ARM_STATIC_MAPPING)) - continue; - addr = (unsigned long)vm->addr; - addr &= ~(SZ_2M - 1); - if (addr == PCI_IO_VIRT_BASE) - return; + svm = find_static_vm_vaddr((void *)PCI_IO_VIRT_BASE); + if (svm) + return; - } vm_reserve_area_early(PCI_IO_VIRT_BASE, SZ_2M, pci_reserve_io); } #else -- cgit v0.10.2 From bce2bd3cda4e73cd92cf08e017c073058c151ddc Mon Sep 17 00:00:00 2001 From: Russell King Date: Thu, 14 Feb 2013 10:50:04 +0000 Subject: ARM: fix warnings introduced by previous patch 869486d5f51 (ARM: 7646/1: mm: use static_vm for managing static mapped areas) introduced new warnings: arch/arm/mm/mmu.c: In function 'pci_reserve_io': arch/arm/mm/mmu.c:888:16: warning: unused variable 'addr' arch/arm/mm/mmu.c:887:20: warning: unused variable 'vm' because it failed to delete the two local variables it no longer used. Fix this. 
Signed-off-by: Russell King

diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c
index a35b314..3da1d3e 100644
--- a/arch/arm/mm/mmu.c
+++ b/arch/arm/mm/mmu.c
@@ -862,8 +862,6 @@ static void __init fill_pmd_gaps(void)
 #if defined(CONFIG_PCI) && !defined(CONFIG_NEED_MACH_IO_H)
 static void __init pci_reserve_io(void)
 {
-	struct vm_struct *vm;
-	unsigned long addr;
 	struct static_vm *svm;
 
 	svm = find_static_vm_vaddr((void *)PCI_IO_VIRT_BASE);
-- cgit v0.10.2

From 276fd3c5f8a21f9d7d398383b818d5d0ac81db23 Mon Sep 17 00:00:00 2001
From: Thierry Reding
Date: Mon, 11 Feb 2013 08:44:47 +0100
Subject: ARM: 7647/1: pci: Keep pci_common_init() around after init

When using deferred driver probing, PCI host controller drivers may
actually require this function after the init stage.

Signed-off-by: Thierry Reding
Signed-off-by: Russell King

diff --git a/arch/arm/kernel/bios32.c b/arch/arm/kernel/bios32.c
index 379cf32..da7b0c9 100644
--- a/arch/arm/kernel/bios32.c
+++ b/arch/arm/kernel/bios32.c
@@ -413,7 +413,7 @@ static int pcibios_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
 	return irq;
 }
 
-static int __init pcibios_init_resources(int busnr, struct pci_sys_data *sys)
+static int pcibios_init_resources(int busnr, struct pci_sys_data *sys)
 {
 	int ret;
 	struct pci_host_bridge_window *window;
@@ -445,7 +445,7 @@ static int __init pcibios_init_resources(int busnr, struct pci_sys_data *sys)
 	return 0;
 }
 
-static void __init pcibios_init_hw(struct hw_pci *hw, struct list_head *head)
+static void pcibios_init_hw(struct hw_pci *hw, struct list_head *head)
 {
 	struct pci_sys_data *sys = NULL;
 	int ret;
@@ -493,7 +493,7 @@ static void __init pcibios_init_hw(struct hw_pci *hw, struct list_head *head)
 	}
 }
 
-void __init pci_common_init(struct hw_pci *hw)
+void pci_common_init(struct hw_pci *hw)
 {
 	struct pci_sys_data *sys;
 	LIST_HEAD(head);
-- cgit v0.10.2

From 352af7d4dd90bac3640ad2383e4e9f332d3a4537 Mon Sep 17 00:00:00 2001
From: Thierry Reding
Date: Mon, 11 Feb 2013 08:46:10 +0100
Subject: ARM: 7648/1: pci: Allow passing per-controller private data

In order to allow drivers to specify private data for each controller,
this commit adds a private_data field to the struct hw_pci. This field
is an array of nr_controllers pointers that will be used to initialize
the private_data field of the corresponding controller's pci_sys_data
structure.
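To illustrate how a host controller driver might consume this, each entry
of the private_data array ends up in the matching controller's
pci_sys_data before its setup() hook runs. A usage sketch follows; every
"my_" name in it is hypothetical, not part of the patch:

#include <asm/mach/pci.h>

/* Hypothetical per-controller state. */
struct my_pcie_port {
	void __iomem *base;
};

static struct my_pcie_port my_ports[2];

static void *my_port_data[] = { &my_ports[0], &my_ports[1] };

static int my_pcie_setup(int nr, struct pci_sys_data *sys)
{
	/* pcibios_init_hw() has copied hw_pci.private_data[nr] in here. */
	struct my_pcie_port *port = sys->private_data;

	if (!port)
		return 0;	/* nothing to scan on this controller */

	/* ... map registers, add this port's windows to sys->resources ... */
	return 1;		/* > 0 tells the core to go ahead and scan */
}

static int my_pcie_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
{
	return 0;		/* no legacy interrupt wired up in this sketch */
}

static struct hw_pci my_pci = {
	.nr_controllers	= 2,
	.private_data	= my_port_data,
	.setup		= my_pcie_setup,
	.map_irq	= my_pcie_map_irq,
};

The driver would then call pci_common_init(&my_pci) from its probe path,
which may run after the init stage when probing is deferred - exactly the
case the previous patch (ARM: 7647/1) addressed by dropping the __init
annotations.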
Signed-off-by: Thierry Reding
Signed-off-by: Russell King

diff --git a/arch/arm/include/asm/mach/pci.h b/arch/arm/include/asm/mach/pci.h
index db9fedb..5cf2e97 100644
--- a/arch/arm/include/asm/mach/pci.h
+++ b/arch/arm/include/asm/mach/pci.h
@@ -23,6 +23,7 @@ struct hw_pci {
 #endif
 	struct pci_ops	*ops;
 	int		nr_controllers;
+	void		**private_data;
 	int		(*setup)(int nr, struct pci_sys_data *);
 	struct pci_bus *(*scan)(int nr, struct pci_sys_data *);
 	void		(*preinit)(void);
diff --git a/arch/arm/kernel/bios32.c b/arch/arm/kernel/bios32.c
index da7b0c9..a1f73b5 100644
--- a/arch/arm/kernel/bios32.c
+++ b/arch/arm/kernel/bios32.c
@@ -464,6 +464,9 @@ static void pcibios_init_hw(struct hw_pci *hw, struct list_head *head)
 		sys->map_irq = hw->map_irq;
 		INIT_LIST_HEAD(&sys->resources);
 
+		if (hw->private_data)
+			sys->private_data = hw->private_data[nr];
+
 		ret = hw->setup(nr, sys);
 
 		if (ret > 0) {
-- cgit v0.10.2

From 9520a5bece13b7382f4b0059180f61530c423c81 Mon Sep 17 00:00:00 2001
From: Ben Dooks
Date: Mon, 11 Feb 2013 12:25:06 +0100
Subject: ARM: 7649/1: mm: mm->context.id fix for big-endian

Since the new ASID code in b5466f8728527a05a493cc4abe9e6f034a1bbaab
("ARM: mm: remove IPI broadcasting on ASID rollover") was changed to use
64bit operations, it has broken the BE operation due to an issue with the
MM code accessing sub-fields of mm->context.id.

When running in BE mode, we see the values in mm->context.id are stored
with the highest value first, so the LDR in arch/arm/mm/proc-macros.S
reads the wrong part of this field. To resolve this, change the LDR in
the mmid macro to load from +4.

Acked-by: Will Deacon
Signed-off-by: Ben Dooks
Signed-off-by: Russell King

diff --git a/arch/arm/mm/context.c b/arch/arm/mm/context.c
index bc4a5e9..7a05111 100644
--- a/arch/arm/mm/context.c
+++ b/arch/arm/mm/context.c
@@ -34,6 +34,9 @@
  * The ASID is used to tag entries in the CPU caches and TLBs.
  * The context ID is used by debuggers and trace logic, and
  * should be unique within all running processes.
+ *
+ * In big endian operation, the two 32 bit words are swapped if accessed
+ * by non 64-bit operations.
  */
 #define ASID_FIRST_VERSION	(1ULL << ASID_BITS)
 #define NUM_USER_ASIDS		(ASID_FIRST_VERSION - 1)
diff --git a/arch/arm/mm/proc-macros.S b/arch/arm/mm/proc-macros.S
index eb6aa73..f9a0aa7 100644
--- a/arch/arm/mm/proc-macros.S
+++ b/arch/arm/mm/proc-macros.S
@@ -38,9 +38,14 @@
 
 /*
  * mmid - get context id from mm pointer (mm->context.id)
+ * note, this field is 64bit, so in big-endian the two words are swapped too.
  */
 	.macro	mmid, rd, rn
+#ifdef __ARMEB__
+	ldr	\rd, [\rn, #MM_CONTEXT_ID + 4 ]
+#else
 	ldr	\rd, [\rn, #MM_CONTEXT_ID]
+#endif
 	.endm
 
 /*
-- cgit v0.10.2

From 251019fb97b67067c34292bb89c19e712f91971c Mon Sep 17 00:00:00 2001
From: Ben Dooks
Date: Mon, 11 Feb 2013 12:25:05 +0100
Subject: ARM: 7650/1: mm: replace direct access to mm->context.id with new macro

The mmid macro is meant to be used to get the mm->context.id data from
the mm structure, but it seems to have been missed in a couple of files.
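The layout issue behind these two fixes is easy to show in plain C
(illustrative only; context_id_low() is not a kernel function). A single
32-bit load from the address of a 64-bit value fetches the low word only
on little-endian; on big-endian the low word sits four bytes in, which is
exactly the offset the mmid macro adds in its __ARMEB__ case:

#include <stdint.h>
#include <string.h>

/* Fetch the low 32 bits (where the ASID lives) of a 64-bit context ID
 * using one 32-bit sized access, as the assembly's single LDR does. */
static uint32_t context_id_low(const uint64_t *id)
{
	uint32_t w;

#ifdef __ARMEB__
	/* Big-endian: the least significant word is at byte offset 4. */
	memcpy(&w, (const char *)id + 4, sizeof(w));
#else
	/* Little-endian: the least significant word is at byte offset 0. */
	memcpy(&w, id, sizeof(w));
#endif
	return w;
}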
Acked-by: Will Deacon
Signed-off-by: Ben Dooks
Signed-off-by: Russell King

diff --git a/arch/arm/mm/proc-v6.S b/arch/arm/mm/proc-v6.S
index 09c5233..bcaaa8d 100644
--- a/arch/arm/mm/proc-v6.S
+++ b/arch/arm/mm/proc-v6.S
@@ -101,7 +101,7 @@ ENTRY(cpu_v6_dcache_clean_area)
 ENTRY(cpu_v6_switch_mm)
 #ifdef CONFIG_MMU
 	mov	r2, #0
-	ldr	r1, [r1, #MM_CONTEXT_ID]	@ get mm->context.id
+	mmid	r1, r1				@ get mm->context.id
 	ALT_SMP(orr	r0, r0, #TTB_FLAGS_SMP)
 	ALT_UP(orr	r0, r0, #TTB_FLAGS_UP)
 	mcr	p15, 0, r2, c7, c5, 6		@ flush BTAC/BTB
diff --git a/arch/arm/mm/proc-v7-2level.S b/arch/arm/mm/proc-v7-2level.S
index 6d98c13..78f520b 100644
--- a/arch/arm/mm/proc-v7-2level.S
+++ b/arch/arm/mm/proc-v7-2level.S
@@ -40,7 +40,7 @@
 ENTRY(cpu_v7_switch_mm)
 #ifdef CONFIG_MMU
 	mov	r2, #0
-	ldr	r1, [r1, #MM_CONTEXT_ID]	@ get mm->context.id
+	mmid	r1, r1				@ get mm->context.id
 	ALT_SMP(orr	r0, r0, #TTB_FLAGS_SMP)
 	ALT_UP(orr	r0, r0, #TTB_FLAGS_UP)
 #ifdef CONFIG_ARM_ERRATA_430973
diff --git a/arch/arm/mm/proc-v7-3level.S b/arch/arm/mm/proc-v7-3level.S
index 7b56386..50bf1da 100644
--- a/arch/arm/mm/proc-v7-3level.S
+++ b/arch/arm/mm/proc-v7-3level.S
@@ -47,7 +47,7 @@
  */
 ENTRY(cpu_v7_switch_mm)
 #ifdef CONFIG_MMU
-	ldr	r1, [r1, #MM_CONTEXT_ID]	@ get mm->context.id
+	mmid	r1, r1				@ get mm->context.id
 	and	r3, r1, #0xff
 	mov	r3, r3, lsl #(48 - 32)		@ ASID
 	mcrr	p15, 0, r0, r3, c2		@ set TTB 0
-- cgit v0.10.2

From b28748fb5d21d5d64c9ce31579ffbbd41f317042 Mon Sep 17 00:00:00 2001
From: Russell King
Date: Sun, 17 Feb 2013 14:40:33 +0000
Subject: ARM: add SCHED_HRTICK config option

Since we don't include kernel/Kconfig.hz, which defines HZ values
unsuitable for ARM platforms, add the SCHED_HRTICK option to properly
configure the scheduler for hrtimer operation.

Signed-off-by: Russell King

diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
index 67874b8..a192a5c 100644
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -1655,6 +1655,9 @@ config HZ
 	default SHMOBILE_TIMER_HZ if ARCH_SHMOBILE
 	default 100
 
+config SCHED_HRTICK
+	def_bool HIGH_RES_TIMERS
+
 config THUMB2_KERNEL
 	bool "Compile the kernel in Thumb-2 mode"
 	depends on CPU_V7 && !CPU_V6 && !CPU_V6K
-- cgit v0.10.2
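Because def_bool makes SCHED_HRTICK a non-interactive symbol that simply
tracks its expression, CONFIG_SCHED_HRTICK is now defined exactly when
CONFIG_HIGH_RES_TIMERS is. A minimal sketch of how C code typically keys
off such a symbol; hrtick_available() is hypothetical, not taken from the
scheduler:

#include <linux/kconfig.h>
#include <linux/types.h>

static bool hrtick_available(void)
{
	/* IS_ENABLED() folds to a compile-time constant 0 or 1. */
	return IS_ENABLED(CONFIG_SCHED_HRTICK);
}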