summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
-rw-r--r--arch/powerpc/include/asm/mmu-book3e.h1
-rw-r--r--arch/powerpc/include/asm/reg_booke.h8
-rw-r--r--arch/powerpc/kernel/cpu_setup_fsl_booke.S74
-rw-r--r--arch/powerpc/kvm/e500_emulate.c8
-rw-r--r--arch/powerpc/kvm/e500_mmu_host.c4
-rw-r--r--arch/powerpc/platforms/85xx/common.c107
-rw-r--r--drivers/net/ethernet/freescale/fman/src/wrapper/lnxwrp_fm.c17
-rw-r--r--drivers/staging/fsl_qbman/fsl_usdpaa.c141
8 files changed, 151 insertions, 209 deletions
diff --git a/arch/powerpc/include/asm/mmu-book3e.h b/arch/powerpc/include/asm/mmu-book3e.h
index 32e470e..dcc733c 100644
--- a/arch/powerpc/include/asm/mmu-book3e.h
+++ b/arch/powerpc/include/asm/mmu-book3e.h
@@ -90,6 +90,7 @@
#define MAS3_SPSIZE 0x0000003e
#define MAS3_SPSIZE_SHIFT 1
+#define MAS4_TLBSEL_MASK MAS0_TLBSEL_MASK
#define MAS4_TLBSELD(x) MAS0_TLBSEL(x)
#define MAS4_INDD 0x00008000 /* Default IND */
#define MAS4_TSIZED(x) MAS1_TSIZE(x)
diff --git a/arch/powerpc/include/asm/reg_booke.h b/arch/powerpc/include/asm/reg_booke.h
index 5152c36..8987ca3 100644
--- a/arch/powerpc/include/asm/reg_booke.h
+++ b/arch/powerpc/include/asm/reg_booke.h
@@ -227,8 +227,12 @@
#define CCR1_TCS 0x00000080 /* Timer Clock Select */
/* Bit definitions for PWRMGTCR0. */
-#define PWRMGTCR0_ALTIVEC_IDLE (1 << 22) /* Altivec idle enable */
-#define PWRMGTCR0_PW20_WAIT (1 << 14) /* PW20 state enable bit */
+#define PWRMGTCR0_PW20_WAIT (1 << 14) /* PW20 state enable bit */
+#define PWRMGTCR0_PW20_ENT_SHIFT 8
+#define PWRMGTCR0_PW20_ENT 0x3F00
+#define PWRMGTCR0_AV_IDLE_PD_EN (1 << 22) /* Altivec idle enable */
+#define PWRMGTCR0_AV_IDLE_CNT_SHIFT 16
+#define PWRMGTCR0_AV_IDLE_CNT 0x3F0000
/* Bit definitions for the MCSR. */
#define MCSR_MCS 0x80000000 /* Machine Check Summary */
diff --git a/arch/powerpc/kernel/cpu_setup_fsl_booke.S b/arch/powerpc/kernel/cpu_setup_fsl_booke.S
index dbeff8c..f5ee308 100644
--- a/arch/powerpc/kernel/cpu_setup_fsl_booke.S
+++ b/arch/powerpc/kernel/cpu_setup_fsl_booke.S
@@ -53,12 +53,84 @@ _GLOBAL(__e500_dcache_setup)
isync
blr
+_GLOBAL(has_pw20_altivec_idle)
+ /* 0 false, 1 true */
+ li r3, 0
+
+ /* PW20 & AltiVec idle feature only exists for E6500 */
+ mfspr r0, SPRN_PVR
+ rlwinm r11, r0, 16, 16, 31
+ lis r12, 0
+ ori r12, r12, PVR_VER_E6500@l
+ cmpw r11, r12
+ bne 2f
+
+ /* Fix erratum, e6500 rev1 does not support PW20 & AltiVec idle */
+ rlwinm r11, r0, 0, 16, 31
+ cmpwi r11, 0x20
+ blt 2f
+ li r3, 1
+2:
+ blr
+
+/*
+ * FIXME - we haven't yet done testing to determine a reasonable default
+ * value for PW20_WAIT_IDLE_BIT.
+ */
+#define PW20_WAIT_IDLE_BIT 50 /* 1ms, TB frequency is 41.66MHz */
+_GLOBAL(setup_pw20_idle)
+ mflr r10
+ bl has_pw20_altivec_idle
+ mtlr r10
+ cmpwi r3, 0
+ beq 2f
+
+ mfspr r3, SPRN_PWRMGTCR0
+
+ /* Set PW20_WAIT bit, enable pw20 state */
+ ori r3, r3, PWRMGTCR0_PW20_WAIT
+ li r11, PW20_WAIT_IDLE_BIT
+
+ /* Set Automatic PW20 Core Idle Count */
+ rlwimi r3, r11, PWRMGTCR0_PW20_ENT_SHIFT, PWRMGTCR0_PW20_ENT
+
+ mtspr SPRN_PWRMGTCR0, r3
+2:
+ blr
+
+/*
+ * FIXME - we haven't yet done testing to determine a reasonable default
+ * value for AV_WAIT_IDLE_BIT.
+ */
+#define AV_WAIT_IDLE_BIT 50 /* 1ms, TB frequency is 41.66MHz */
+_GLOBAL(setup_altivec_idle)
+ mflr r10
+ bl has_pw20_altivec_idle
+ mtlr r10
+ cmpwi r3, 0
+ beq 2f
+
+ mfspr r3, SPRN_PWRMGTCR0
+
+ /* Enable Altivec Idle */
+ oris r3, r3, PWRMGTCR0_AV_IDLE_PD_EN@h
+ li r11, AV_WAIT_IDLE_BIT
+
+ /* Set Automatic AltiVec Idle Count */
+ rlwimi r3, r11, PWRMGTCR0_AV_IDLE_CNT_SHIFT, PWRMGTCR0_AV_IDLE_CNT
+
+ mtspr SPRN_PWRMGTCR0, r3
+2:
+ blr
+
#ifdef CONFIG_PPC_BOOK3E_64
_GLOBAL(__setup_cpu_e6500)
mflr r6
#ifdef CONFIG_PPC64
bl .setup_altivec_ivors
#endif
+ bl .setup_pw20_idle
+ bl .setup_altivec_idle
bl __setup_cpu_e5500
mtlr r6
blr
@@ -121,6 +193,8 @@ _GLOBAL(__setup_cpu_e5500)
_GLOBAL(__restore_cpu_e6500)
mflr r5
bl .setup_altivec_ivors
+ bl .setup_pw20_idle
+ bl .setup_altivec_idle
bl __restore_cpu_e5500
mtlr r5
blr
diff --git a/arch/powerpc/kvm/e500_emulate.c b/arch/powerpc/kvm/e500_emulate.c
index fcbfde9..1e29aa8 100644
--- a/arch/powerpc/kvm/e500_emulate.c
+++ b/arch/powerpc/kvm/e500_emulate.c
@@ -228,6 +228,10 @@ int kvmppc_core_emulate_mtspr(struct kvm_vcpu *vcpu, int sprn, ulong spr_val)
spr_val);
break;
+ case SPRN_PWRMGTCR0:
+ /* Guest relies on host power management configurations */
+ break;
+
/* extra exceptions */
case SPRN_IVOR32:
vcpu->arch.ivor[BOOKE_IRQPRIO_SPE_UNAVAIL] = spr_val;
@@ -335,6 +339,10 @@ int kvmppc_core_emulate_mfspr(struct kvm_vcpu *vcpu, int sprn, ulong *spr_val)
*spr_val = 0;
break;
+ case SPRN_PWRMGTCR0:
+ *spr_val = 0;
+ break;
+
case SPRN_MMUCFG:
*spr_val = vcpu->arch.mmucfg;
break;
diff --git a/arch/powerpc/kvm/e500_mmu_host.c b/arch/powerpc/kvm/e500_mmu_host.c
index 64de70a..9f87220 100644
--- a/arch/powerpc/kvm/e500_mmu_host.c
+++ b/arch/powerpc/kvm/e500_mmu_host.c
@@ -118,11 +118,15 @@ static u32 get_host_mas0(unsigned long eaddr)
{
unsigned long flags;
u32 mas0;
+ u32 mas4;
local_irq_save(flags);
mtspr(SPRN_MAS6, 0);
+ mas4 = mfspr(SPRN_MAS4);
+ mtspr(SPRN_MAS4, mas4 & ~MAS4_TLBSEL_MASK);
asm volatile("tlbsx 0, %0" : : "b" (eaddr & ~CONFIG_PAGE_OFFSET));
mas0 = mfspr(SPRN_MAS0);
+ mtspr(SPRN_MAS4, mas4);
local_irq_restore(flags);
return mas0;
diff --git a/arch/powerpc/platforms/85xx/common.c b/arch/powerpc/platforms/85xx/common.c
index 09978f5..f63c10d 100644
--- a/arch/powerpc/platforms/85xx/common.c
+++ b/arch/powerpc/platforms/85xx/common.c
@@ -7,32 +7,10 @@
*/
#include <linux/of_platform.h>
-#include <asm/time.h>
-
#include <sysdev/cpm2_pic.h>
#include "mpc85xx.h"
-#define MAX_BIT 64
-
-#define ALTIVEC_COUNT_OFFSET 16
-#define ALTIVEC_IDLE_COUNT_MASK 0x003f0000
-#define PW20_COUNT_OFFSET 8
-#define PW20_IDLE_COUNT_MASK 0x00003f00
-
-/*
- * FIXME - We don't know the AltiVec application scenarios.
- */
-#define ALTIVEC_IDLE_TIME 1000 /* 1ms */
-
-/*
- * FIXME - We don't know, what time should we let the core into PW20 state.
- * because we don't know the current state of the cpu load. And threads are
- * independent, so we can not know the state of different thread has been
- * idle.
- */
-#define PW20_IDLE_TIME 1000 /* 1ms */
-
static struct of_device_id __initdata mpc85xx_common_ids[] = {
{ .type = "soc", },
{ .compatible = "soc", },
@@ -80,7 +58,6 @@ static void cpm2_cascade(unsigned int irq, struct irq_desc *desc)
chip->irq_eoi(&desc->irq_data);
}
-
void __init mpc85xx_cpm2_pic_init(void)
{
struct device_node *np;
@@ -104,87 +81,3 @@ void __init mpc85xx_cpm2_pic_init(void)
irq_set_chained_handler(irq, cpm2_cascade);
}
#endif
-
-static bool has_pw20_altivec_idle(void)
-{
- u32 pvr;
-
- pvr = mfspr(SPRN_PVR);
-
- /* PW20 & AltiVec idle feature only exists for E6500 */
- if (PVR_VER(pvr) != PVR_VER_E6500)
- return false;
-
- /* Fix erratum, e6500 rev1 does not support PW20 & AltiVec idle */
- if (PVR_REV(pvr) < 0x20)
- return false;
-
- return true;
-}
-
-static unsigned int get_idle_ticks_bit(unsigned int us)
-{
- unsigned int cycle;
-
- /*
- * The time control by TB turn over bit, so we need
- * to be divided by 2.
- */
- cycle = (us / 2) * tb_ticks_per_usec;
-
- return ilog2(cycle) + 1;
-}
-
-static void setup_altivec_idle(void *unused)
-{
- u32 altivec_idle, bit;
-
- if (!has_pw20_altivec_idle())
- return;
-
- /* Enable Altivec Idle */
- altivec_idle = mfspr(SPRN_PWRMGTCR0);
- altivec_idle |= PWRMGTCR0_ALTIVEC_IDLE;
-
- /* Set Automatic AltiVec Idle Count */
- /* clear count */
- altivec_idle &= ~ALTIVEC_IDLE_COUNT_MASK;
-
- /* set count */
- bit = get_idle_ticks_bit(ALTIVEC_IDLE_TIME);
- altivec_idle |= ((MAX_BIT - bit) << ALTIVEC_COUNT_OFFSET);
-
- mtspr(SPRN_PWRMGTCR0, altivec_idle);
-}
-
-static void setup_pw20_idle(void *unused)
-{
- u32 pw20_idle, bit;
-
- if (!has_pw20_altivec_idle())
- return;
-
- pw20_idle = mfspr(SPRN_PWRMGTCR0);
-
- /* set PW20_WAIT bit, enable pw20 */
- pw20_idle |= PWRMGTCR0_PW20_WAIT;
-
- /* Set Automatic PW20 Core Idle Count */
- /* clear count */
- pw20_idle &= ~PW20_IDLE_COUNT_MASK;
-
- /* set count */
- bit = get_idle_ticks_bit(PW20_IDLE_TIME);
- pw20_idle |= ((MAX_BIT - bit) << PW20_COUNT_OFFSET);
-
- mtspr(SPRN_PWRMGTCR0, pw20_idle);
-}
-
-static int __init setup_idle_hw_governor(void)
-{
- on_each_cpu(setup_altivec_idle, NULL, 1);
- on_each_cpu(setup_pw20_idle, NULL, 1);
-
- return 0;
-}
-late_initcall(setup_idle_hw_governor);
diff --git a/drivers/net/ethernet/freescale/fman/src/wrapper/lnxwrp_fm.c b/drivers/net/ethernet/freescale/fman/src/wrapper/lnxwrp_fm.c
index 10de0ad..333b20a 100644
--- a/drivers/net/ethernet/freescale/fman/src/wrapper/lnxwrp_fm.c
+++ b/drivers/net/ethernet/freescale/fman/src/wrapper/lnxwrp_fm.c
@@ -523,16 +523,6 @@ static t_LnxWrpFmDev * ReadFmDevTreeNode (struct platform_device *of_dev)
return NULL;
}
- {
- uint32_t svr;
-
- svr = mfspr(SPRN_SVR);
-
- if ((svr & ~SVR_VER_IGNORE_MASK) == SVR_B4860_REV2_VALUE) {
- res.end = 0x80000;
- res.start = 0;
- }
- }
p_LnxWrpFmDev->fmBaseAddr = 0;
p_LnxWrpFmDev->fmPhysBaseAddr = res.start;
@@ -566,6 +556,13 @@ static t_LnxWrpFmDev * ReadFmDevTreeNode (struct platform_device *of_dev)
p_LnxWrpFmDev->fmMuramBaseAddr = 0;
p_LnxWrpFmDev->fmMuramPhysBaseAddr = res.start;
p_LnxWrpFmDev->fmMuramMemSize = res.end + 1 - res.start;
+ {
+ uint32_t svr;
+ svr = mfspr(SPRN_SVR);
+
+ if ((svr & ~SVR_VER_IGNORE_MASK) == SVR_B4860_REV2_VALUE)
+ p_LnxWrpFmDev->fmMuramMemSize = 0x80000;
+ }
}
}
diff --git a/drivers/staging/fsl_qbman/fsl_usdpaa.c b/drivers/staging/fsl_qbman/fsl_usdpaa.c
index b746d3c..39ebf40 100644
--- a/drivers/staging/fsl_qbman/fsl_usdpaa.c
+++ b/drivers/staging/fsl_qbman/fsl_usdpaa.c
@@ -222,57 +222,24 @@ static struct mem_fragment *split_frag(struct mem_fragment *frag)
return x[2];
}
-/* Conversely, when a fragment is released we look to see whether its
- * similarly-split siblings are free to be reassembled. */
-static struct mem_fragment *merge_frag(struct mem_fragment *frag)
+/* Walk the list of fragments and adjoin neighbouring segments if possible */
+static void compress_frags(void)
{
- /* If this fragment can be merged with its siblings, it will have
- * newbase and newlen as its geometry. */
- uint64_t newlen = frag->len << 2;
- uint64_t newbase = frag->base & ~(newlen - 1);
- struct mem_fragment *tmp, *leftmost = frag, *rightmost = frag;
-
- /* If this fragment is already at root size don't allow merge */
- if (frag->len == frag->root_len)
- return NULL;
- /* Scan left until we find the start */
- tmp = list_entry(frag->list.prev, struct mem_fragment, list);
- while ((&tmp->list != &mem_list) && (tmp->base >= newbase)) {
- if (tmp->refs)
- return NULL;
- if (tmp->len != frag->len)
- return NULL;
- leftmost = tmp;
- tmp = list_entry(tmp->list.prev, struct mem_fragment, list);
- }
- /* Scan right until we find the end */
- tmp = list_entry(frag->list.next, struct mem_fragment, list);
- while ((&tmp->list != &mem_list) && (tmp->base < (newbase + newlen))) {
- if (tmp->refs)
- return NULL;
- if (tmp->len != frag->len)
- return NULL;
- rightmost = tmp;
- tmp = list_entry(tmp->list.next, struct mem_fragment, list);
- }
- if (leftmost == rightmost)
- return NULL;
- /* OK, we can merge */
- frag = leftmost;
- frag->len = newlen;
- frag->pfn_len = newlen >> PAGE_SHIFT;
- while (1) {
- int lastone;
- tmp = list_entry(frag->list.next, struct mem_fragment, list);
- lastone = (tmp == rightmost);
- if (&tmp->list == &mem_list)
- break;
- list_del(&tmp->list);
- kfree(tmp);
- if (lastone)
- break;
+ /* Walk the fragment list and combine fragments */
+ struct mem_fragment *frag, *tmpfrag;
+ list_for_each_entry_safe(frag, tmpfrag, &mem_list, list) {
+ struct mem_fragment *next_frag =
+ list_entry(frag->list.next, struct mem_fragment, list);
+ if (frag->refs == 0 &&
+ frag->len < frag->root_len &&
+ &next_frag->list != &mem_list) {
+ if (next_frag->refs == 0) {
+ /* Merge with next */
+ next_frag->len += frag->len;
+ list_del(&frag->list);
+ }
+ }
}
- return frag;
}
/* Hook from arch/powerpc/mm/mem.c */
@@ -479,8 +446,8 @@ __maybe_unused static void dump_frags(void)
struct mem_fragment *frag;
int i = 0;
list_for_each_entry(frag, &mem_list, list) {
- pr_info("FRAG %d: base 0x%llx len 0x%llx root_len 0x%llx\n",
- i, frag->base, frag->len, frag->root_len);
+ pr_info("FRAG %d: base 0x%llx len 0x%llx root_len 0x%llx refs %d\n",
+ i, frag->base, frag->len, frag->root_len, frag->refs);
++i;
}
}
@@ -594,15 +561,12 @@ static int usdpaa_release(struct inode *inode, struct file *filp)
}
/* Check each fragment and merge if the ref count is 0 */
for (i = 0; i < map->frag_count; i++) {
- if (!--current_frag->refs) {
- struct mem_fragment *frag = current_frag;
- do {
- frag = merge_frag(frag);
- } while (frag);
- }
+ --current_frag->refs;
current_frag = list_entry(current_frag->list.next,
struct mem_fragment, list);
}
+
+ compress_frags();
list_del(&map->list);
kfree(map);
}
@@ -644,8 +608,6 @@ static int check_mmap_dma(struct ctx *ctx, struct vm_area_struct *vma,
for (i = 0; i < map->frag_count; i++) {
if (frag->pfn_base == vma->vm_pgoff) {
*match = 1;
- if (frag->len != (vma->vm_end - vma->vm_start))
- return -EINVAL;
*pfn = frag->pfn_base;
return 0;
}
@@ -741,6 +703,7 @@ static unsigned long usdpaa_get_unmapped_area(struct file *file,
while (vma) {
if ((addr + len) < vma->vm_start)
return addr;
+
addr = USDPAA_MEM_ROUNDUP(vma->vm_end, len);
vma = vma->vm_next;
}
@@ -952,7 +915,7 @@ static long ioctl_dma_map(struct file *fp, struct ctx *ctx,
/* See if the next x fragments are free
and can accommodate the size */
u32 found_size = largest_page;
- next_frag = list_entry(frag->list.next,
+ next_frag = list_entry(frag->list.prev,
struct mem_fragment,
list);
/* If the fragment is too small check
@@ -964,6 +927,10 @@ static long ioctl_dma_map(struct file *fp, struct ctx *ctx,
next_frag->len == 0)
break; /* not enough space */
found_size += next_frag->len;
+ next_frag = list_entry(
+ next_frag->list.prev,
+ struct mem_fragment,
+ list);
}
if (found_size >= i->len) {
/* Success! there is enough contiguous
@@ -990,20 +957,23 @@ do_map:
BUG_ON(next_frag->len == 0);
while ((next_frag->len + so_far) > i->len) {
/* Split frag until they match */
- split_frag(next_frag);
+ if (next_frag == start_frag)
+ start_frag = next_frag = split_frag(next_frag);
+ else
+ next_frag = split_frag(next_frag);
}
so_far += next_frag->len;
++frag_count;
- next_frag = list_entry(next_frag->list.next,
+ next_frag = list_entry(next_frag->list.prev,
struct mem_fragment, list);
}
/* we need to reserve start count fragments starting at start frag */
- next_frag = start_frag;
for (k = 0; k < frag_count; k++) {
- next_frag->refs++;
- next_frag = list_entry(next_frag->list.next,
- struct mem_fragment, list);
+ start_frag->refs++;
+ if (k+1 != frag_count)
+ start_frag = list_entry(start_frag->list.prev,
+ struct mem_fragment, list);
}
start_frag->flags = i->flags;
@@ -1020,32 +990,23 @@ do_map:
i->phys_addr = start_frag->base;
out:
spin_unlock(&mem_lock);
+
if (!ret) {
unsigned long longret;
- unsigned long next_addr = PAGE_SIZE;
- next_frag = start_frag;
- for (k = 0; k < frag_count; k++) {
- down_write(&current->mm->mmap_sem);
- longret = do_mmap_pgoff(fp, next_addr, next_frag->len,
- PROT_READ |
- (i->flags &
- USDPAA_DMA_FLAG_RDONLY ? 0
- : PROT_WRITE),
- MAP_SHARED,
- next_frag->pfn_base);
- up_write(&current->mm->mmap_sem);
- if (longret & ~PAGE_MASK)
- ret = (int)longret;
- else {
- if (k == 0)
- i->ptr = (void *)longret;
- else
- BUG_ON(next_addr != longret);
- next_addr = longret + next_frag->len;
- }
- next_frag = list_entry(next_frag->list.next,
- struct mem_fragment, list);
- }
+ down_write(&current->mm->mmap_sem);
+ longret = do_mmap_pgoff(fp, PAGE_SIZE, map->total_size,
+ PROT_READ |
+ (i->flags &
+ USDPAA_DMA_FLAG_RDONLY ? 0
+ : PROT_WRITE),
+ MAP_SHARED,
+ start_frag->pfn_base);
+ up_write(&current->mm->mmap_sem);
+ if (longret & ~PAGE_MASK)
+ ret = (int)longret;
+ else
+ i->ptr = (void *)longret;
+
} else
kfree(map);
return ret;