Diffstat (limited to 'arch/powerpc/mm/gup.c')
 arch/powerpc/mm/gup.c | 60
 1 file changed, 36 insertions(+), 24 deletions(-)
diff --git a/arch/powerpc/mm/gup.c b/arch/powerpc/mm/gup.c
index 4b921af..c5f734e 100644
--- a/arch/powerpc/mm/gup.c
+++ b/arch/powerpc/mm/gup.c
@@ -34,7 +34,7 @@ static noinline int gup_pte_range(pmd_t pmd, unsigned long addr,
ptep = pte_offset_kernel(&pmd, addr);
do {
- pte_t pte = *ptep;
+ pte_t pte = ACCESS_ONCE(*ptep);
struct page *page;
if ((pte_val(pte) & mask) != result)
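
Each level of the lockless walk now reads its page-table entry through ACCESS_ONCE(), which forces the compiler to emit exactly one load: the entry that gets validated is guaranteed to be the same snapshot that is later dereferenced. In kernels of this vintage the macro lives in <linux/compiler.h> and is essentially a volatile cast; a minimal sketch:

	/* <linux/compiler.h>, approximately: the volatile access
	 * stops the compiler from re-reading or caching x */
	#define ACCESS_ONCE(x) (*(volatile typeof(x) *)&(x))

	/* walker pattern: snapshot once, then test and use only
	 * the snapshot, never *ptep again */
	pte_t pte = ACCESS_ONCE(*ptep);

Without the snapshot, the compiler may legally reload *ptep between the mask check and the page lookup, and a concurrent split or unmap could change the entry in between.
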
@@ -63,12 +63,18 @@ static int gup_pmd_range(pud_t pud, unsigned long addr, unsigned long end,
pmdp = pmd_offset(&pud, addr);
do {
- pmd_t pmd = *pmdp;
+ pmd_t pmd = ACCESS_ONCE(*pmdp);
next = pmd_addr_end(addr, end);
- if (pmd_none(pmd))
+	/*
+	 * If we find a splitting transparent hugepage we return
+	 * zero. That will result in taking the slow path, which
+	 * will call wait_split_huge_page() if the pmd is still in
+	 * the splitting state.
+	 */
+ if (pmd_none(pmd) || pmd_trans_splitting(pmd))
return 0;
- if (pmd_huge(pmd)) {
+ if (pmd_huge(pmd) || pmd_large(pmd)) {
if (!gup_hugepte((pte_t *)pmdp, PMD_SIZE, addr, next,
write, pages, nr))
return 0;
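
A pmd in the middle of a transparent-hugepage split still looks like a huge entry, but the refcounts of its compound page are in flux, so the fast path cannot safely take a reference and instead bails out with zero; pmd_large() is also checked because on this architecture pmd_huge() is a hugetlbfs test and does not by itself cover a THP-mapped pmd. What the slow path then does is roughly the following; a hedged sketch shaped like this era's follow_page() logic, with wait_if_splitting() being an illustrative name, not part of the patch:

	#include <linux/huge_mm.h>
	#include <linux/mm.h>

	/* sketch: let a splitting pmd settle before retrying */
	static void wait_if_splitting(struct vm_area_struct *vma, pmd_t *pmd)
	{
		struct mm_struct *mm = vma->vm_mm;

		spin_lock(&mm->page_table_lock);
		if (unlikely(pmd_trans_splitting(*pmd))) {
			spin_unlock(&mm->page_table_lock);
			/* sleeps on the anon_vma lock until the split
			 * has finished; the lookup is then retried */
			wait_split_huge_page(vma->anon_vma, pmd);
			return;
		}
		/* entry was stable after all */
		spin_unlock(&mm->page_table_lock);
	}
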
@@ -91,7 +97,7 @@ static int gup_pud_range(pgd_t pgd, unsigned long addr, unsigned long end,
pudp = pud_offset(&pgd, addr);
do {
- pud_t pud = *pudp;
+ pud_t pud = ACCESS_ONCE(*pudp);
next = pud_addr_end(addr, end);
if (pud_none(pud))
@@ -111,12 +117,13 @@ static int gup_pud_range(pgd_t pgd, unsigned long addr, unsigned long end,
return 1;
}
-int get_user_pages_fast(unsigned long start, int nr_pages, int write,
- struct page **pages)
+int __get_user_pages_fast(unsigned long start, int nr_pages, int write,
+ struct page **pages)
{
struct mm_struct *mm = current->mm;
unsigned long addr, len, end;
unsigned long next;
+ unsigned long flags;
pgd_t *pgdp;
int nr = 0;
@@ -129,7 +136,7 @@ int get_user_pages_fast(unsigned long start, int nr_pages, int write,
if (unlikely(!access_ok(write ? VERIFY_WRITE : VERIFY_READ,
start, len)))
- goto slow_irqon;
+ return 0;
pr_devel(" aligned: %lx .. %lx\n", start, end);
@@ -150,40 +157,45 @@ int get_user_pages_fast(unsigned long start, int nr_pages, int write,
* So long as we atomically load page table pointers versus teardown,
* we can follow the address down to the page and take a ref on it.
*/
- local_irq_disable();
+ local_irq_save(flags);
pgdp = pgd_offset(mm, addr);
do {
- pgd_t pgd = *pgdp;
+ pgd_t pgd = ACCESS_ONCE(*pgdp);
pr_devel(" %016lx: normal pgd %p\n", addr,
(void *)pgd_val(pgd));
next = pgd_addr_end(addr, end);
if (pgd_none(pgd))
- goto slow;
+ break;
if (pgd_huge(pgd)) {
if (!gup_hugepte((pte_t *)pgdp, PGDIR_SIZE, addr, next,
write, pages, &nr))
- goto slow;
+ break;
} else if (is_hugepd(pgdp)) {
if (!gup_hugepd((hugepd_t *)pgdp, PGDIR_SHIFT,
addr, next, write, pages, &nr))
- goto slow;
+ break;
} else if (!gup_pud_range(pgd, addr, next, write, pages, &nr))
- goto slow;
+ break;
} while (pgdp++, addr = next, addr != end);
- local_irq_enable();
+ local_irq_restore(flags);
- VM_BUG_ON(nr != (end - start) >> PAGE_SHIFT);
return nr;
+}
- {
- int ret;
+int get_user_pages_fast(unsigned long start, int nr_pages, int write,
+ struct page **pages)
+{
+ struct mm_struct *mm = current->mm;
+ int nr, ret;
+
+ start &= PAGE_MASK;
+ nr = __get_user_pages_fast(start, nr_pages, write, pages);
+ ret = nr;
-slow:
- local_irq_enable();
-slow_irqon:
+ if (nr < nr_pages) {
pr_devel(" slow path ! nr = %d\n", nr);
/* Try to get the remaining pages with get_user_pages */
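
Two changes in the hunk above make the fast walk a reusable primitive: local_irq_save()/local_irq_restore() replace the unconditional disable/enable so the function can be entered with interrupts already off, and every former "goto slow" becomes a break, so the function simply returns how many pages it managed to pin instead of falling through to a sleeping path. Keeping interrupts off is the interlock against page-table teardown that the comment above alludes to, so the snapshots taken during the walk cannot point into freed tables. A hedged caller sketch; snapshot_user_page() is an illustrative name, not from the patch:

	#include <linux/mm.h>

	/* pin one user page without sleeping or taking mmap_sem,
	 * e.g. from a context that cannot schedule */
	static int snapshot_user_page(unsigned long uaddr, struct page **page)
	{
		if (__get_user_pages_fast(uaddr & PAGE_MASK, 1, 0, page) != 1)
			return -EFAULT;	/* caller must use a sleepable path */
		return 0;		/* drop with put_page(*page) when done */
	}
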
@@ -192,7 +204,7 @@ slow_irqon:
down_read(&mm->mmap_sem);
ret = get_user_pages(current, mm, start,
- (end - start) >> PAGE_SHIFT, write, 0, pages, NULL);
+ nr_pages - nr, write, 0, pages, NULL);
up_read(&mm->mmap_sem);
/* Have to be a bit careful with return values */
@@ -202,9 +214,9 @@ slow_irqon:
else
ret += nr;
}
-
- return ret;
}
+
+ return ret;
}
#endif /* __HAVE_ARCH_PTE_SPECIAL */
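
After the split, get_user_pages_fast() is a thin wrapper: it runs the lockless helper first, and only for the pages the helper could not pin does it take mmap_sem and fall back to get_user_pages(), asking for exactly the nr_pages - nr remainder. The return-value fixup at the end preserves the usual contract: the number of pages pinned, or a negative errno when nothing could be pinned at all. A hedged usage sketch of that contract; pin_user_buffer() is an illustrative name, not from the patch:

	#include <linux/mm.h>

	/* pin an entire user buffer or fail cleanly, unwinding a
	 * partial pin so the caller never sees half a result */
	static int pin_user_buffer(unsigned long start, int nr_pages,
				   struct page **pages)
	{
		int pinned = get_user_pages_fast(start, nr_pages, 1, pages);

		if (pinned < 0)
			return pinned;		/* nothing pinned: errno */
		if (pinned < nr_pages) {
			while (pinned--)	/* release the partial pin */
				put_page(pages[pinned]);
			return -EFAULT;
		}
		return nr_pages;
	}
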