From 94966b712c6875939fcdd83cb2707a797e131a43 Mon Sep 17 00:00:00 2001
From: Fabian Frederick
Date: Wed, 29 Oct 2014 21:14:27 +0100
Subject: powerpc: Fix section mismatch warning

Add __init to MMU_setup() which uses __initdata boot_command_line.
Also MMU_setup() is only called from MMU_init(), which is also __init.

Warning appeared since commit 3e47d1474c2b.

Fixes: 3e47d1474c2b ("powerpc: Remove powerpc specific cmd_line")
Signed-off-by: Fabian Frederick
[mpe: Update changelog]
Signed-off-by: Michael Ellerman

diff --git a/arch/powerpc/mm/init_32.c b/arch/powerpc/mm/init_32.c
index cad68ff..415a51b 100644
--- a/arch/powerpc/mm/init_32.c
+++ b/arch/powerpc/mm/init_32.c
@@ -103,7 +103,7 @@ unsigned long __max_low_memory = MAX_LOW_MEM;
 /*
  * Check for command-line options that affect what MMU_init will do.
  */
-void MMU_setup(void)
+void __init MMU_setup(void)
 {
         /* Check for nobats option (used in mapin_ram). */
         if (strstr(boot_command_line, "nobats")) {
-- cgit v0.10.2
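
The section mismatch fixed above comes from the init-section annotations: data
marked __initdata lives in .init.data and is discarded after boot, so modpost
warns when code outside .init.text references it. A minimal sketch of the
pattern, using made-up identifiers (example_cmdline, example_setup) rather than
the code touched by the patch:

    #include <linux/init.h>
    #include <linux/printk.h>
    #include <linux/string.h>

    /* Lives in .init.data and is freed once boot finishes. */
    static char example_cmdline[64] __initdata;

    /*
     * Marked __init so it is placed in .init.text. Without the annotation,
     * modpost reports a section mismatch, because non-init code would be
     * referencing data that may already have been freed.
     */
    static void __init example_setup(void)
    {
            if (strstr(example_cmdline, "nobats"))
                    pr_info("nobats selected\n");
    }
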
From 408cddd96e3b155337f9e3aba2198e92e94c6068 Mon Sep 17 00:00:00 2001
From: Hari Bathini
Date: Wed, 1 Oct 2014 12:32:30 +0530
Subject: powerpc/fadump: Fix endianness issues in firmware assisted dump handling

Firmware-assisted dump (fadump) kernel code is not endian safe. The patch
below fixes this issue. Tested this patch with the upstream kernel. The
output below shows the crash tool successfully opening an LE fadump vmcore.

    # crash vmlinux vmcore
    GNU gdb (GDB) 7.6
    This GDB was configured as "powerpc64le-unknown-linux-gnu"...

          KERNEL: vmlinux
        DUMPFILE: vmcore
            CPUS: 16
            DATE: Wed Dec 31 19:00:00 1969
          UPTIME: 00:03:28
    LOAD AVERAGE: 0.46, 0.86, 0.41
           TASKS: 268
        NODENAME: linux-dhr2
         RELEASE: 3.17.0-rc5-7-default
         VERSION: #6 SMP Tue Sep 30 01:06:34 EDT 2014
         MACHINE: ppc64le (4116 MHz)
          MEMORY: 40 GB
           PANIC: "Oops: Kernel access of bad area, sig: 11 [#1]" (check log for details)
             PID: 6223
         COMMAND: "bash"
            TASK: c0000009661b2500  [THREAD_INFO: c000000967ac0000]
             CPU: 2
           STATE: TASK_RUNNING (PANIC)

Signed-off-by: Hari Bathini
[mpe: Make the comment in pSeries_lpar_hptab_clear() clearer]
Signed-off-by: Michael Ellerman

diff --git a/arch/powerpc/include/asm/fadump.h b/arch/powerpc/include/asm/fadump.h
index a677456..493e72f 100644
--- a/arch/powerpc/include/asm/fadump.h
+++ b/arch/powerpc/include/asm/fadump.h
@@ -70,39 +70,39 @@
 #define CPU_UNKNOWN             (~((u32)0))

 /* Utility macros */
-#define SKIP_TO_NEXT_CPU(reg_entry)                                     \
-({                                                                      \
-        while (reg_entry->reg_id != REG_ID("CPUEND"))                   \
-                reg_entry++;                                            \
-        reg_entry++;                                                    \
+#define SKIP_TO_NEXT_CPU(reg_entry)                                     \
+({                                                                      \
+        while (be64_to_cpu(reg_entry->reg_id) != REG_ID("CPUEND"))      \
+                reg_entry++;                                            \
+        reg_entry++;                                                    \
 })

 /* Kernel Dump section info */
 struct fadump_section {
-        u32     request_flag;
-        u16     source_data_type;
-        u16     error_flags;
-        u64     source_address;
-        u64     source_len;
-        u64     bytes_dumped;
-        u64     destination_address;
+        __be32  request_flag;
+        __be16  source_data_type;
+        __be16  error_flags;
+        __be64  source_address;
+        __be64  source_len;
+        __be64  bytes_dumped;
+        __be64  destination_address;
 };

 /* ibm,configure-kernel-dump header. */
 struct fadump_section_header {
-        u32     dump_format_version;
-        u16     dump_num_sections;
-        u16     dump_status_flag;
-        u32     offset_first_dump_section;
+        __be32  dump_format_version;
+        __be16  dump_num_sections;
+        __be16  dump_status_flag;
+        __be32  offset_first_dump_section;

         /* Fields for disk dump option. */
-        u32     dd_block_size;
-        u64     dd_block_offset;
-        u64     dd_num_blocks;
-        u32     dd_offset_disk_path;
+        __be32  dd_block_size;
+        __be64  dd_block_offset;
+        __be64  dd_num_blocks;
+        __be32  dd_offset_disk_path;

         /* Maximum time allowed to prevent an automatic dump-reboot. */
-        u32     max_time_auto;
+        __be32  max_time_auto;
 };

 /*
@@ -174,15 +174,15 @@ static inline u64 str_to_u64(const char *str)

 /* Register save area header. */
 struct fadump_reg_save_area_header {
-        u64     magic_number;
-        u32     version;
-        u32     num_cpu_offset;
+        __be64  magic_number;
+        __be32  version;
+        __be32  num_cpu_offset;
 };

 /* Register entry. */
 struct fadump_reg_entry {
-        u64     reg_id;
-        u64     reg_value;
+        __be64  reg_id;
+        __be64  reg_value;
 };

 /* fadump crash info structure */
diff --git a/arch/powerpc/kernel/fadump.c b/arch/powerpc/kernel/fadump.c
index 742694c..26d091a 100644
--- a/arch/powerpc/kernel/fadump.c
+++ b/arch/powerpc/kernel/fadump.c
@@ -58,7 +58,7 @@ int __init early_init_dt_scan_fw_dump(unsigned long node,
         const __be32 *sections;
         int i, num_sections;
         int size;
-        const int *token;
+        const __be32 *token;

         if (depth != 1 || strcmp(uname, "rtas") != 0)
                 return 0;
@@ -72,7 +72,7 @@ int __init early_init_dt_scan_fw_dump(unsigned long node,
                 return 1;

         fw_dump.fadump_supported = 1;
-        fw_dump.ibm_configure_kernel_dump = *token;
+        fw_dump.ibm_configure_kernel_dump = be32_to_cpu(*token);

         /*
          * The 'ibm,kernel-dump' rtas node is present only if there is
@@ -147,11 +147,11 @@ static unsigned long init_fadump_mem_struct(struct fadump_mem_struct *fdm,
         memset(fdm, 0, sizeof(struct fadump_mem_struct));
         addr = addr & PAGE_MASK;

-        fdm->header.dump_format_version = 0x00000001;
-        fdm->header.dump_num_sections = 3;
+        fdm->header.dump_format_version = cpu_to_be32(0x00000001);
+        fdm->header.dump_num_sections = cpu_to_be16(3);
         fdm->header.dump_status_flag = 0;
         fdm->header.offset_first_dump_section =
-                (u32)offsetof(struct fadump_mem_struct, cpu_state_data);
+                cpu_to_be32((u32)offsetof(struct fadump_mem_struct, cpu_state_data));

         /*
          * Fields for disk dump option.
@@ -167,27 +167,27 @@ static unsigned long init_fadump_mem_struct(struct fadump_mem_struct *fdm,

         /* Kernel dump sections */
         /* cpu state data section. */
-        fdm->cpu_state_data.request_flag = FADUMP_REQUEST_FLAG;
-        fdm->cpu_state_data.source_data_type = FADUMP_CPU_STATE_DATA;
+        fdm->cpu_state_data.request_flag = cpu_to_be32(FADUMP_REQUEST_FLAG);
+        fdm->cpu_state_data.source_data_type = cpu_to_be16(FADUMP_CPU_STATE_DATA);
         fdm->cpu_state_data.source_address = 0;
-        fdm->cpu_state_data.source_len = fw_dump.cpu_state_data_size;
-        fdm->cpu_state_data.destination_address = addr;
+        fdm->cpu_state_data.source_len = cpu_to_be64(fw_dump.cpu_state_data_size);
+        fdm->cpu_state_data.destination_address = cpu_to_be64(addr);
         addr += fw_dump.cpu_state_data_size;

         /* hpte region section */
-        fdm->hpte_region.request_flag = FADUMP_REQUEST_FLAG;
-        fdm->hpte_region.source_data_type = FADUMP_HPTE_REGION;
+        fdm->hpte_region.request_flag = cpu_to_be32(FADUMP_REQUEST_FLAG);
+        fdm->hpte_region.source_data_type = cpu_to_be16(FADUMP_HPTE_REGION);
         fdm->hpte_region.source_address = 0;
-        fdm->hpte_region.source_len = fw_dump.hpte_region_size;
-        fdm->hpte_region.destination_address = addr;
+        fdm->hpte_region.source_len = cpu_to_be64(fw_dump.hpte_region_size);
+        fdm->hpte_region.destination_address = cpu_to_be64(addr);
         addr += fw_dump.hpte_region_size;

         /* RMA region section */
-        fdm->rmr_region.request_flag = FADUMP_REQUEST_FLAG;
-        fdm->rmr_region.source_data_type = FADUMP_REAL_MODE_REGION;
-        fdm->rmr_region.source_address = RMA_START;
-        fdm->rmr_region.source_len = fw_dump.boot_memory_size;
-        fdm->rmr_region.destination_address = addr;
+        fdm->rmr_region.request_flag = cpu_to_be32(FADUMP_REQUEST_FLAG);
+        fdm->rmr_region.source_data_type = cpu_to_be16(FADUMP_REAL_MODE_REGION);
+        fdm->rmr_region.source_address = cpu_to_be64(RMA_START);
+        fdm->rmr_region.source_len = cpu_to_be64(fw_dump.boot_memory_size);
+        fdm->rmr_region.destination_address = cpu_to_be64(addr);
         addr += fw_dump.boot_memory_size;

         return addr;
@@ -272,7 +272,7 @@ int __init fadump_reserve_mem(void)
          * first kernel.
          */
         if (fdm_active)
-                fw_dump.boot_memory_size = fdm_active->rmr_region.source_len;
+                fw_dump.boot_memory_size = be64_to_cpu(fdm_active->rmr_region.source_len);
         else
                 fw_dump.boot_memory_size = fadump_calculate_reserve_size();

@@ -314,8 +314,8 @@ int __init fadump_reserve_mem(void)
                         (unsigned long)(base >> 20));

                 fw_dump.fadumphdr_addr =
-                                fdm_active->rmr_region.destination_address +
-                                fdm_active->rmr_region.source_len;
+                                be64_to_cpu(fdm_active->rmr_region.destination_address) +
+                                be64_to_cpu(fdm_active->rmr_region.source_len);
                 pr_debug("fadumphdr_addr = %p\n", (void *) fw_dump.fadumphdr_addr);
         } else {
@@ -472,9 +472,9 @@ fadump_read_registers(struct fadump_reg_entry *reg_entry, struct pt_regs *regs)
 {
         memset(regs, 0, sizeof(struct pt_regs));

-        while (reg_entry->reg_id != REG_ID("CPUEND")) {
-                fadump_set_regval(regs, reg_entry->reg_id,
-                                        reg_entry->reg_value);
+        while (be64_to_cpu(reg_entry->reg_id) != REG_ID("CPUEND")) {
+                fadump_set_regval(regs, be64_to_cpu(reg_entry->reg_id),
+                                        be64_to_cpu(reg_entry->reg_value));
                 reg_entry++;
         }
         reg_entry++;
@@ -603,20 +603,20 @@ static int __init fadump_build_cpu_notes(const struct fadump_mem_struct *fdm)
         if (!fdm->cpu_state_data.bytes_dumped)
                 return -EINVAL;

-        addr = fdm->cpu_state_data.destination_address;
+        addr = be64_to_cpu(fdm->cpu_state_data.destination_address);
         vaddr = __va(addr);

         reg_header = vaddr;
-        if (reg_header->magic_number != REGSAVE_AREA_MAGIC) {
+        if (be64_to_cpu(reg_header->magic_number) != REGSAVE_AREA_MAGIC) {
                 printk(KERN_ERR "Unable to read register save area.\n");
                 return -ENOENT;
         }
         pr_debug("--------CPU State Data------------\n");
-        pr_debug("Magic Number: %llx\n", reg_header->magic_number);
-        pr_debug("NumCpuOffset: %x\n", reg_header->num_cpu_offset);
+        pr_debug("Magic Number: %llx\n", be64_to_cpu(reg_header->magic_number));
+        pr_debug("NumCpuOffset: %x\n", be32_to_cpu(reg_header->num_cpu_offset));
-        vaddr += reg_header->num_cpu_offset;
-        num_cpus = *((u32 *)(vaddr));
+        vaddr += be32_to_cpu(reg_header->num_cpu_offset);
+        num_cpus = be32_to_cpu(*((__be32 *)(vaddr)));
         pr_debug("NumCpus : %u\n", num_cpus);
         vaddr += sizeof(u32);
         reg_entry = (struct fadump_reg_entry *)vaddr;
@@ -639,13 +639,13 @@ static int __init fadump_build_cpu_notes(const struct fadump_mem_struct *fdm)
                 fdh = __va(fw_dump.fadumphdr_addr);

         for (i = 0; i < num_cpus; i++) {
-                if (reg_entry->reg_id != REG_ID("CPUSTRT")) {
+                if (be64_to_cpu(reg_entry->reg_id) != REG_ID("CPUSTRT")) {
                         printk(KERN_ERR "Unable to read CPU state data\n");
                         rc = -ENOENT;
                         goto error_out;
                 }
                 /* Lower 4 bytes of reg_value contains logical cpu id */
-                cpu = reg_entry->reg_value & FADUMP_CPU_ID_MASK;
+                cpu = be64_to_cpu(reg_entry->reg_value) & FADUMP_CPU_ID_MASK;
                 if (fdh && !cpumask_test_cpu(cpu, &fdh->cpu_online_mask)) {
                         SKIP_TO_NEXT_CPU(reg_entry);
                         continue;
@@ -692,7 +692,7 @@ static int __init process_fadump(const struct fadump_mem_struct *fdm_active)
                 return -EINVAL;

         /* Check if the dump data is valid. */
-        if ((fdm_active->header.dump_status_flag == FADUMP_ERROR_FLAG) ||
+        if ((be16_to_cpu(fdm_active->header.dump_status_flag) == FADUMP_ERROR_FLAG) ||
                         (fdm_active->cpu_state_data.error_flags != 0) ||
                         (fdm_active->rmr_region.error_flags != 0)) {
                 printk(KERN_ERR "Dump taken by platform is not valid\n");
@@ -828,7 +828,7 @@ static void fadump_setup_crash_memory_ranges(void)
 static inline unsigned long fadump_relocate(unsigned long paddr)
 {
         if (paddr > RMA_START && paddr < fw_dump.boot_memory_size)
-                return fdm.rmr_region.destination_address + paddr;
+                return be64_to_cpu(fdm.rmr_region.destination_address) + paddr;
         else
                 return paddr;
 }
@@ -902,7 +902,7 @@ static int fadump_create_elfcore_headers(char *bufp)
                  * to the specified destination_address. Hence set
                  * the correct offset.
                  */
-                phdr->p_offset = fdm.rmr_region.destination_address;
+                phdr->p_offset = be64_to_cpu(fdm.rmr_region.destination_address);
         }

         phdr->p_paddr = mbase;
@@ -951,7 +951,7 @@ static void register_fadump(void)

         fadump_setup_crash_memory_ranges();

-        addr = fdm.rmr_region.destination_address + fdm.rmr_region.source_len;
+        addr = be64_to_cpu(fdm.rmr_region.destination_address) + be64_to_cpu(fdm.rmr_region.source_len);
         /* Initialize fadump crash info header. */
         addr = init_fadump_header(addr);
         vaddr = __va(addr);
@@ -1023,7 +1023,7 @@ void fadump_cleanup(void)
         /* Invalidate the registration only if dump is active. */
         if (fw_dump.dump_active) {
                 init_fadump_mem_struct(&fdm,
-                        fdm_active->cpu_state_data.destination_address);
+                        be64_to_cpu(fdm_active->cpu_state_data.destination_address));
                 fadump_invalidate_dump(&fdm);
         }
 }
@@ -1063,7 +1063,7 @@ static void fadump_invalidate_release_mem(void)
                 return;
         }

-        destination_address = fdm_active->cpu_state_data.destination_address;
+        destination_address = be64_to_cpu(fdm_active->cpu_state_data.destination_address);
         fadump_cleanup();
         mutex_unlock(&fadump_mutex);

@@ -1183,31 +1183,31 @@ static int fadump_region_show(struct seq_file *m, void *private)
         seq_printf(m, "CPU : [%#016llx-%#016llx] %#llx bytes, "
                         "Dumped: %#llx\n",
-                        fdm_ptr->cpu_state_data.destination_address,
-                        fdm_ptr->cpu_state_data.destination_address +
-                        fdm_ptr->cpu_state_data.source_len - 1,
-                        fdm_ptr->cpu_state_data.source_len,
-                        fdm_ptr->cpu_state_data.bytes_dumped);
+                        be64_to_cpu(fdm_ptr->cpu_state_data.destination_address),
+                        be64_to_cpu(fdm_ptr->cpu_state_data.destination_address) +
+                        be64_to_cpu(fdm_ptr->cpu_state_data.source_len) - 1,
+                        be64_to_cpu(fdm_ptr->cpu_state_data.source_len),
+                        be64_to_cpu(fdm_ptr->cpu_state_data.bytes_dumped));
         seq_printf(m, "HPTE: [%#016llx-%#016llx] %#llx bytes, "
                         "Dumped: %#llx\n",
-                        fdm_ptr->hpte_region.destination_address,
-                        fdm_ptr->hpte_region.destination_address +
-                        fdm_ptr->hpte_region.source_len - 1,
-                        fdm_ptr->hpte_region.source_len,
-                        fdm_ptr->hpte_region.bytes_dumped);
+                        be64_to_cpu(fdm_ptr->hpte_region.destination_address),
+                        be64_to_cpu(fdm_ptr->hpte_region.destination_address) +
+                        be64_to_cpu(fdm_ptr->hpte_region.source_len) - 1,
+                        be64_to_cpu(fdm_ptr->hpte_region.source_len),
+                        be64_to_cpu(fdm_ptr->hpte_region.bytes_dumped));
         seq_printf(m, "DUMP: [%#016llx-%#016llx] %#llx bytes, "
                         "Dumped: %#llx\n",
-                        fdm_ptr->rmr_region.destination_address,
-                        fdm_ptr->rmr_region.destination_address +
-                        fdm_ptr->rmr_region.source_len - 1,
-                        fdm_ptr->rmr_region.source_len,
-                        fdm_ptr->rmr_region.bytes_dumped);
+                        be64_to_cpu(fdm_ptr->rmr_region.destination_address),
+                        be64_to_cpu(fdm_ptr->rmr_region.destination_address) +
+                        be64_to_cpu(fdm_ptr->rmr_region.source_len) - 1,
+                        be64_to_cpu(fdm_ptr->rmr_region.source_len),
+                        be64_to_cpu(fdm_ptr->rmr_region.bytes_dumped));

         if (!fdm_active ||
             (fw_dump.reserve_dump_area_start ==
-             fdm_ptr->cpu_state_data.destination_address))
+             be64_to_cpu(fdm_ptr->cpu_state_data.destination_address)))
                 goto out;

         /* Dump is active. Show reserved memory region. */
@@ -1215,10 +1215,10 @@ static int fadump_region_show(struct seq_file *m, void *private)
                         " : [%#016llx-%#016llx] %#llx bytes, "
                         "Dumped: %#llx\n",
                         (unsigned long long)fw_dump.reserve_dump_area_start,
-                        fdm_ptr->cpu_state_data.destination_address - 1,
-                        fdm_ptr->cpu_state_data.destination_address -
+                        be64_to_cpu(fdm_ptr->cpu_state_data.destination_address) - 1,
+                        be64_to_cpu(fdm_ptr->cpu_state_data.destination_address) -
                         fw_dump.reserve_dump_area_start,
-                        fdm_ptr->cpu_state_data.destination_address -
+                        be64_to_cpu(fdm_ptr->cpu_state_data.destination_address) -
                         fw_dump.reserve_dump_area_start);
 out:
         if (fdm_active)
diff --git a/arch/powerpc/platforms/pseries/lpar.c b/arch/powerpc/platforms/pseries/lpar.c
index 8c509d5..f6880d2 100644
--- a/arch/powerpc/platforms/pseries/lpar.c
+++ b/arch/powerpc/platforms/pseries/lpar.c
@@ -43,6 +43,7 @@
 #include
 #include
 #include
+#include

 #include "pseries.h"

@@ -247,8 +248,17 @@ static void pSeries_lpar_hptab_clear(void)
         }

 #ifdef __LITTLE_ENDIAN__
-        /* Reset exceptions to big endian */
-        if (firmware_has_feature(FW_FEATURE_SET_MODE)) {
+        /*
+         * Reset exceptions to big endian.
+         *
+         * FIXME this is a hack for kexec, we need to reset the exception
+         * endian before starting the new kernel and this is a convenient place
+         * to do it.
+         *
+         * This is also called on boot when a fadump happens. In that case we
+         * must not change the exception endian mode.
+         */
+        if (firmware_has_feature(FW_FEATURE_SET_MODE) && !is_fadump_active()) {
                 long rc;

                 rc = pseries_big_endian_exceptions();
-- cgit v0.10.2
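
The fadump fix above follows the usual kernel pattern for structures shared
with big-endian firmware: declare the fields with the sparse-checked __be
types and convert on every access with the cpu_to_be*/be*_to_cpu helpers. A
minimal sketch of that pattern, assuming a hypothetical firmware-provided
record (fw_record and read_fw_len are illustrative names, not taken from the
patch):

    #include <linux/types.h>
    #include <asm/byteorder.h>

    /* Layout fixed by (hypothetical) big-endian firmware. */
    struct fw_record {
            __be32 version;
            __be64 len_bytes;
    };

    static u64 read_fw_len(const struct fw_record *rec)
    {
            /*
             * Convert on access: be64_to_cpu() is a no-op on a BE kernel and
             * a byte swap on an LE one, and sparse flags any direct use of a
             * __be64 value as a plain integer.
             */
            return be64_to_cpu(rec->len_bytes);
    }
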
From 808be31426af57af22268ef0fcb42617beb3d15b Mon Sep 17 00:00:00 2001
From: Anton Blanchard
Date: Fri, 31 Oct 2014 16:50:57 +1100
Subject: powerpc: do_notify_resume can be called with bad thread_info flags argument

Back in 7230c5644188 ("powerpc: Rework lazy-interrupt handling") we added a
call out to restore_interrupts() (written in C) before calling
do_notify_resume:

    bl      restore_interrupts
    addi    r3,r1,STACK_FRAME_OVERHEAD
    bl      do_notify_resume

Unfortunately do_notify_resume takes two arguments, the second one being the
thread_info flags:

    void do_notify_resume(struct pt_regs *regs, unsigned long thread_info_flags)

We do populate r4 (the second argument) earlier, but restore_interrupts() is
free to muck it up all it wants. My guess is the gcc compiler gods shone down
on us and its register allocator never used r4. Sometimes, rarely, luck is on
our side. LLVM on the other hand did trample r4.

Signed-off-by: Anton Blanchard
Cc: stable@vger.kernel.org
Signed-off-by: Michael Ellerman

diff --git a/arch/powerpc/kernel/entry_64.S b/arch/powerpc/kernel/entry_64.S
index 5bbd1bc..0905c8d 100644
--- a/arch/powerpc/kernel/entry_64.S
+++ b/arch/powerpc/kernel/entry_64.S
@@ -659,7 +659,13 @@ _GLOBAL(ret_from_except_lite)
 3:
 #endif
         bl      save_nvgprs
+        /*
+         * Use a non volatile GPR to save and restore our thread_info flags
+         * across the call to restore_interrupts.
+         */
+        mr      r30,r4
         bl      restore_interrupts
+        mr      r4,r30
         addi    r3,r1,STACK_FRAME_OVERHEAD
         bl      do_notify_resume
         b       ret_from_except
-- cgit v0.10.2

From 325e4114043469e5f9923d902b4d30bcc2be8163 Mon Sep 17 00:00:00 2001
From: Benjamin Herrenschmidt
Date: Thu, 30 Oct 2014 16:19:13 +1100
Subject: powerpc/powernv: Properly fix LPC debugfs endianness

Endian is hard, especially when I designed a stupid FW interface, and I
should know better... oh well, this is attempt #2 at fixing this properly.

This time it seems to work with all access sizes and I can run my flashing
tool (which exercises all sorts of access sizes and types to access the SPI
controller in the BMC) just fine.

Signed-off-by: Benjamin Herrenschmidt
CC: stable@vger.kernel.org
Signed-off-by: Michael Ellerman

diff --git a/arch/powerpc/platforms/powernv/opal-lpc.c b/arch/powerpc/platforms/powernv/opal-lpc.c
index ad4b31d..e4169d6 100644
--- a/arch/powerpc/platforms/powernv/opal-lpc.c
+++ b/arch/powerpc/platforms/powernv/opal-lpc.c
@@ -216,14 +216,54 @@ static ssize_t lpc_debug_read(struct file *filp, char __user *ubuf,
                            &data, len);
                 if (rc)
                         return -ENXIO;
+
+                /*
+                 * Now there is some trickery with the data returned by OPAL
+                 * as it's the desired data right justified in a 32-bit BE
+                 * word.
+                 *
+                 * This is a very bad interface and I'm to blame for it :-(
+                 *
+                 * So we can't just apply a 32-bit swap to what comes from
+                 * OPAL, because user space expects the *bytes* to be in
+                 * their proper respective positions (ie, LPC position).
+                 *
+                 * So what we really want to do here is to shift data right
+                 * appropriately on a LE kernel.
+                 *
+                 * IE. If the LPC transaction has bytes B0, B1, B2 and B3 in
+                 * that order, we have in memory written to by OPAL at the
+                 * "data" pointer:
+                 *
+                 *               Bytes:      OPAL "data"   LE "data"
+                 *   32-bit:  B0 B1 B2 B3    B0B1B2B3      B3B2B1B0
+                 *   16-bit:  B0 B1          0000B0B1      B1B00000
+                 *    8-bit:  B0             000000B0      B0000000
+                 *
+                 * So a BE kernel will have the leftmost of the above in the
+                 * MSB and rightmost in the LSB and can just then "cast" the
+                 * u32 "data" down to the appropriate quantity and write it.
+                 *
+                 * However, an LE kernel can't. It doesn't need to swap
+                 * because a load from data followed by a store to user are
+                 * going to preserve the byte ordering which is the wire byte
+                 * order which is what the user wants, but in order to "crop"
+                 * to the right size, we need to shift right first.
+                 */
                 switch(len) {
                 case 4:
                         rc = __put_user((u32)data, (u32 __user *)ubuf);
                         break;
                 case 2:
+#ifdef __LITTLE_ENDIAN__
+                        data >>= 16;
+#endif
                         rc = __put_user((u16)data, (u16 __user *)ubuf);
                         break;
                 default:
+#ifdef __LITTLE_ENDIAN__
+                        data >>= 24;
+#endif
                         rc = __put_user((u8)data, (u8 __user *)ubuf);
                         break;
                 }
@@ -263,12 +303,31 @@ static ssize_t lpc_debug_write(struct file *filp, const char __user *ubuf,
                 else if (todo > 1 && (pos & 1) == 0)
                         len = 2;
         }
+
+        /*
+         * Similarly to the read case, we have some trickery here but
+         * it's different to handle. We need to pass the value to OPAL in
+         * a register whose layout depends on the access size. We want
+         * to reproduce the memory layout of the user, however we aren't
+         * doing a load from user and a store to another memory location
+         * which would achieve that. Here we pass the value to OPAL via
+         * a register which is expected to contain the "BE" interpretation
+         * of the byte sequence. IE: for a 32-bit access, byte 0 should be
+         * in the MSB. So here we *do* need to byteswap on LE.
+         *
+         *           User bytes:    LE "data"   OPAL "data"
+         *   32-bit:  B0 B1 B2 B3   B3B2B1B0    B0B1B2B3
+         *   16-bit:  B0 B1         0000B1B0    0000B0B1
+         *    8-bit:  B0            000000B0    000000B0
+         */
         switch(len) {
         case 4:
                 rc = __get_user(data, (u32 __user *)ubuf);
+                data = cpu_to_be32(data);
                 break;
         case 2:
                 rc = __get_user(data, (u16 __user *)ubuf);
+                data = cpu_to_be16(data);
                 break;
         default:
                 rc = __get_user(data, (u8 __user *)ubuf);
-- cgit v0.10.2
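
The long comment in the patch above boils down to this: OPAL hands back the
value right-justified in a 32-bit big-endian quantity, so a little-endian
kernel must shift it right before truncating to the access size. A small
stand-alone sketch of that cropping step (ordinary user-space C written for
illustration, not the kernel code itself; crop_le is a made-up name):

    #include <stdint.h>
    #include <stdio.h>

    /*
     * Crop a 32-bit word that holds a right-justified big-endian quantity
     * down to 'len' bytes, the way the read path above does on LE.
     */
    static uint32_t crop_le(uint32_t data, unsigned int len)
    {
            if (len == 2)
                    data >>= 16;    /* keep bytes B0 B1, drop the zero half */
            else if (len == 1)
                    data >>= 24;    /* keep byte B0 */
            return data;
    }

    int main(void)
    {
            /*
             * 16-bit read of LPC bytes B0=0x12, B1=0x34: an LE load of the
             * OPAL buffer yields 0x34120000; after cropping, storing the
             * u16 reproduces the wire order B0 B1 in the user buffer.
             */
            uint16_t out = (uint16_t)crop_le(0x34120000u, 2);
            const uint8_t *p = (const uint8_t *)&out;

            printf("%02x %02x\n", p[0], p[1]);  /* prints "12 34" on LE */
            return 0;
    }
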
From 10ccaf178b2b961d8bca252d647ed7ed8aae2a20 Mon Sep 17 00:00:00 2001
From: Dan Streetman
Date: Fri, 31 Oct 2014 15:41:34 -0400
Subject: powerpc: use device_online/offline() instead of cpu_up/down()

In powerpc pseries platform dlpar operations, use device_online() and
device_offline() instead of cpu_up() and cpu_down().

Calling cpu_up/down() directly does not update the cpu device offline field,
which is used to online/offline a cpu from sysfs. Calling
device_online/offline() instead keeps the sysfs cpu online value correct.
The hotplug lock, which is required to be held when calling
device_online/offline(), is already held when dlpar_online/offline_cpu() are
called, since they are called only from cpu_probe|release_store().

This patch fixes errors on phyp (PowerVM) systems that have cpu(s)
added/removed using dlpar operations; without this patch, the
/sys/devices/system/cpu/cpuN/online nodes do not correctly show the online
state of added/removed cpus.

Signed-off-by: Dan Streetman
Cc: Nathan Fontenot
Fixes: 0902a9044fa5 ("Driver core: Use generic offline/online for CPU offline/online")
Signed-off-by: Michael Ellerman

diff --git a/arch/powerpc/platforms/pseries/dlpar.c b/arch/powerpc/platforms/pseries/dlpar.c
index 6ad83bd..c22bb1b 100644
--- a/arch/powerpc/platforms/pseries/dlpar.c
+++ b/arch/powerpc/platforms/pseries/dlpar.c
@@ -382,7 +382,7 @@ static int dlpar_online_cpu(struct device_node *dn)
                         BUG_ON(get_cpu_current_state(cpu) != CPU_STATE_OFFLINE);
                         cpu_maps_update_done();
-                        rc = cpu_up(cpu);
+                        rc = device_online(get_cpu_device(cpu));
                         if (rc)
                                 goto out;
                         cpu_maps_update_begin();
@@ -467,7 +467,7 @@ static int dlpar_offline_cpu(struct device_node *dn)
                         if (get_cpu_current_state(cpu) == CPU_STATE_ONLINE) {
                                 set_preferred_offline_state(cpu, CPU_STATE_OFFLINE);
                                 cpu_maps_update_done();
-                                rc = cpu_down(cpu);
+                                rc = device_offline(get_cpu_device(cpu));
                                 if (rc)
                                         goto out;
                                 cpu_maps_update_begin();
-- cgit v0.10.2
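
The dlpar change above relies on device_online()/device_offline() updating the
per-CPU struct device's offline flag, which is what
/sys/devices/system/cpu/cpuN/online reports; cpu_up()/cpu_down() bypass that
bookkeeping. A minimal sketch of the pattern for a caller that does not
already hold the hotplug lock (example_online_cpu is an illustrative name; the
dlpar paths themselves, as the commit message notes, are entered with the lock
already held):

    #include <linux/cpu.h>
    #include <linux/device.h>

    /* Online one CPU so that the sysfs "online" attribute stays in sync. */
    static int example_online_cpu(unsigned int cpu)
    {
            int rc;

            lock_device_hotplug();          /* required around device_online() */
            rc = device_online(get_cpu_device(cpu));
            unlock_device_hotplug();

            return rc;
    }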